/* SPDX-License-Identifier: GPL-2.0 */
#include <asm-generic/audit_dir_write.h>
__NR_acct,
#ifdef __NR_swapon
__NR_swapon,
#endif
__NR_quotactl,
#ifdef __NR_truncate
__NR_truncate,
#endif
#ifdef __NR_truncate64
__NR_truncate64,
#endif
#ifdef __NR_ftruncate
__NR_ftruncate,
#endif
#ifdef __NR_ftruncate64
__NR_ftruncate64,
#endif
#ifdef __NR_bind
__NR_bind,	/* bind can affect fs object only in one way... */
#endif
#ifdef __NR_fallocate
__NR_fallocate,
#endif
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/slab.h>
#include <ia_css_host_data.h>
#include <sh_css_internal.h>

struct ia_css_host_data *ia_css_host_data_allocate(size_t size)
{
	struct ia_css_host_data *me;

	me = kmalloc(sizeof(struct ia_css_host_data), GFP_KERNEL);
	if (!me)
		return NULL;

	me->size = (uint32_t)size;
	me->address = kvmalloc(size, GFP_KERNEL);
	if (!me->address) {
		kfree(me);
		return NULL;
	}
	return me;
}

void ia_css_host_data_free(struct ia_css_host_data *me)
{
	if (me) {
		kvfree(me->address);
		me->address = NULL;
		kfree(me);
	}
}
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine (FME) Partial Reconfiguration
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <[email protected]>
 *   Xiao Guangrong <[email protected]>
 *   Wu Hao <[email protected]>
 *   Joseph Grecco <[email protected]>
 *   Enno Luebbers <[email protected]>
 *   Tim Whisonant <[email protected]>
 *   Ananda Ravuri <[email protected]>
 *   Christopher Rauer <[email protected]>
 *   Henry Mitchel <[email protected]>
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-bridge.h>
#include <linux/fpga/fpga-region.h>
#include <linux/fpga-dfl.h>

#include "dfl.h"
#include "dfl-fme.h"
#include "dfl-fme-pr.h"

static struct dfl_fme_region *
dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id)
{
	struct dfl_fme_region *fme_region;

	list_for_each_entry(fme_region, &fme->region_list, node)
		if (fme_region->port_id == port_id)
			return fme_region;

	return NULL;
}

static int dfl_fme_region_match(struct device *dev, const void *data)
{
	return dev->parent == data;
}

static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
{
	struct dfl_fme_region *fme_region;
	struct fpga_region *region;

	fme_region = dfl_fme_region_find_by_port_id(fme, port_id);
	if (!fme_region)
		return NULL;

	region = fpga_region_class_find(NULL, &fme_region->region->dev,
					dfl_fme_region_match);
	if (!region)
		return NULL;

	return region;
}

static int fme_pr(struct platform_device *pdev, unsigned long arg)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __user *argp = (void __user *)arg;
	struct dfl_fpga_fme_port_pr port_pr;
	struct fpga_image_info *info;
	struct fpga_region *region;
	void __iomem *fme_hdr;
	struct dfl_fme *fme;
	unsigned long minsz;
	void *buf = NULL;
	size_t length;
	int ret = 0;
	u64 v;

	minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address);

	if (copy_from_user(&port_pr, argp, minsz))
		return -EFAULT;

	if (port_pr.argsz < minsz || port_pr.flags)
		return -EINVAL;

	/* get fme header region */
	fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
					       FME_FEATURE_ID_HEADER);

	/* check port id */
	v = readq(fme_hdr + FME_HDR_CAP);
	if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) {
		dev_dbg(&pdev->dev, "port number more than maximum\n");
		return -EINVAL;
	}

	/*
	 * align PR buffer per PR bandwidth, as HW ignores the extra padding
	 * data automatically.
	 */
	length = ALIGN(port_pr.buffer_size, 4);

	buf = vmalloc(length);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf,
			   (void __user *)(unsigned long)port_pr.buffer_address,
			   port_pr.buffer_size)) {
		ret = -EFAULT;
		goto free_exit;
	}

	/* prepare fpga_image_info for PR */
	info = fpga_image_info_alloc(&pdev->dev);
	if (!info) {
		ret = -ENOMEM;
		goto free_exit;
	}

	info->flags |= FPGA_MGR_PARTIAL_RECONFIG;

	mutex_lock(&pdata->lock);
	fme = dfl_fpga_pdata_get_private(pdata);
	/* fme device has been unregistered. */
	if (!fme) {
		ret = -EINVAL;
		goto unlock_exit;
	}

	region = dfl_fme_region_find(fme, port_pr.port_id);
	if (!region) {
		ret = -EINVAL;
		goto unlock_exit;
	}

	fpga_image_info_free(region->info);

	info->buf = buf;
	info->count = length;
	info->region_id = port_pr.port_id;
	region->info = info;

	ret = fpga_region_program_fpga(region);

	/*
	 * it allows userspace to reset the PR region's logic by disabling and
	 * reenabling the bridge to clear things out between acceleration runs.
	 * so no need to hold the bridges after partial reconfiguration.
	 */
	if (region->get_bridges)
		fpga_bridges_put(&region->bridge_list);

	put_device(&region->dev);
unlock_exit:
	mutex_unlock(&pdata->lock);
free_exit:
	vfree(buf);
	return ret;
}

/**
 * dfl_fme_create_mgr - create fpga mgr platform device as child device
 * @feature: sub feature info
 * @pdata: fme platform_device's pdata
 *
 * Return: mgr platform device if successful, and error code otherwise.
 */
static struct platform_device *
dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
		   struct dfl_feature *feature)
{
	struct platform_device *mgr, *fme = pdata->dev;
	struct dfl_fme_mgr_pdata mgr_pdata;
	int ret = -ENOMEM;

	if (!feature->ioaddr)
		return ERR_PTR(-ENODEV);

	mgr_pdata.ioaddr = feature->ioaddr;

	/*
	 * Each FME has only one fpga-mgr, so allocate platform device using
	 * the same FME platform device id.
	 */
	mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id);
	if (!mgr)
		return ERR_PTR(ret);

	mgr->dev.parent = &fme->dev;

	ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata));
	if (ret)
		goto create_mgr_err;

	ret = platform_device_add(mgr);
	if (ret)
		goto create_mgr_err;

	return mgr;

create_mgr_err:
	platform_device_put(mgr);
	return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_mgr - destroy fpga mgr platform device
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);

	platform_device_unregister(priv->mgr);
}

/**
 * dfl_fme_create_bridge - create fme fpga bridge platform device as child
 *
 * @pdata: fme platform device's pdata
 * @port_id: port id for the bridge to be created.
 *
 * Return: bridge platform device if successful, and error code otherwise.
 */
static struct dfl_fme_bridge *
dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
{
	struct device *dev = &pdata->dev->dev;
	struct dfl_fme_br_pdata br_pdata;
	struct dfl_fme_bridge *fme_br;
	int ret = -ENOMEM;

	fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL);
	if (!fme_br)
		return ERR_PTR(ret);

	br_pdata.cdev = pdata->dfl_cdev;
	br_pdata.port_id = port_id;

	fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
					   PLATFORM_DEVID_AUTO);
	if (!fme_br->br)
		return ERR_PTR(ret);

	fme_br->br->dev.parent = dev;

	ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata));
	if (ret)
		goto create_br_err;

	ret = platform_device_add(fme_br->br);
	if (ret)
		goto create_br_err;

	return fme_br;

create_br_err:
	platform_device_put(fme_br->br);
	return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_bridge - destroy fpga bridge platform device
 * @fme_br: fme bridge to destroy
 */
static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
{
	platform_device_unregister(fme_br->br);
}

/**
 * dfl_fme_destroy_bridges - destroy all fpga bridge platform device
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
	struct dfl_fme_bridge *fbridge, *tmp;

	list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
		list_del(&fbridge->node);
		dfl_fme_destroy_bridge(fbridge);
	}
}

/**
 * dfl_fme_create_region - create fpga region platform device as child
 *
 * @pdata: fme platform device's pdata
 * @mgr: mgr platform device needed for region
 * @br: br platform device needed for region
 * @port_id: port id
 *
 * Return: fme region if successful, and error code otherwise.
 */
static struct dfl_fme_region *
dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
		      struct platform_device *mgr,
		      struct platform_device *br, int port_id)
{
	struct dfl_fme_region_pdata region_pdata;
	struct device *dev = &pdata->dev->dev;
	struct dfl_fme_region *fme_region;
	int ret = -ENOMEM;

	fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL);
	if (!fme_region)
		return ERR_PTR(ret);

	region_pdata.mgr = mgr;
	region_pdata.br = br;

	/*
	 * Each FPGA device may have more than one port, so allocate platform
	 * device using the same port platform device id.
	 */
	fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id);
	if (!fme_region->region)
		return ERR_PTR(ret);

	fme_region->region->dev.parent = dev;

	ret = platform_device_add_data(fme_region->region, &region_pdata,
				       sizeof(region_pdata));
	if (ret)
		goto create_region_err;

	ret = platform_device_add(fme_region->region);
	if (ret)
		goto create_region_err;

	fme_region->port_id = port_id;

	return fme_region;

create_region_err:
	platform_device_put(fme_region->region);
	return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_region - destroy fme region
 * @fme_region: fme region to destroy
 */
static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
{
	platform_device_unregister(fme_region->region);
}

/**
 * dfl_fme_destroy_regions - destroy all fme regions
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
	struct dfl_fme_region *fme_region, *tmp;

	list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
		list_del(&fme_region->node);
		dfl_fme_destroy_region(fme_region);
	}
}

static int pr_mgmt_init(struct platform_device *pdev,
			struct dfl_feature *feature)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fme_region *fme_region;
	struct dfl_fme_bridge *fme_br;
	struct platform_device *mgr;
	struct dfl_fme *priv;
	void __iomem *fme_hdr;
	int ret = -ENODEV, i = 0;
	u64 fme_cap, port_offset;

	fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
					       FME_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	priv = dfl_fpga_pdata_get_private(pdata);

	/* Initialize the region and bridge sub device list */
	INIT_LIST_HEAD(&priv->region_list);
	INIT_LIST_HEAD(&priv->bridge_list);

	/* Create fpga mgr platform device */
	mgr = dfl_fme_create_mgr(pdata, feature);
	if (IS_ERR(mgr)) {
		dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
		goto unlock;
	}

	priv->mgr = mgr;

	/* Read capability register to check number of regions and bridges */
	fme_cap = readq(fme_hdr + FME_HDR_CAP);
	for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) {
		port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i));
		if (!(port_offset & FME_PORT_OFST_IMP))
			continue;

		/* Create bridge for each port */
		fme_br = dfl_fme_create_bridge(pdata, i);
		if (IS_ERR(fme_br)) {
			ret = PTR_ERR(fme_br);
			goto destroy_region;
		}

		list_add(&fme_br->node, &priv->bridge_list);

		/* Create region for each port */
		fme_region = dfl_fme_create_region(pdata, mgr,
						   fme_br->br, i);
		if (IS_ERR(fme_region)) {
			ret = PTR_ERR(fme_region);
			goto destroy_region;
		}

		list_add(&fme_region->node, &priv->region_list);
	}
	mutex_unlock(&pdata->lock);

	return 0;

destroy_region:
	dfl_fme_destroy_regions(pdata);
	dfl_fme_destroy_bridges(pdata);
	dfl_fme_destroy_mgr(pdata);
unlock:
	mutex_unlock(&pdata->lock);
	return ret;
}

static void pr_mgmt_uinit(struct platform_device *pdev,
			  struct dfl_feature *feature)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);

	dfl_fme_destroy_regions(pdata);
	dfl_fme_destroy_bridges(pdata);
	dfl_fme_destroy_mgr(pdata);
	mutex_unlock(&pdata->lock);
}

static long fme_pr_ioctl(struct platform_device *pdev,
			 struct dfl_feature *feature,
			 unsigned int cmd, unsigned long arg)
{
	long ret;

	switch (cmd) {
	case DFL_FPGA_FME_PORT_PR:
		ret = fme_pr(pdev, arg);
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

const struct dfl_feature_id fme_pr_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_PR_MGMT,},
	{0}
};

const struct dfl_feature_ops fme_pr_mgmt_ops = {
	.init = pr_mgmt_init,
	.uinit = pr_mgmt_uinit,
	.ioctl = fme_pr_ioctl,
};
// SPDX-License-Identifier: GPL-2.0
/*
 * R-Car THS/TSC thermal sensor driver
 *
 * Copyright (C) 2012 Renesas Solutions Corp.
 * Kuninori Morimoto <[email protected]>
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/thermal.h>

#include "../thermal_hwmon.h"

#define IDLE_INTERVAL	5000

#define COMMON_STR	0x00
#define COMMON_ENR	0x04
#define COMMON_INTMSK	0x0c

#define REG_POSNEG	0x20
#define REG_FILONOFF	0x28
#define REG_THSCR	0x2c
#define REG_THSSR	0x30
#define REG_INTCTRL	0x34

/* THSCR */
#define CPCTL	(1 << 12)

/* THSSR */
#define CTEMP	0x3f

struct rcar_thermal_common {
	void __iomem *base;
	struct device *dev;
	struct list_head head;
	spinlock_t lock;
};

struct rcar_thermal_chip {
	unsigned int use_of_thermal : 1;
	unsigned int has_filonoff : 1;
	unsigned int irq_per_ch : 1;
	unsigned int needs_suspend_resume : 1;
	unsigned int nirqs;
	unsigned int ctemp_bands;
};

static const struct rcar_thermal_chip rcar_thermal = {
	.use_of_thermal = 0,
	.has_filonoff = 1,
	.irq_per_ch = 0,
	.needs_suspend_resume = 0,
	.nirqs = 1,
	.ctemp_bands = 1,
};

static const struct rcar_thermal_chip rcar_gen2_thermal = {
	.use_of_thermal = 1,
	.has_filonoff = 1,
	.irq_per_ch = 0,
	.needs_suspend_resume = 0,
	.nirqs = 1,
	.ctemp_bands = 1,
};

static const struct rcar_thermal_chip rcar_gen3_thermal = {
	.use_of_thermal = 1,
	.has_filonoff = 0,
	.irq_per_ch = 1,
	.needs_suspend_resume = 1,
	/*
	 * The Gen3 chip has 3 interrupts, but this driver uses only 2
	 * interrupts to detect a temperature change, rise or fall.
	 */
	.nirqs = 2,
	.ctemp_bands = 2,
};

struct rcar_thermal_priv {
	void __iomem *base;
	struct rcar_thermal_common *common;
	struct thermal_zone_device *zone;
	const struct rcar_thermal_chip *chip;
	struct delayed_work work;
	struct mutex lock;
	struct list_head list;
	int id;
};

#define rcar_thermal_for_each_priv(pos, common)	\
	list_for_each_entry(pos, &common->head, list)

#define MCELSIUS(temp)			((temp) * 1000)
#define rcar_priv_to_dev(priv)		((priv)->common->dev)
#define rcar_has_irq_support(priv)	((priv)->common->base)
#define rcar_id_to_shift(priv)		((priv)->id * 8)

static const struct of_device_id rcar_thermal_dt_ids[] = {
	{
		.compatible = "renesas,rcar-thermal",
		.data = &rcar_thermal,
	},
	{
		.compatible = "renesas,rcar-gen2-thermal",
		.data = &rcar_gen2_thermal,
	},
	{
		.compatible = "renesas,thermal-r8a774c0",
		.data = &rcar_gen3_thermal,
	},
	{
		.compatible = "renesas,thermal-r8a77970",
		.data = &rcar_gen3_thermal,
	},
	{
		.compatible = "renesas,thermal-r8a77990",
		.data = &rcar_gen3_thermal,
	},
	{
		.compatible = "renesas,thermal-r8a77995",
		.data = &rcar_gen3_thermal,
	},
	{},
};
MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);

/*
 *		basic functions
 */
#define rcar_thermal_common_read(c, r) \
	_rcar_thermal_common_read(c, COMMON_ ##r)
static u32 _rcar_thermal_common_read(struct rcar_thermal_common *common,
				     u32 reg)
{
	return ioread32(common->base + reg);
}

#define rcar_thermal_common_write(c, r, d) \
	_rcar_thermal_common_write(c, COMMON_ ##r, d)
static void _rcar_thermal_common_write(struct rcar_thermal_common *common,
				       u32 reg, u32 data)
{
	iowrite32(data, common->base + reg);
}

#define rcar_thermal_common_bset(c, r, m, d) \
	_rcar_thermal_common_bset(c, COMMON_ ##r, m, d)
static void _rcar_thermal_common_bset(struct rcar_thermal_common *common,
				      u32 reg, u32 mask, u32 data)
{
	u32 val;

	val = ioread32(common->base + reg);
	val &= ~mask;
	val |= (data & mask);
	iowrite32(val, common->base + reg);
}

#define rcar_thermal_read(p, r)	_rcar_thermal_read(p, REG_ ##r)
static u32 _rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg)
{
	return ioread32(priv->base + reg);
}

#define rcar_thermal_write(p, r, d)	_rcar_thermal_write(p, REG_ ##r, d)
static void _rcar_thermal_write(struct rcar_thermal_priv *priv,
				u32 reg, u32 data)
{
	iowrite32(data, priv->base + reg);
}

#define rcar_thermal_bset(p, r, m, d)	_rcar_thermal_bset(p, REG_ ##r, m, d)
static void _rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg,
			       u32 mask, u32 data)
{
	u32 val;

	val = ioread32(priv->base + reg);
	val &= ~mask;
	val |= (data & mask);
	iowrite32(val, priv->base + reg);
}

/*
 *		zone device functions
 */
static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
{
	struct device *dev = rcar_priv_to_dev(priv);
	int old, new, ctemp = -EINVAL;
	unsigned int i;

	mutex_lock(&priv->lock);

	/*
	 * TSC decides a value of CPTAP automatically,
	 * and this is the conditions which validate interrupt.
	 */
	rcar_thermal_bset(priv, THSCR, CPCTL, CPCTL);

	old = ~0;
	for (i = 0; i < 128; i++) {
		/*
		 * we need to wait 300us after changing comparator offset
		 * to get stable temperature.
		 * see "Usage Notes" on datasheet
		 */
		usleep_range(300, 400);

		new = rcar_thermal_read(priv, THSSR) & CTEMP;
		if (new == old) {
			ctemp = new;
			break;
		}
		old = new;
	}

	if (ctemp < 0) {
		dev_err(dev, "thermal sensor was broken\n");
		goto err_out_unlock;
	}

	/*
	 * enable IRQ
	 */
	if (rcar_has_irq_support(priv)) {
		if (priv->chip->has_filonoff)
			rcar_thermal_write(priv, FILONOFF, 0);

		/* enable Rising/Falling edge interrupt */
		rcar_thermal_write(priv, POSNEG,  0x1);
		rcar_thermal_write(priv, INTCTRL, (((ctemp - 0) << 8) |
						   ((ctemp - 1) << 0)));
	}

err_out_unlock:
	mutex_unlock(&priv->lock);

	return ctemp;
}

static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
					 int *temp)
{
	int ctemp;

	ctemp = rcar_thermal_update_temp(priv);
	if (ctemp < 0)
		return ctemp;

	/* Guaranteed operating range is -45C to 125C. */

	if (priv->chip->ctemp_bands == 1)
		*temp = MCELSIUS((ctemp * 5) - 65);
	else if (ctemp < 24)
		*temp = MCELSIUS(((ctemp * 55) - 720) / 10);
	else
		*temp = MCELSIUS((ctemp * 5) - 60);

	return 0;
}

static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
{
	struct rcar_thermal_priv *priv = thermal_zone_device_priv(zone);

	return rcar_thermal_get_current_temp(priv, temp);
}

static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
	.get_temp	= rcar_thermal_get_temp,
};

static struct thermal_trip trips[] = {
	{ .type = THERMAL_TRIP_CRITICAL, .temperature = 90000 }
};

/*
 *		interrupt
 */
#define rcar_thermal_irq_enable(p)	_rcar_thermal_irq_ctrl(p, 1)
#define rcar_thermal_irq_disable(p)	_rcar_thermal_irq_ctrl(p, 0)
static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable)
{
	struct rcar_thermal_common *common = priv->common;
	unsigned long flags;
	u32 mask = 0x3 << rcar_id_to_shift(priv); /* enable Rising/Falling */

	if (!rcar_has_irq_support(priv))
		return;

	spin_lock_irqsave(&common->lock, flags);

	rcar_thermal_common_bset(common, INTMSK, mask, enable ? 0 : mask);

	spin_unlock_irqrestore(&common->lock, flags);
}

static void rcar_thermal_work(struct work_struct *work)
{
	struct rcar_thermal_priv *priv;
	int ret;

	priv = container_of(work, struct rcar_thermal_priv, work.work);

	ret = rcar_thermal_update_temp(priv);
	if (ret < 0)
		return;

	rcar_thermal_irq_enable(priv);

	thermal_zone_device_update(priv->zone, THERMAL_EVENT_UNSPECIFIED);
}

static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
{
	struct device *dev = rcar_priv_to_dev(priv);

	status = (status >> rcar_id_to_shift(priv)) & 0x3;

	if (status) {
		dev_dbg(dev, "thermal%d %s%s\n",
			priv->id,
			(status & 0x2) ? "Rising " : "",
			(status & 0x1) ? "Falling" : "");
	}

	return status;
}

static irqreturn_t rcar_thermal_irq(int irq, void *data)
{
	struct rcar_thermal_common *common = data;
	struct rcar_thermal_priv *priv;
	u32 status, mask;

	spin_lock(&common->lock);

	mask	= rcar_thermal_common_read(common, INTMSK);
	status	= rcar_thermal_common_read(common, STR);
	rcar_thermal_common_write(common, STR, 0x000F0F0F & mask);

	spin_unlock(&common->lock);

	status = status & ~mask;

	/*
	 * check the status
	 */
	rcar_thermal_for_each_priv(priv, common) {
		if (rcar_thermal_had_changed(priv, status)) {
			rcar_thermal_irq_disable(priv);
			queue_delayed_work(system_freezable_wq, &priv->work,
					   msecs_to_jiffies(300));
		}
	}

	return IRQ_HANDLED;
}

/*
 *		platform functions
 */
static void rcar_thermal_remove(struct platform_device *pdev)
{
	struct rcar_thermal_common *common = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	struct rcar_thermal_priv *priv;

	rcar_thermal_for_each_priv(priv, common) {
		rcar_thermal_irq_disable(priv);
		cancel_delayed_work_sync(&priv->work);
		if (priv->chip->use_of_thermal)
			thermal_remove_hwmon_sysfs(priv->zone);
		else
			thermal_zone_device_unregister(priv->zone);
	}

	pm_runtime_put(dev);
	pm_runtime_disable(dev);
}

static int rcar_thermal_probe(struct platform_device *pdev)
{
	struct rcar_thermal_common *common;
	struct rcar_thermal_priv *priv;
	struct device *dev = &pdev->dev;
	struct resource *res;
	const struct rcar_thermal_chip *chip = of_device_get_match_data(dev);
	int mres = 0;
	int i;
	int ret = -ENODEV;
	int idle = IDLE_INTERVAL;
	u32 enr_bits = 0;

	common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
	if (!common)
		return -ENOMEM;

	platform_set_drvdata(pdev, common);

	INIT_LIST_HEAD(&common->head);
	spin_lock_init(&common->lock);
	common->dev = dev;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	for (i = 0; i < chip->nirqs; i++) {
		int irq;

		ret = platform_get_irq_optional(pdev, i);
		if (ret < 0 && ret != -ENXIO)
			goto error_unregister;
		if (ret > 0)
			irq = ret;
		else
			break;

		if (!common->base) {
			/*
			 * platform has IRQ support.
			 * Then, driver uses common registers
			 * rcar_has_irq_support() will be enabled
			 */
			res = platform_get_resource(pdev, IORESOURCE_MEM,
						    mres++);
			common->base = devm_ioremap_resource(dev, res);
			if (IS_ERR(common->base)) {
				ret = PTR_ERR(common->base);
				goto error_unregister;
			}

			idle = 0; /* polling delay is not needed */
		}

		ret = devm_request_irq(dev, irq, rcar_thermal_irq,
				       IRQF_SHARED, dev_name(dev), common);
		if (ret) {
			dev_err(dev, "irq request failed\n");
			goto error_unregister;
		}

		/* update ENR bits */
		if (chip->irq_per_ch)
			enr_bits |= 1 << i;
	}

	for (i = 0;; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
		if (!res)
			break;

		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
		if (!priv) {
			ret = -ENOMEM;
			goto error_unregister;
		}

		priv->base = devm_ioremap_resource(dev, res);
		if (IS_ERR(priv->base)) {
			ret = PTR_ERR(priv->base);
			goto error_unregister;
		}

		priv->common = common;
		priv->id = i;
		priv->chip = chip;
		mutex_init(&priv->lock);
		INIT_LIST_HEAD(&priv->list);
		INIT_DELAYED_WORK(&priv->work, rcar_thermal_work);
		ret = rcar_thermal_update_temp(priv);
		if (ret < 0)
			goto error_unregister;

		if (chip->use_of_thermal) {
			priv->zone = devm_thermal_of_zone_register(
						dev, i, priv,
						&rcar_thermal_zone_ops);
		} else {
			priv->zone = thermal_zone_device_register_with_trips(
						"rcar_thermal",
						trips, ARRAY_SIZE(trips), priv,
						&rcar_thermal_zone_ops, NULL, 0,
						idle);

			ret = thermal_zone_device_enable(priv->zone);
			if (ret) {
				thermal_zone_device_unregister(priv->zone);
				priv->zone = ERR_PTR(ret);
			}
		}
		if (IS_ERR(priv->zone)) {
			dev_err(dev, "can't register thermal zone\n");
			ret = PTR_ERR(priv->zone);
			priv->zone = NULL;
			goto error_unregister;
		}

		if (chip->use_of_thermal) {
			ret = thermal_add_hwmon_sysfs(priv->zone);
			if (ret)
				goto error_unregister;
		}

		rcar_thermal_irq_enable(priv);

		list_move_tail(&priv->list, &common->head);

		/* update ENR bits */
		if (!chip->irq_per_ch)
			enr_bits |= 3 << (i * 8);
	}

	if (common->base && enr_bits)
		rcar_thermal_common_write(common, ENR, enr_bits);

	dev_info(dev, "%d sensor probed\n", i);

	return 0;

error_unregister:
	rcar_thermal_remove(pdev);

	return ret;
}

#ifdef CONFIG_PM_SLEEP
static int rcar_thermal_suspend(struct device *dev)
{
	struct rcar_thermal_common *common = dev_get_drvdata(dev);
	struct rcar_thermal_priv *priv = list_first_entry(&common->head,
							  typeof(*priv), list);

	if (priv->chip->needs_suspend_resume) {
		rcar_thermal_common_write(common, ENR, 0);
		rcar_thermal_irq_disable(priv);
		rcar_thermal_bset(priv, THSCR, CPCTL, 0);
	}

	return 0;
}

static int rcar_thermal_resume(struct device *dev)
{
	struct rcar_thermal_common *common = dev_get_drvdata(dev);
	struct rcar_thermal_priv *priv = list_first_entry(&common->head,
							  typeof(*priv), list);
	int ret;

	if (priv->chip->needs_suspend_resume) {
		ret = rcar_thermal_update_temp(priv);
		if (ret < 0)
			return ret;
		rcar_thermal_irq_enable(priv);
		rcar_thermal_common_write(common, ENR, 0x03);
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(rcar_thermal_pm_ops,
			 rcar_thermal_suspend, rcar_thermal_resume);

static struct platform_driver rcar_thermal_driver = {
	.driver	= {
		.name	= "rcar_thermal",
		.pm	= &rcar_thermal_pm_ops,
		.of_match_table = rcar_thermal_dt_ids,
	},
	.probe		= rcar_thermal_probe,
	.remove		= rcar_thermal_remove,
};
module_platform_driver(rcar_thermal_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver");
MODULE_AUTHOR("Kuninori Morimoto <[email protected]>");
// SPDX-License-Identifier: GPL-2.0-only

void raid6_neon1_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
void raid6_neon1_xor_syndrome_real(int disks, int start, int stop,
				   unsigned long bytes, void **ptrs);

void raid6_neon2_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
void raid6_neon2_xor_syndrome_real(int disks, int start, int stop,
				   unsigned long bytes, void **ptrs);

void raid6_neon4_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
void raid6_neon4_xor_syndrome_real(int disks, int start, int stop,
				   unsigned long bytes, void **ptrs);

void raid6_neon8_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
void raid6_neon8_xor_syndrome_real(int disks, int start, int stop,
				   unsigned long bytes, void **ptrs);

void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
			      uint8_t *dq, const uint8_t *pbmul,
			      const uint8_t *qmul);

void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
			      const uint8_t *qmul);
/*******************************************************************************
    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/

#ifndef _cl206e_h_
#define _cl206e_h_

/* dma opcode2 format */
#define NV206E_DMA_OPCODE2				1:0
#define NV206E_DMA_OPCODE2_NONE				(0x00000000)
/* dma jump_long format */
#define NV206E_DMA_OPCODE2_JUMP_LONG			(0x00000001)
#define NV206E_DMA_JUMP_LONG_OFFSET			31:2
/* dma call format */
#define NV206E_DMA_OPCODE2_CALL				(0x00000002)
#define NV206E_DMA_CALL_OFFSET				31:2

#endif /* _cl206e_h_ */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_MEMCTRL_H
#define _SPARC_MEMCTRL_H

typedef int (*dimm_printer_t)(int synd_code, unsigned long paddr,
			      char *buf, int buflen);

int register_dimm_printer(dimm_printer_t func);
void unregister_dimm_printer(dimm_printer_t func);

#endif /* _SPARC_MEMCTRL_H */
// SPDX-License-Identifier: GPL-2.0
/*
 * macsonic.c
 *
 * (C) 2005 Finn Thain
 *
 * Converted to DMA API, converted to unified driver model, made it work as
 * a module again, and from the mac68k project, introduced more 32-bit cards
 * and dhd's support for 16-bit cards.
 *
 * (C) 1998 Alan Cox
 *
 * Debugging Andreas Ehliar, Michael Schmitz
 *
 * Based on code
 * (C) 1996 by Thomas Bogendoerfer ([email protected])
 *
 * This driver is based on work from Andreas Busse, but most of
 * the code is rewritten.
 *
 * (C) 1995 by Andreas Busse ([email protected])
 *
 * A driver for the Mac onboard Sonic ethernet chip.
 *
 * 98/12/21 MSch: judged from tests on Q800, it's basically working,
 *		  but eating up both receive and transmit resources
 *		  and duplicating packets. Needs more testing.
 *
 * 99/01/03 MSch: upgraded to version 0.92 of the core driver, fixed.
 *
 * 00/10/31 [email protected]: Updated driver for 2.4 kernels, fixed problems
 *	    on centris.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/nubus.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/bitrev.h>
#include <linux/slab.h>
#include <linux/pgtable.h>

#include <asm/io.h>
#include <asm/hwtest.h>
#include <asm/dma.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_via.h>

#include "sonic.h"

/* These should basically be bus-size and endian independent (since
   the SONIC is at least smart enough that it uses the same endianness
   as the host, unlike certain less enlightened Macintosh NICs) */
#define SONIC_READ(reg) (nubus_readw(dev->base_addr + (reg * 4) \
	     + lp->reg_offset))
#define SONIC_WRITE(reg,val) (nubus_writew(val, dev->base_addr + (reg * 4) \
	      + lp->reg_offset))

/* For onboard SONIC */
#define ONBOARD_SONIC_REGISTERS	0x50F0A000
#define ONBOARD_SONIC_PROM_BASE	0x50f08000

enum macsonic_type {
	MACSONIC_DUODOCK,
	MACSONIC_APPLE,
	MACSONIC_APPLE16,
	MACSONIC_DAYNA,
	MACSONIC_DAYNALINK
};

/* For the built-in SONIC in the Duo Dock */
#define DUODOCK_SONIC_REGISTERS	0xe10000
#define DUODOCK_SONIC_PROM_BASE	0xe12000

/* For Apple-style NuBus SONIC */
#define APPLE_SONIC_REGISTERS	0
#define APPLE_SONIC_PROM_BASE	0x40000

/* Daynalink LC SONIC */
#define DAYNALINK_PROM_BASE	0x400000

/* For Dayna-style NuBus SONIC (haven't seen one yet) */
#define DAYNA_SONIC_REGISTERS	0x180000
/* This is what OpenBSD says.  However, this is definitely in NuBus
   ROM space so we should be able to get it by walking the NuBus
   resource directories */
#define DAYNA_SONIC_MAC_ADDR	0xffe004

#define SONIC_READ_PROM(addr)	nubus_readb(prom_addr+addr)

/*
 * For reversing the PROM address
 */

static inline void bit_reverse_addr(unsigned char addr[6])
{
	int i;

	for(i = 0; i < 6; i++)
		addr[i] = bitrev8(addr[i]);
}

static int macsonic_open(struct net_device* dev)
{
	int retval;

	retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
	if (retval) {
		printk(KERN_ERR "%s: unable to get IRQ %d.\n",
				dev->name, dev->irq);
		goto err;
	}
	/* Under the A/UX interrupt scheme, the onboard SONIC interrupt gets
	 * moved from level 2 to level 3. Unfortunately we still get some
	 * level 2 interrupts so register the handler for both.
	 */
	if (dev->irq == IRQ_AUTO_3) {
		retval = request_irq(IRQ_NUBUS_9, sonic_interrupt, 0,
				     "sonic", dev);
		if (retval) {
			printk(KERN_ERR "%s: unable to get IRQ %d.\n",
					dev->name, IRQ_NUBUS_9);
			goto err_irq;
		}
	}
	retval = sonic_open(dev);
	if (retval)
		goto err_irq_nubus;
	return 0;

err_irq_nubus:
	if (dev->irq == IRQ_AUTO_3)
		free_irq(IRQ_NUBUS_9, dev);
err_irq:
	free_irq(dev->irq, dev);
err:
	return retval;
}

static int macsonic_close(struct net_device* dev)
{
	int err;

	err = sonic_close(dev);
	free_irq(dev->irq, dev);
	if (dev->irq == IRQ_AUTO_3)
		free_irq(IRQ_NUBUS_9, dev);
	return err;
}

static const struct net_device_ops macsonic_netdev_ops = {
	.ndo_open		= macsonic_open,
	.ndo_stop		= macsonic_close,
	.ndo_start_xmit		= sonic_send_packet,
	.ndo_set_rx_mode	= sonic_multicast_list,
	.ndo_tx_timeout		= sonic_tx_timeout,
	.ndo_get_stats		= sonic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static int macsonic_init(struct net_device *dev)
{
	struct sonic_local* lp = netdev_priv(dev);
	int err = sonic_alloc_descriptors(dev);

	if (err)
		return err;

	dev->netdev_ops = &macsonic_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	/*
	 * clear tally counter
	 */
	SONIC_WRITE(SONIC_CRCT, 0xffff);
	SONIC_WRITE(SONIC_FAET, 0xffff);
	SONIC_WRITE(SONIC_MPT, 0xffff);

	return 0;
}

#define INVALID_MAC(mac) (memcmp(mac, "\x08\x00\x07", 3) && \
			  memcmp(mac, "\x00\xA0\x40", 3) && \
			  memcmp(mac, "\x00\x80\x19", 3) && \
			  memcmp(mac, "\x00\x05\x02", 3))

static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	const int prom_addr = ONBOARD_SONIC_PROM_BASE;
	unsigned short val;
	u8 addr[ETH_ALEN];

	/*
	 * On NuBus boards we can sometimes look in the ROM resources.
	 * No such luck for comm-slot/onboard.
	 * On the PowerBook 520, the PROM base address is a mystery.
	 */
	if (hwreg_present((void *)prom_addr)) {
		int i;

		for (i = 0; i < 6; i++)
			addr[i] = SONIC_READ_PROM(i);
		eth_hw_addr_set(dev, addr);
		if (!INVALID_MAC(dev->dev_addr))
			return;

		/*
		 * Most of the time, the address is bit-reversed. The NetBSD
		 * source has a rather long and detailed historical account of
		 * why this is so.
		 */
		bit_reverse_addr(addr);
		eth_hw_addr_set(dev, addr);
		if (!INVALID_MAC(dev->dev_addr))
			return;

		/*
		 * If we still have what seems to be a bogus address, we'll
		 * look in the CAM. The top entry should be ours.
		 */

		printk(KERN_WARNING "macsonic: MAC address in PROM seems "
				    "to be invalid, trying CAM\n");
	} else {
		printk(KERN_WARNING "macsonic: cannot read MAC address from "
				    "PROM, trying CAM\n");
	}

	/* This only works if MacOS has already initialized the card. */

	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
	SONIC_WRITE(SONIC_CEP, 15);

	val = SONIC_READ(SONIC_CAP2);
	addr[5] = val >> 8;
	addr[4] = val & 0xff;
	val = SONIC_READ(SONIC_CAP1);
	addr[3] = val >> 8;
	addr[2] = val & 0xff;
	val = SONIC_READ(SONIC_CAP0);
	addr[1] = val >> 8;
	addr[0] = val & 0xff;
	eth_hw_addr_set(dev, addr);

	if (!INVALID_MAC(dev->dev_addr))
		return;

	/* Still nonsense ... messed up someplace! */

	printk(KERN_WARNING "macsonic: MAC address in CAM entry 15 "
			    "seems invalid, will use a random MAC\n");
	eth_hw_addr_random(dev);
}

static int mac_onboard_sonic_probe(struct net_device *dev)
{
	struct sonic_local* lp = netdev_priv(dev);
	int sr;
	bool commslot = macintosh_config->expansion_type == MAC_EXP_PDS_COMM;

	/* Bogus probing, on the models which may or may not have
	   Ethernet (BTW, the Ethernet *is* always at the same
	   address, and nothing else lives there, at least if Apple's
	   documentation is to be believed) */

	if (commslot || macintosh_config->ident == MAC_MODEL_C610) {
		int card_present;

		card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS);
		if (!card_present) {
			pr_info("Onboard/comm-slot SONIC not found\n");
			return -ENODEV;
		}
	}

	/* Danger!  My arms are flailing wildly!  You *must* set lp->reg_offset
	 * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */
	dev->base_addr = ONBOARD_SONIC_REGISTERS;
	if (via_alt_mapping)
		dev->irq = IRQ_AUTO_3;
	else
		dev->irq = IRQ_NUBUS_9;

	/* The PowerBook's SONIC is 16 bit always. */
	if (macintosh_config->ident == MAC_MODEL_PB520) {
		lp->reg_offset = 0;
		lp->dma_bitmode = SONIC_BITMODE16;
	} else if (commslot) {
		/* Some of the comm-slot cards are 16 bit.  But some
		   of them are not.  The 32-bit cards use offset 2 and
		   have known revisions, we try reading the revision
		   register at offset 2, if we don't get a known revision
		   we assume 16 bit at offset 0.  */
		lp->reg_offset = 2;
		lp->dma_bitmode = SONIC_BITMODE16;

		sr = SONIC_READ(SONIC_SR);
		if (sr == 0x0004 || sr == 0x0006 || sr == 0x0100 || sr == 0x0101)
			/* 83932 is 0x0004 or 0x0006, 83934 is 0x0100 or 0x0101 */
			lp->dma_bitmode = SONIC_BITMODE32;
		else {
			lp->dma_bitmode = SONIC_BITMODE16;
			lp->reg_offset = 0;
		}
	} else {
		/* All onboard cards are at offset 2 with 32 bit DMA. */
		lp->reg_offset = 2;
		lp->dma_bitmode = SONIC_BITMODE32;
	}

	pr_info("Onboard/comm-slot SONIC, revision 0x%04x, %d bit DMA, register offset %d\n",
		SONIC_READ(SONIC_SR), lp->dma_bitmode ? 32 : 16,
		lp->reg_offset);

	/* This is sometimes useful to find out how MacOS configured the card */
	pr_debug("%s: DCR=0x%04x, DCR2=0x%04x\n", __func__,
		 SONIC_READ(SONIC_DCR) & 0xffff,
		 SONIC_READ(SONIC_DCR2) & 0xffff);

	/* Software reset, then initialize control registers. */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	SONIC_WRITE(SONIC_DCR, SONIC_DCR_EXBUS | SONIC_DCR_BMS |
		    SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
		    (lp->dma_bitmode ? SONIC_DCR_DW : 0));

	/* This *must* be written back to in order to restore the
	 * extended programmable output bits, as it may not have been
	 * initialised since the hardware reset. */
	SONIC_WRITE(SONIC_DCR2, 0);

	/* Clear *and* disable interrupts to be on the safe side */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);

	/* Now look for the MAC address. */
	mac_onboard_sonic_ethernet_addr(dev);

	pr_info("SONIC ethernet @%08lx, MAC %pM, IRQ %d\n",
		dev->base_addr, dev->dev_addr, dev->irq);

	/* Shared init code */
	return macsonic_init(dev);
}

static int mac_sonic_nubus_ethernet_addr(struct net_device *dev,
					 unsigned long prom_addr, int id)
{
	u8 addr[ETH_ALEN];
	int i;

	for(i = 0; i < 6; i++)
		addr[i] = SONIC_READ_PROM(i);

	/* Some of the addresses are bit-reversed */
	if (id != MACSONIC_DAYNA)
		bit_reverse_addr(addr);
	eth_hw_addr_set(dev, addr);

	return 0;
}

static int macsonic_ident(struct nubus_rsrc *fres)
{
	if (fres->dr_hw == NUBUS_DRHW_ASANTE_LC &&
	    fres->dr_sw == NUBUS_DRSW_SONIC_LC)
		return MACSONIC_DAYNALINK;
	if (fres->dr_hw == NUBUS_DRHW_SONIC &&
	    fres->dr_sw == NUBUS_DRSW_APPLE) {
		/* There has to be a better way to do this... */
		if (strstr(fres->board->name, "DuoDock"))
			return MACSONIC_DUODOCK;
		else
			return MACSONIC_APPLE;
	}

	if (fres->dr_hw == NUBUS_DRHW_SMC9194 &&
	    fres->dr_sw == NUBUS_DRSW_DAYNA)
		return MACSONIC_DAYNA;

	if (fres->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC &&
	    fres->dr_sw == 0) { /* huh? */
		return MACSONIC_APPLE16;
	}
	return -1;
}

static int mac_sonic_nubus_probe_board(struct nubus_board *board, int id,
				       struct net_device *dev)
{
	struct sonic_local* lp = netdev_priv(dev);
	unsigned long base_addr, prom_addr;
	u16 sonic_dcr;
	int reg_offset, dma_bitmode;

	switch (id) {
	case MACSONIC_DUODOCK:
		base_addr = board->slot_addr + DUODOCK_SONIC_REGISTERS;
		prom_addr = board->slot_addr + DUODOCK_SONIC_PROM_BASE;
		sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1 |
			    SONIC_DCR_TFT0;
		reg_offset = 2;
		dma_bitmode = SONIC_BITMODE32;
		break;
	case MACSONIC_APPLE:
		base_addr = board->slot_addr + APPLE_SONIC_REGISTERS;
		prom_addr = board->slot_addr + APPLE_SONIC_PROM_BASE;
		sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0;
		reg_offset = 0;
		dma_bitmode = SONIC_BITMODE32;
		break;
	case MACSONIC_APPLE16:
		base_addr = board->slot_addr + APPLE_SONIC_REGISTERS;
		prom_addr = board->slot_addr + APPLE_SONIC_PROM_BASE;
		sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
			    SONIC_DCR_PO1 | SONIC_DCR_BMS;
		reg_offset = 0;
		dma_bitmode = SONIC_BITMODE16;
		break;
	case MACSONIC_DAYNALINK:
		base_addr = board->slot_addr + APPLE_SONIC_REGISTERS;
		prom_addr = board->slot_addr + DAYNALINK_PROM_BASE;
		sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
			    SONIC_DCR_PO1 | SONIC_DCR_BMS;
		reg_offset = 0;
		dma_bitmode = SONIC_BITMODE16;
		break;
	case MACSONIC_DAYNA:
		base_addr = board->slot_addr + DAYNA_SONIC_REGISTERS;
		prom_addr = board->slot_addr + DAYNA_SONIC_MAC_ADDR;
		sonic_dcr = SONIC_DCR_BMS |
			    SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1;
		reg_offset = 0;
		dma_bitmode = SONIC_BITMODE16;
		break;
	default:
		printk(KERN_ERR "macsonic: WTF, id is %d\n", id);
		return -ENODEV;
	}

	/* Danger!  My arms are flailing wildly!  You *must* set lp->reg_offset
	 * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */
	dev->base_addr = base_addr;
	lp->reg_offset = reg_offset;
	lp->dma_bitmode = dma_bitmode;
	dev->irq = SLOT2IRQ(board->slot);

	dev_info(&board->dev, "%s, revision 0x%04x, %d bit DMA, register offset %d\n",
		 board->name, SONIC_READ(SONIC_SR),
		 lp->dma_bitmode ? 32 : 16, lp->reg_offset);

	/* This is sometimes useful to find out how MacOS configured the card */
	dev_dbg(&board->dev, "%s: DCR=0x%04x, DCR2=0x%04x\n", __func__,
		SONIC_READ(SONIC_DCR) & 0xffff,
		SONIC_READ(SONIC_DCR2) & 0xffff);

	/* Software reset, then initialize control registers. */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
	SONIC_WRITE(SONIC_DCR, sonic_dcr |
		    (dma_bitmode ? SONIC_DCR_DW : 0));

	/* This *must* be written back to in order to restore the
	 * extended programmable output bits, since it may not have been
	 * initialised since the hardware reset. */
	SONIC_WRITE(SONIC_DCR2, 0);

	/* Clear *and* disable interrupts to be on the safe side */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);

	/* Now look for the MAC address. */
	if (mac_sonic_nubus_ethernet_addr(dev, prom_addr, id) != 0)
		return -ENODEV;

	dev_info(&board->dev, "SONIC ethernet @%08lx, MAC %pM, IRQ %d\n",
		 dev->base_addr, dev->dev_addr, dev->irq);

	/* Shared init code */
	return macsonic_init(dev);
}

static int mac_sonic_platform_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct sonic_local *lp;
	int err;

	dev = alloc_etherdev(sizeof(struct sonic_local));
	if (!dev)
		return -ENOMEM;

	lp = netdev_priv(dev);
	lp->device = &pdev->dev;
	SET_NETDEV_DEV(dev, &pdev->dev);
	platform_set_drvdata(pdev, dev);

	err = mac_onboard_sonic_probe(dev);
	if (err)
		goto out;

	sonic_msg_init(dev);

	err = register_netdev(dev);
	if (err)
		goto undo_probe;

	return 0;

undo_probe:
	dma_free_coherent(lp->device,
			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
			  lp->descriptors, lp->descriptors_laddr);
out:
	free_netdev(dev);

	return err;
}

MODULE_DESCRIPTION("Macintosh SONIC ethernet driver");
MODULE_ALIAS("platform:macsonic");

#include "sonic.c"

static void mac_sonic_platform_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sonic_local* lp = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_coherent(lp->device,
			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
			  lp->descriptors, lp->descriptors_laddr);
	free_netdev(dev);
}

static struct platform_driver mac_sonic_platform_driver = {
	.probe  = mac_sonic_platform_probe,
	.remove = mac_sonic_platform_remove,
	.driver = {
		.name = "macsonic",
	},
};

static int mac_sonic_nubus_probe(struct nubus_board *board)
{
	struct net_device *ndev;
	struct sonic_local *lp;
	struct nubus_rsrc *fres;
	int id = -1;
	int err;

	/* The platform driver will handle a PDS or Comm Slot card (even if
	 * it has a pseudoslot declaration ROM).
	 */
	if (macintosh_config->expansion_type == MAC_EXP_PDS_COMM)
		return -ENODEV;

	for_each_board_func_rsrc(board, fres) {
		if (fres->category != NUBUS_CAT_NETWORK ||
		    fres->type != NUBUS_TYPE_ETHERNET)
			continue;

		id = macsonic_ident(fres);
		if (id != -1)
			break;
	}
	if (!fres)
		return -ENODEV;

	ndev = alloc_etherdev(sizeof(struct sonic_local));
	if (!ndev)
		return -ENOMEM;

	lp = netdev_priv(ndev);
	lp->device = &board->dev;
	SET_NETDEV_DEV(ndev, &board->dev);

	err = mac_sonic_nubus_probe_board(board, id, ndev);
	if (err)
		goto out;

	sonic_msg_init(ndev);

	err = register_netdev(ndev);
	if (err)
		goto undo_probe;

	nubus_set_drvdata(board, ndev);

	return 0;

undo_probe:
	dma_free_coherent(lp->device,
			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
			  lp->descriptors, lp->descriptors_laddr);
out:
	free_netdev(ndev);
	return err;
}

static void mac_sonic_nubus_remove(struct nubus_board *board)
{
	struct net_device *ndev = nubus_get_drvdata(board);
	struct sonic_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);
	dma_free_coherent(lp->device,
			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
			  lp->descriptors, lp->descriptors_laddr);
	free_netdev(ndev);
}

static struct nubus_driver mac_sonic_nubus_driver = {
	.probe  = mac_sonic_nubus_probe,
	.remove = mac_sonic_nubus_remove,
	.driver = {
		.name = "macsonic-nubus",
		.owner = THIS_MODULE,
	},
};

static int perr, nerr;

static int __init mac_sonic_init(void)
{
	perr = platform_driver_register(&mac_sonic_platform_driver);
	nerr = nubus_driver_register(&mac_sonic_nubus_driver);
	return 0;
}
module_init(mac_sonic_init);

static void __exit mac_sonic_exit(void)
{
	if (!perr)
		platform_driver_unregister(&mac_sonic_platform_driver);
	if (!nerr)
		nubus_driver_unregister(&mac_sonic_nubus_driver);
}
module_exit(mac_sonic_exit);
// SPDX-License-Identifier: GPL-2.0
/*
 * xtensa mmu stuff
 *
 * Extracted from init.c
 */
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/cache.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/initialize_mmu.h>
#include <asm/io.h>

DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;

#if defined(CONFIG_HIGHMEM)
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
	pmd_t *pmd = pmd_off_k(vaddr);
	pte_t *pte;
	unsigned long i;

	n_pages = ALIGN(n_pages, PTRS_PER_PTE);

	pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
		 __func__, vaddr, n_pages);

	pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

	for (i = 0; i < n_pages; ++i)
		pte_clear(NULL, 0, pte + i);

	for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
		pte_t *cur_pte = pte + i;

		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
		BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
		pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
			 __func__, pmd, cur_pte);
	}
	return pte;
}

static void __init fixedrange_init(void)
{
	BUILD_BUG_ON(FIXADDR_START < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
	init_pmd(FIXADDR_START, __end_of_fixed_addresses);
}
#endif

void __init paging_init(void)
{
#ifdef CONFIG_HIGHMEM
	fixedrange_init();
	pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
	kmap_init();
#endif
}

/*
 * Flush the mmu and reset associated register to default values.
 */
void init_mmu(void)
{
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	/*
	 * Writing zeros to the instruction and data TLBCFG special
	 * registers ensure that valid values exist in the register.
	 *
	 * For existing PGSZID<w> fields, zero selects the first element
	 * of the page-size array.  For nonexistent PGSZID<w> fields,
	 * zero is the best value to write.  Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register(0);
	set_dtlbcfg_register(0);
#endif
	init_kio();
	local_flush_tlb_all();

	/* Set rasid register to a known value. */

	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (ie. not
	 * statically mapped).  This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
}

void init_kio(void)
{
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
	/*
	 * Update the IO area mapping in case xtensa_kio_paddr has changed
	 */
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			 XCHAL_KIO_CACHED_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			 XCHAL_KIO_CACHED_VADDR + 6);
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			 XCHAL_KIO_BYPASS_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			 XCHAL_KIO_BYPASS_VADDR + 6);
#endif
}
// SPDX-License-Identifier: GPL-2.0

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"

/* From include/linux/filter.h */
#define MAX_BPF_STACK	512

#if defined(__TARGET_ARCH_x86)

struct elem {
	struct bpf_timer t;
	char pad[256];
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");

SEC("kprobe")
__description("Private stack, single prog")
__success
__arch_x86_64
__jited("	movabsq	$0x{{.*}}, %r9")
__jited("	addq	%gs:0x{{.*}}, %r9")
__jited("	movl	$0x2a, %edi")
__jited("	movq	%rdi, -0x100(%r9)")
__naked void private_stack_single_prog(void)
{
	asm volatile ("			\
	r1 = 42;			\
	*(u64 *)(r10 - 256) = r1;	\
	r0 = 0;				\
	exit;				\
"	::: __clobber_all);
}

SEC("raw_tp")
__description("No private stack")
__success
__arch_x86_64
__jited("	subq	$0x8, %rsp")
__naked void no_private_stack_nested(void)
{
	asm volatile ("			\
	r1 = 42;			\
	*(u64 *)(r10 - 8) = r1;		\
	r0 = 0;				\
	exit;				\
"	::: __clobber_all);
}

__used
__naked static void cumulative_stack_depth_subprog(void)
{
	asm volatile ("				\
	r1 = 41;				\
	*(u64 *)(r10 - 32) = r1;		\
	call %[bpf_get_smp_processor_id];	\
	exit;					\
"	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("kprobe")
__description("Private stack, subtree > MAX_BPF_STACK")
__success
__arch_x86_64
/* private stack fp for the main prog */
__jited("	movabsq	$0x{{.*}}, %r9")
__jited("	addq	%gs:0x{{.*}}, %r9")
__jited("	movl	$0x2a, %edi")
__jited("	movq	%rdi, -0x200(%r9)")
__jited("	pushq	%r9")
__jited("	callq	0x{{.*}}")
__jited("	popq	%r9")
__jited("	xorl	%eax, %eax")
__naked void private_stack_nested_1(void)
{
	asm volatile ("				\
	r1 = 42;				\
	*(u64 *)(r10 - %[max_bpf_stack]) = r1;	\
	call cumulative_stack_depth_subprog;	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(max_bpf_stack, MAX_BPF_STACK)
	: __clobber_all);
}

__naked __noinline __used
static unsigned long loop_callback(void)
{
	asm volatile ("				\
	call %[bpf_get_prandom_u32];		\
	r1 = 42;				\
	*(u64 *)(r10 - 512) = r1;		\
	call cumulative_stack_depth_subprog;	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_common);
}

SEC("raw_tp")
__description("Private stack, callback")
__success
__arch_x86_64
/* for func loop_callback */
__jited("func #1")
__jited("	endbr64")
__jited("	nopl	(%rax,%rax)")
__jited("	nopl	(%rax)")
__jited("	pushq	%rbp")
__jited("	movq	%rsp, %rbp")
__jited("	endbr64")
__jited("	movabsq	$0x{{.*}}, %r9")
__jited("	addq	%gs:0x{{.*}}, %r9")
__jited("	pushq	%r9")
__jited("	callq")
__jited("	popq	%r9")
__jited("	movl	$0x2a, %edi")
__jited("	movq	%rdi, -0x200(%r9)")
__jited("	pushq	%r9")
__jited("	callq")
__jited("	popq	%r9")
__naked void private_stack_callback(void)
{
	asm volatile ("			\
	r1 = 1;				\
	r2 = %[loop_callback];		\
	r3 = 0;				\
	r4 = 0;				\
	call %[bpf_loop];		\
	r0 = 0;				\
	exit;				\
"	:
	: __imm_ptr(loop_callback),
	  __imm(bpf_loop)
	: __clobber_common);
}

SEC("fentry/bpf_fentry_test9")
__description("Private stack, exception in main prog")
__success __retval(0)
__arch_x86_64
__jited("	pushq	%r9")
__jited("	callq")
__jited("	popq	%r9")
int private_stack_exception_main_prog(void)
{
	asm volatile ("			\
	r1 = 42;			\
	*(u64 *)(r10 - 512) = r1;	\
"	::: __clobber_common);

	bpf_throw(0);
	return 0;
}

__used static int subprog_exception(void)
{
	bpf_throw(0);
	return 0;
}

SEC("fentry/bpf_fentry_test9")
__description("Private stack, exception in subprog")
__success __retval(0)
__arch_x86_64
__jited("	movq	%rdi, -0x200(%r9)")
__jited("	pushq	%r9")
__jited("	callq")
__jited("	popq	%r9")
int private_stack_exception_sub_prog(void)
{
	asm volatile ("			\
	r1 = 42;			\
	*(u64 *)(r10 - 512) = r1;	\
	call subprog_exception;		\
"	::: __clobber_common);

	return 0;
}

int glob;
__noinline static void subprog2(int *val)
{
	glob += val[0] * 2;
}

__noinline static void subprog1(int *val)
{
	int tmp[64] = {};

	tmp[0] = *val;
	subprog2(tmp);
}

__noinline static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
{
	subprog1(key);
	return 0;
}

__noinline static int timer_cb2(void *map, int *key, struct bpf_timer *timer)
{
	return 0;
}

SEC("fentry/bpf_fentry_test9")
__description("Private stack, async callback, not nested")
__success __retval(0)
__arch_x86_64
__jited("	movabsq	$0x{{.*}}, %r9")
int private_stack_async_callback_1(void)
{
	struct bpf_timer *arr_timer;
	int array_key = 0;

	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;

	bpf_timer_init(arr_timer, &array, 1);
	bpf_timer_set_callback(arr_timer, timer_cb2);
	bpf_timer_start(arr_timer, 0, 0);
	subprog1(&array_key);
	return 0;
}

SEC("fentry/bpf_fentry_test9")
__description("Private stack, async callback, potential nesting")
__success __retval(0)
__arch_x86_64
__jited("	subq	$0x100, %rsp")
int private_stack_async_callback_2(void)
{
	struct bpf_timer *arr_timer;
	int array_key = 0;

	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;

	bpf_timer_init(arr_timer, &array, 1);
	bpf_timer_set_callback(arr_timer, timer_cb1);
	bpf_timer_start(arr_timer, 0, 0);
	subprog1(&array_key);
	return 0;
}

#else

SEC("kprobe")
__description("private stack is not supported, use a dummy test")
__success
int dummy_test(void)
{
	return 0;
}

#endif

char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/*  Copyright(c) 2016-20 Intel Corporation. */

#include <cpuid.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/auxv.h>
#include "defines.h"
#include "../kselftest_harness.h"
#include "main.h"

static const uint64_t MAGIC = 0x1122334455667788ULL;
static const uint64_t MAGIC2 = 0x8877665544332211ULL;
vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;

/*
 * Security Information (SECINFO) data structure needed by a few SGX
 * instructions (eg. ENCLU[EACCEPT] and ENCLU[EMODPE]) holds meta-data
 * about an enclave page. &enum sgx_secinfo_page_state specifies the
 * secinfo flags used for page state.
 */
enum sgx_secinfo_page_state {
	SGX_SECINFO_PENDING = (1 << 3),
	SGX_SECINFO_MODIFIED = (1 << 4),
	SGX_SECINFO_PR = (1 << 5),
};

struct vdso_symtab {
	Elf64_Sym *elf_symtab;
	const char *elf_symstrtab;
	Elf64_Word *elf_hashtab;
};

static Elf64_Dyn *vdso_get_dyntab(void *addr)
{
	Elf64_Ehdr *ehdr = addr;
	Elf64_Phdr *phdrtab = addr + ehdr->e_phoff;
	int i;

	for (i = 0; i < ehdr->e_phnum; i++)
		if (phdrtab[i].p_type == PT_DYNAMIC)
			return addr + phdrtab[i].p_offset;

	return NULL;
}

static void *vdso_get_dyn(void *addr, Elf64_Dyn *dyntab, Elf64_Sxword tag)
{
	int i;

	for (i = 0; dyntab[i].d_tag != DT_NULL; i++)
		if (dyntab[i].d_tag == tag)
			return addr + dyntab[i].d_un.d_ptr;

	return NULL;
}

static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
{
	Elf64_Dyn *dyntab = vdso_get_dyntab(addr);

	symtab->elf_symtab = vdso_get_dyn(addr, dyntab, DT_SYMTAB);
	if (!symtab->elf_symtab)
		return false;

	symtab->elf_symstrtab = vdso_get_dyn(addr, dyntab, DT_STRTAB);
	if (!symtab->elf_symstrtab)
		return false;

	symtab->elf_hashtab = vdso_get_dyn(addr, dyntab, DT_HASH);
	if (!symtab->elf_hashtab)
		return false;

	return true;
}

static inline int sgx2_supported(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid_count(SGX_CPUID, 0x0, eax, ebx, ecx, edx);

	return eax & 0x2;
}

static unsigned long elf_sym_hash(const char *name)
{
	unsigned long h = 0, high;

	while (*name) {
		h = (h << 4) + *name++;
		high = h & 0xf0000000;

		if (high)
			h ^= high >> 24;

		h &= ~high;
	}

	return h;
}

static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
{
	Elf64_Word bucketnum = symtab->elf_hashtab[0];
	Elf64_Word *buckettab = &symtab->elf_hashtab[2];
	Elf64_Word *chaintab = &symtab->elf_hashtab[2 + bucketnum];
	Elf64_Sym *sym;
	Elf64_Word i;

	for (i = buckettab[elf_sym_hash(name) % bucketnum]; i != STN_UNDEF;
	     i = chaintab[i]) {
		sym = &symtab->elf_symtab[i];
		if (!strcmp(name, &symtab->elf_symstrtab[sym->st_name]))
			return sym;
	}

	return NULL;
}

/*
 * Return the offset in the enclave where the TCS segment can be found.
 * The first RW segment loaded is the TCS.
 */
static off_t encl_get_tcs_offset(struct encl *encl)
{
	int i;

	for (i = 0; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		if (i == 0 && seg->prot == (PROT_READ | PROT_WRITE))
			return seg->offset;
	}

	return -1;
}

/*
 * Return the offset in the enclave where the data segment can be found.
 * The first RW segment loaded is the TCS, skip that to get info on the
 * data segment.
 */
static off_t encl_get_data_offset(struct encl *encl)
{
	int i;

	for (i = 1; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		if (seg->prot == (PROT_READ | PROT_WRITE))
			return seg->offset;
	}

	return -1;
}

FIXTURE(enclave) {
	struct encl encl;
	struct sgx_enclave_run run;
};

static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
			    struct __test_metadata *_metadata)
{
	Elf64_Sym *sgx_enter_enclave_sym = NULL;
	struct vdso_symtab symtab;
	struct encl_segment *seg;
	char maps_line[256];
	FILE *maps_file;
	unsigned int i;
	void *addr;

	if (!encl_load("test_encl.elf", encl, heap_size)) {
		encl_delete(encl);
		TH_LOG("Failed to load the test enclave.");
		return false;
	}

	if (!encl_measure(encl))
		goto err;

	if (!encl_build(encl))
		goto err;

	/*
	 * An enclave consumer only must do this.
	 */
	for (i = 0; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		addr = mmap((void *)encl->encl_base + seg->offset, seg->size,
			    seg->prot, MAP_SHARED | MAP_FIXED, encl->fd, 0);
		EXPECT_NE(addr, MAP_FAILED);
		if (addr == MAP_FAILED)
			goto err;
	}

	/* Get vDSO base address */
	addr = (void *)getauxval(AT_SYSINFO_EHDR);
	if (!addr)
		goto err;

	if (!vdso_get_symtab(addr, &symtab))
		goto err;

	sgx_enter_enclave_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave");
	if (!sgx_enter_enclave_sym)
		goto err;

	vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value;

	return true;

err:
	for (i = 0; i < encl->nr_segments; i++) {
		seg = &encl->segment_tbl[i];

		TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size,
		       seg->prot);
	}

	maps_file = fopen("/proc/self/maps", "r");
	if (maps_file != NULL) {
		while (fgets(maps_line, sizeof(maps_line), maps_file) != NULL) {
			maps_line[strlen(maps_line) - 1] = '\0';

			if (strstr(maps_line, "/dev/sgx_enclave"))
				TH_LOG("%s", maps_line);
		}

		fclose(maps_file);
	}

	TH_LOG("Failed to initialize the test enclave.");

	encl_delete(encl);

	return false;
}

FIXTURE_SETUP(enclave)
{
}

FIXTURE_TEARDOWN(enclave)
{
	encl_delete(&self->encl);
}

#define ENCL_CALL(op, run, clobbered) \
	({ \
		int ret; \
		if ((clobbered)) \
			ret = vdso_sgx_enter_enclave((unsigned long)(op), 0, 0, \
						     EENTER, 0, 0, (run)); \
		else \
			ret = sgx_enter_enclave((void *)(op), NULL, 0, EENTER, NULL, NULL, \
						(run)); \
		ret; \
	})

#define EXPECT_EEXIT(run) \
	do { \
		EXPECT_EQ((run)->function, EEXIT); \
		if ((run)->function != EEXIT) \
			TH_LOG("0x%02x 0x%02x 0x%016llx", (run)->exception_vector, \
			       (run)->exception_error_code, (run)->exception_addr); \
	} while (0)

TEST_F(enclave, unclobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

/*
 * A section metric is concatenated in a way that @low bits 12-31 define the
 * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the
 * metric.
 */
static unsigned long sgx_calc_section_metric(unsigned int low,
					     unsigned int high)
{
	return (low & GENMASK_ULL(31, 12)) +
	       ((high & GENMASK_ULL(19, 0)) << 32);
}

/*
 * Sum total available physical SGX memory across all EPC sections
 *
 * Return: total available physical SGX memory available on system
 */
static unsigned long get_total_epc_mem(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned long total_size = 0;
	unsigned int type;
	int section = 0;

	while (true) {
		__cpuid_count(SGX_CPUID, section + SGX_CPUID_EPC, eax, ebx, ecx, edx);

		type = eax & SGX_CPUID_EPC_MASK;
		if (type == SGX_CPUID_EPC_INVALID)
			break;

		if (type != SGX_CPUID_EPC_SECTION)
			break;

		total_size += sgx_calc_section_metric(ecx, edx);

		section++;
	}

	return total_size;
}

TEST_F(enclave, unclobbered_vdso_oversubscribed)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;
	unsigned long total_mem;

	total_mem = get_total_epc_mem();
	ASSERT_NE(total_mem, 0);
	ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

TEST_F_TIMEOUT(enclave, unclobbered_vdso_oversubscribed_remove, 900)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_get_from_buf get_op;
	struct encl_op_eaccept eaccept_op;
	struct encl_op_put_to_buf put_op;
	struct encl_segment *heap;
	unsigned long total_mem;
	int ret, errno_save;
	unsigned long addr;
	unsigned long i;

	/*
	 * Create enclave with additional heap that is as big as all
	 * available physical SGX memory.
	 */
	total_mem = get_total_epc_mem();
	ASSERT_NE(total_mem, 0);
	TH_LOG("Creating an enclave with %lu bytes heap may take a while ...",
	       total_mem);
	ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with check that test has a chance of succeeding.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	EXPECT_EQ(ret, -1);

	/* SGX2 is supported by kernel and hardware, test can proceed. */
	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	heap = &self->encl.segment_tbl[self->encl.nr_segments - 1];

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	/* Trim entire heap. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = heap->offset;
	modt_ioc.length = heap->size;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	TH_LOG("Changing type of %zd bytes to trimmed may take a while ...",
	       heap->size);
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, heap->size);

	/* EACCEPT all removed pages. */
	addr = self->encl.encl_base + heap->offset;

	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	TH_LOG("Entering enclave to run EACCEPT for each page of %zd bytes may take a while ...",
	       heap->size);
	for (i = 0; i < heap->size; i += 4096) {
		eaccept_op.epc_addr = addr + i;
		eaccept_op.ret = 0;

		EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

		EXPECT_EQ(self->run.exception_vector, 0);
		EXPECT_EQ(self->run.exception_error_code, 0);
		EXPECT_EQ(self->run.exception_addr, 0);
		ASSERT_EQ(eaccept_op.ret, 0);
		ASSERT_EQ(self->run.function, EEXIT);
	}

	/* Complete page removal. */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = heap->offset;
	remove_ioc.length = heap->size;

	TH_LOG("Removing %zd bytes from enclave may take a while ...",
	       heap->size);
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, heap->size);
}

TEST_F(enclave, clobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
			struct sgx_enclave_run *run)
{
	run->user_data = 0;

	return 0;
}

TEST_F(enclave, clobbered_vdso_and_user_function)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	self->run.user_handler = (__u64)test_handler;
	self->run.user_data = 0xdeadbeef;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

/*
 * Sanity check that it is possible to enter either of the two hardcoded TCS
 */
TEST_F(enclave, tcs_entry)
{
	struct encl_op_header op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	op.type = ENCL_OP_NOP;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
/* Move to the next TCS. */ self->run.tcs = self->encl.encl_base + PAGE_SIZE; EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); } /* * Second page of .data segment is used to test changing PTE permissions. * This spans the local encl_buffer within the test enclave. * * 1) Start with a sanity check: a value is written to the target page within * the enclave and read back to ensure target page can be written to. * 2) Change PTE permissions (RW -> RO) of target page within enclave. * 3) Repeat (1) - this time expecting a regular #PF communicated via the * vDSO. * 4) Change PTE permissions of target page within enclave back to be RW. * 5) Repeat (1) by resuming enclave, now expected to be possible to write to * and read from target page within enclave. */ TEST_F(enclave, pte_permissions) { struct encl_op_get_from_addr get_addr_op; struct encl_op_put_to_addr put_addr_op; unsigned long data_start; int ret; ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; data_start = self->encl.encl_base + encl_get_data_offset(&self->encl) + PAGE_SIZE; /* * Sanity check to ensure it is possible to write to page that will * have its permissions manipulated. */ /* Write MAGIC to page */ put_addr_op.value = MAGIC; put_addr_op.addr = data_start; put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Read memory that was just written to, confirming that it is the * value previously written (MAGIC). */ get_addr_op.value = 0; get_addr_op.addr = data_start; get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* Change PTE permissions of target page within the enclave */ ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ); if (ret) perror("mprotect"); /* * PTE permissions of target page changed to read-only, EPCM * permissions unchanged (EPCM permissions are RW), attempt to * write to the page, expecting a regular #PF. */ put_addr_op.value = MAGIC2; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EQ(self->run.exception_vector, 14); EXPECT_EQ(self->run.exception_error_code, 0x7); EXPECT_EQ(self->run.exception_addr, data_start); self->run.exception_vector = 0; self->run.exception_error_code = 0; self->run.exception_addr = 0; /* * Change PTE permissions back to enable enclave to write to the * target page and resume enclave - do not expect any exceptions this * time. 
*/ ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE); if (ret) perror("mprotect"); EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0, ERESUME, 0, 0, &self->run), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); get_addr_op.value = 0; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC2); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); } /* * Modifying permissions of TCS page should not be possible. */ TEST_F(enclave, tcs_permissions) { struct sgx_enclave_restrict_permissions ioc; int ret, errno_save; ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; memset(&ioc, 0, sizeof(ioc)); /* * Ensure kernel supports needed ioctl() and system supports needed * commands. */ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc); errno_save = ret == -1 ? errno : 0; /* * Invalid parameters were provided during sanity check, * expect command to fail. */ ASSERT_EQ(ret, -1); /* ret == -1 */ if (errno_save == ENOTTY) SKIP(return, "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()"); else if (errno_save == ENODEV) SKIP(return, "System does not support SGX2"); /* * Attempt to make TCS page read-only. This is not allowed and * should be prevented by the kernel. */ ioc.offset = encl_get_tcs_offset(&self->encl); ioc.length = PAGE_SIZE; ioc.permissions = SGX_SECINFO_R; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc); errno_save = ret == -1 ? errno : 0; EXPECT_EQ(ret, -1); EXPECT_EQ(errno_save, EINVAL); EXPECT_EQ(ioc.result, 0); EXPECT_EQ(ioc.count, 0); } /* * Enclave page permission test. * * Modify and restore enclave page's EPCM (enclave) permissions from * outside enclave (ENCLS[EMODPR] via kernel) as well as from within * enclave (via ENCLU[EMODPE]). Check for page fault if * VMA allows access but EPCM permissions do not. */ TEST_F(enclave, epcm_permissions) { struct sgx_enclave_restrict_permissions restrict_ioc; struct encl_op_get_from_addr get_addr_op; struct encl_op_put_to_addr put_addr_op; struct encl_op_eaccept eaccept_op; struct encl_op_emodpe emodpe_op; unsigned long data_start; int ret, errno_save; ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; /* * Ensure kernel supports needed ioctl() and system supports needed * commands. */ memset(&restrict_ioc, 0, sizeof(restrict_ioc)); ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &restrict_ioc); errno_save = ret == -1 ? errno : 0; /* * Invalid parameters were provided during sanity check, * expect command to fail. */ ASSERT_EQ(ret, -1); /* ret == -1 */ if (errno_save == ENOTTY) SKIP(return, "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()"); else if (errno_save == ENODEV) SKIP(return, "System does not support SGX2"); /* * Page that will have its permissions changed is the second data * page in the .data segment. This forms part of the local encl_buffer * within the enclave. * * At start of test @data_start should have EPCM as well as PTE and * VMA permissions of RW. 
*/ data_start = self->encl.encl_base + encl_get_data_offset(&self->encl) + PAGE_SIZE; /* * Sanity check that page at @data_start is writable before making * any changes to page permissions. * * Start by writing MAGIC to test page. */ put_addr_op.value = MAGIC; put_addr_op.addr = data_start; put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Read memory that was just written to, confirming that * page is writable. */ get_addr_op.value = 0; get_addr_op.addr = data_start; get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Change EPCM permissions to read-only. Kernel still considers * the page writable. */ memset(&restrict_ioc, 0, sizeof(restrict_ioc)); restrict_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE; restrict_ioc.length = PAGE_SIZE; restrict_ioc.permissions = SGX_SECINFO_R; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &restrict_ioc); errno_save = ret == -1 ? errno : 0; EXPECT_EQ(ret, 0); EXPECT_EQ(errno_save, 0); EXPECT_EQ(restrict_ioc.result, 0); EXPECT_EQ(restrict_ioc.count, 4096); /* * EPCM permissions changed from kernel, need to EACCEPT from enclave. */ eaccept_op.epc_addr = data_start; eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_REG | SGX_SECINFO_PR; eaccept_op.ret = 0; eaccept_op.header.type = ENCL_OP_EACCEPT; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); /* * EPCM permissions of page is now read-only, expect #PF * on EPCM when attempting to write to page from within enclave. */ put_addr_op.value = MAGIC2; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EQ(self->run.function, ERESUME); EXPECT_EQ(self->run.exception_vector, 14); EXPECT_EQ(self->run.exception_error_code, 0x8007); EXPECT_EQ(self->run.exception_addr, data_start); self->run.exception_vector = 0; self->run.exception_error_code = 0; self->run.exception_addr = 0; /* * Received AEX but cannot return to enclave at same entrypoint, * need different TCS from where EPCM permission can be made writable * again. */ self->run.tcs = self->encl.encl_base + PAGE_SIZE; /* * Enter enclave at new TCS to change EPCM permissions to be * writable again and thus fix the page fault that triggered the * AEX. */ emodpe_op.epc_addr = data_start; emodpe_op.flags = SGX_SECINFO_R | SGX_SECINFO_W; emodpe_op.header.type = ENCL_OP_EMODPE; EXPECT_EQ(ENCL_CALL(&emodpe_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Attempt to return to main TCS to resume execution at faulting * instruction, PTE should continue to allow writing to the page. */ self->run.tcs = self->encl.encl_base; /* * Wrong page permissions that caused original fault has * now been fixed via EPCM permissions. * Resume execution in main TCS to re-attempt the memory access. 
*/ self->run.tcs = self->encl.encl_base; EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0, ERESUME, 0, 0, &self->run), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); get_addr_op.value = 0; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC2); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.user_data, 0); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); } /* * Test the addition of pages to an initialized enclave via writing to * a page belonging to the enclave's address space but was not added * during enclave creation. */ TEST_F(enclave, augment) { struct encl_op_get_from_addr get_addr_op; struct encl_op_put_to_addr put_addr_op; struct encl_op_eaccept eaccept_op; size_t total_size = 0; void *addr; int i; if (!sgx2_supported()) SKIP(return, "SGX2 not supported"); ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; for (i = 0; i < self->encl.nr_segments; i++) { struct encl_segment *seg = &self->encl.segment_tbl[i]; total_size += seg->size; } /* * Actual enclave size is expected to be larger than the loaded * test enclave since enclave size must be a power of 2 in bytes * and test_encl does not consume it all. */ EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size); /* * Create memory mapping for the page that will be added. New * memory mapping is for one page right after all existing * mappings. * Kernel will allow new mapping using any permissions if it * falls into the enclave's address range but not backed * by existing enclave pages. */ addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED, self->encl.fd, 0); EXPECT_NE(addr, MAP_FAILED); self->run.exception_vector = 0; self->run.exception_error_code = 0; self->run.exception_addr = 0; /* * Attempt to write to the new page from within enclave. * Expected to fail since page is not (yet) part of the enclave. * The first #PF will trigger the addition of the page to the * enclave, but since the new page needs an EACCEPT from within the * enclave before it can be used it would not be possible * to successfully return to the failing instruction. This is the * cause of the second #PF captured here having the SGX bit set, * it is from hardware preventing the page from being used. */ put_addr_op.value = MAGIC; put_addr_op.addr = (unsigned long)addr; put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EQ(self->run.function, ERESUME); EXPECT_EQ(self->run.exception_vector, 14); EXPECT_EQ(self->run.exception_addr, (unsigned long)addr); if (self->run.exception_error_code == 0x6) { munmap(addr, PAGE_SIZE); SKIP(return, "Kernel does not support adding pages to initialized enclave"); } EXPECT_EQ(self->run.exception_error_code, 0x8007); self->run.exception_vector = 0; self->run.exception_error_code = 0; self->run.exception_addr = 0; /* Handle AEX by running EACCEPT from new entry point. 
*/ self->run.tcs = self->encl.encl_base + PAGE_SIZE; eaccept_op.epc_addr = self->encl.encl_base + total_size; eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING; eaccept_op.ret = 0; eaccept_op.header.type = ENCL_OP_EACCEPT; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); /* Can now return to main TCS to resume execution. */ self->run.tcs = self->encl.encl_base; EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0, ERESUME, 0, 0, &self->run), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Read memory from newly added page that was just written to, * confirming that data previously written (MAGIC) is present. */ get_addr_op.value = 0; get_addr_op.addr = (unsigned long)addr; get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); munmap(addr, PAGE_SIZE); } /* * Test for the addition of pages to an initialized enclave via a * pre-emptive run of EACCEPT on page to be added. */ TEST_F(enclave, augment_via_eaccept) { struct encl_op_get_from_addr get_addr_op; struct encl_op_put_to_addr put_addr_op; struct encl_op_eaccept eaccept_op; size_t total_size = 0; void *addr; int i; if (!sgx2_supported()) SKIP(return, "SGX2 not supported"); ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; for (i = 0; i < self->encl.nr_segments; i++) { struct encl_segment *seg = &self->encl.segment_tbl[i]; total_size += seg->size; } /* * Actual enclave size is expected to be larger than the loaded * test enclave since enclave size must be a power of 2 in bytes while * test_encl does not consume it all. */ EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size); /* * mmap() a page at end of existing enclave to be used for dynamic * EPC page. * * Kernel will allow new mapping using any permissions if it * falls into the enclave's address range but not backed * by existing enclave pages. */ addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED, self->encl.fd, 0); EXPECT_NE(addr, MAP_FAILED); self->run.exception_vector = 0; self->run.exception_error_code = 0; self->run.exception_addr = 0; /* * Run EACCEPT on new page to trigger the #PF->EAUG->EACCEPT(again * without a #PF). All should be transparent to userspace. 
*/ eaccept_op.epc_addr = self->encl.encl_base + total_size; eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING; eaccept_op.ret = 0; eaccept_op.header.type = ENCL_OP_EACCEPT; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); if (self->run.exception_vector == 14 && self->run.exception_error_code == 4 && self->run.exception_addr == self->encl.encl_base + total_size) { munmap(addr, PAGE_SIZE); SKIP(return, "Kernel does not support adding pages to initialized enclave"); } EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); /* * New page should be accessible from within enclave - attempt to * write to it. */ put_addr_op.value = MAGIC; put_addr_op.addr = (unsigned long)addr; put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Read memory from newly added page that was just written to, * confirming that data previously written (MAGIC) is present. */ get_addr_op.value = 0; get_addr_op.addr = (unsigned long)addr; get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); munmap(addr, PAGE_SIZE); } /* * SGX2 page type modification test in two phases: * Phase 1: * Create a new TCS, consisting out of three new pages (stack page with regular * page type, SSA page with regular page type, and TCS page with TCS page * type) in an initialized enclave and run a simple workload within it. * Phase 2: * Remove the three pages added in phase 1, add a new regular page at the * same address that previously hosted the TCS page and verify that it can * be modified. */ TEST_F(enclave, tcs_create) { struct encl_op_init_tcs_page init_tcs_page_op; struct sgx_enclave_remove_pages remove_ioc; struct encl_op_get_from_addr get_addr_op; struct sgx_enclave_modify_types modt_ioc; struct encl_op_put_to_addr put_addr_op; struct encl_op_get_from_buf get_buf_op; struct encl_op_put_to_buf put_buf_op; void *addr, *tcs, *stack_end, *ssa; struct encl_op_eaccept eaccept_op; size_t total_size = 0; uint64_t val_64; int errno_save; int ret, i; ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; /* * Hardware (SGX2) and kernel support is needed for this test. Start * with check that test has a chance of succeeding. */ memset(&modt_ioc, 0, sizeof(modt_ioc)); ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc); if (ret == -1) { if (errno == ENOTTY) SKIP(return, "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()"); else if (errno == ENODEV) SKIP(return, "System does not support SGX2"); } /* * Invalid parameters were provided during sanity check, * expect command to fail. */ EXPECT_EQ(ret, -1); /* * Add three regular pages via EAUG: one will be the TCS stack, one * will be the TCS SSA, and one will be the new TCS. The stack and * SSA will remain as regular pages, the TCS page will need its * type changed after populated with needed data. 
*/ for (i = 0; i < self->encl.nr_segments; i++) { struct encl_segment *seg = &self->encl.segment_tbl[i]; total_size += seg->size; } /* * Actual enclave size is expected to be larger than the loaded * test enclave since enclave size must be a power of 2 in bytes while * test_encl does not consume it all. */ EXPECT_LT(total_size + 3 * PAGE_SIZE, self->encl.encl_size); /* * mmap() three pages at end of existing enclave to be used for the * three new pages. */ addr = mmap((void *)self->encl.encl_base + total_size, 3 * PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, self->encl.fd, 0); EXPECT_NE(addr, MAP_FAILED); self->run.exception_vector = 0; self->run.exception_error_code = 0; self->run.exception_addr = 0; stack_end = (void *)self->encl.encl_base + total_size; tcs = (void *)self->encl.encl_base + total_size + PAGE_SIZE; ssa = (void *)self->encl.encl_base + total_size + 2 * PAGE_SIZE; /* * Run EACCEPT on each new page to trigger the * EACCEPT->(#PF)->EAUG->EACCEPT(again without a #PF) flow. */ eaccept_op.epc_addr = (unsigned long)stack_end; eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING; eaccept_op.ret = 0; eaccept_op.header.type = ENCL_OP_EACCEPT; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); if (self->run.exception_vector == 14 && self->run.exception_error_code == 4 && self->run.exception_addr == (unsigned long)stack_end) { munmap(addr, 3 * PAGE_SIZE); SKIP(return, "Kernel does not support adding pages to initialized enclave"); } EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); eaccept_op.epc_addr = (unsigned long)ssa; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); eaccept_op.epc_addr = (unsigned long)tcs; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); /* * Three new pages added to enclave. Now populate the TCS page with * needed data. This should be done from within enclave. Provide * the function that will do the actual data population with needed * data. */ /* * New TCS will use the "encl_dyn_entry" entrypoint that expects * stack to begin in page before TCS page. */ val_64 = encl_get_entry(&self->encl, "encl_dyn_entry"); EXPECT_NE(val_64, 0); init_tcs_page_op.tcs_page = (unsigned long)tcs; init_tcs_page_op.ssa = (unsigned long)total_size + 2 * PAGE_SIZE; init_tcs_page_op.entry = val_64; init_tcs_page_op.header.type = ENCL_OP_INIT_TCS_PAGE; EXPECT_EQ(ENCL_CALL(&init_tcs_page_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* Change TCS page type to TCS. */ memset(&modt_ioc, 0, sizeof(modt_ioc)); modt_ioc.offset = total_size + PAGE_SIZE; modt_ioc.length = PAGE_SIZE; modt_ioc.page_type = SGX_PAGE_TYPE_TCS; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc); errno_save = ret == -1 ? errno : 0; EXPECT_EQ(ret, 0); EXPECT_EQ(errno_save, 0); EXPECT_EQ(modt_ioc.result, 0); EXPECT_EQ(modt_ioc.count, 4096); /* EACCEPT new TCS page from enclave. 
*/ eaccept_op.epc_addr = (unsigned long)tcs; eaccept_op.flags = SGX_SECINFO_TCS | SGX_SECINFO_MODIFIED; eaccept_op.ret = 0; eaccept_op.header.type = ENCL_OP_EACCEPT; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); /* Run workload from new TCS. */ self->run.tcs = (unsigned long)tcs; /* * Simple workload to write to data buffer and read value back. */ put_buf_op.header.type = ENCL_OP_PUT_TO_BUFFER; put_buf_op.value = MAGIC; EXPECT_EQ(ENCL_CALL(&put_buf_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); get_buf_op.header.type = ENCL_OP_GET_FROM_BUFFER; get_buf_op.value = 0; EXPECT_EQ(ENCL_CALL(&get_buf_op, &self->run, true), 0); EXPECT_EQ(get_buf_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Phase 2 of test: * Remove pages associated with new TCS, create a regular page * where TCS page used to be and verify it can be used as a regular * page. */ /* Start page removal by requesting change of page type to PT_TRIM. */ memset(&modt_ioc, 0, sizeof(modt_ioc)); modt_ioc.offset = total_size; modt_ioc.length = 3 * PAGE_SIZE; modt_ioc.page_type = SGX_PAGE_TYPE_TRIM; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc); errno_save = ret == -1 ? errno : 0; EXPECT_EQ(ret, 0); EXPECT_EQ(errno_save, 0); EXPECT_EQ(modt_ioc.result, 0); EXPECT_EQ(modt_ioc.count, 3 * PAGE_SIZE); /* * Enter enclave via TCS #1 and approve page removal by sending * EACCEPT for each of three removed pages. */ self->run.tcs = self->encl.encl_base; eaccept_op.epc_addr = (unsigned long)stack_end; eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED; eaccept_op.ret = 0; eaccept_op.header.type = ENCL_OP_EACCEPT; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); eaccept_op.epc_addr = (unsigned long)tcs; eaccept_op.ret = 0; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); eaccept_op.epc_addr = (unsigned long)ssa; eaccept_op.ret = 0; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); /* Send final ioctl() to complete page removal. */ memset(&remove_ioc, 0, sizeof(remove_ioc)); remove_ioc.offset = total_size; remove_ioc.length = 3 * PAGE_SIZE; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc); errno_save = ret == -1 ? errno : 0; EXPECT_EQ(ret, 0); EXPECT_EQ(errno_save, 0); EXPECT_EQ(remove_ioc.count, 3 * PAGE_SIZE); /* * Enter enclave via TCS #1 and access location where TCS #3 was to * trigger dynamic add of regular page at that location. 
*/ eaccept_op.epc_addr = (unsigned long)tcs; eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING; eaccept_op.ret = 0; eaccept_op.header.type = ENCL_OP_EACCEPT; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); /* * New page should be accessible from within enclave - write to it. */ put_addr_op.value = MAGIC; put_addr_op.addr = (unsigned long)tcs; put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Read memory from newly added page that was just written to, * confirming that data previously written (MAGIC) is present. */ get_addr_op.value = 0; get_addr_op.addr = (unsigned long)tcs; get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); munmap(addr, 3 * PAGE_SIZE); } /* * Ensure sane behavior if user requests page removal, does not run * EACCEPT from within enclave but still attempts to finalize page removal * with the SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl(). The latter should fail * because the removal was not EACCEPTed from within the enclave. */ TEST_F(enclave, remove_added_page_no_eaccept) { struct sgx_enclave_remove_pages remove_ioc; struct encl_op_get_from_addr get_addr_op; struct sgx_enclave_modify_types modt_ioc; struct encl_op_put_to_addr put_addr_op; unsigned long data_start; int ret, errno_save; ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; /* * Hardware (SGX2) and kernel support is needed for this test. Start * with check that test has a chance of succeeding. */ memset(&modt_ioc, 0, sizeof(modt_ioc)); ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc); if (ret == -1) { if (errno == ENOTTY) SKIP(return, "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()"); else if (errno == ENODEV) SKIP(return, "System does not support SGX2"); } /* * Invalid parameters were provided during sanity check, * expect command to fail. */ EXPECT_EQ(ret, -1); /* * Page that will be removed is the second data page in the .data * segment. This forms part of the local encl_buffer within the * enclave. */ data_start = self->encl.encl_base + encl_get_data_offset(&self->encl) + PAGE_SIZE; /* * Sanity check that page at @data_start is writable before * removing it. * * Start by writing MAGIC to test page. */ put_addr_op.value = MAGIC; put_addr_op.addr = data_start; put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Read memory that was just written to, confirming that data * previously written (MAGIC) is present. 
*/ get_addr_op.value = 0; get_addr_op.addr = data_start; get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* Start page removal by requesting change of page type to PT_TRIM */ memset(&modt_ioc, 0, sizeof(modt_ioc)); modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE; modt_ioc.length = PAGE_SIZE; modt_ioc.page_type = SGX_PAGE_TYPE_TRIM; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc); errno_save = ret == -1 ? errno : 0; EXPECT_EQ(ret, 0); EXPECT_EQ(errno_save, 0); EXPECT_EQ(modt_ioc.result, 0); EXPECT_EQ(modt_ioc.count, 4096); /* Skip EACCEPT */ /* Send final ioctl() to complete page removal */ memset(&remove_ioc, 0, sizeof(remove_ioc)); remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE; remove_ioc.length = PAGE_SIZE; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc); errno_save = ret == -1 ? errno : 0; /* Operation not permitted since EACCEPT was omitted. */ EXPECT_EQ(ret, -1); EXPECT_EQ(errno_save, EPERM); EXPECT_EQ(remove_ioc.count, 0); } /* * Request enclave page removal but instead of correctly following with * EACCEPT a read attempt to page is made from within the enclave. */ TEST_F(enclave, remove_added_page_invalid_access) { struct encl_op_get_from_addr get_addr_op; struct encl_op_put_to_addr put_addr_op; struct sgx_enclave_modify_types ioc; unsigned long data_start; int ret, errno_save; ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; /* * Hardware (SGX2) and kernel support is needed for this test. Start * with check that test has a chance of succeeding. */ memset(&ioc, 0, sizeof(ioc)); ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc); if (ret == -1) { if (errno == ENOTTY) SKIP(return, "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()"); else if (errno == ENODEV) SKIP(return, "System does not support SGX2"); } /* * Invalid parameters were provided during sanity check, * expect command to fail. */ EXPECT_EQ(ret, -1); /* * Page that will be removed is the second data page in the .data * segment. This forms part of the local encl_buffer within the * enclave. */ data_start = self->encl.encl_base + encl_get_data_offset(&self->encl) + PAGE_SIZE; /* * Sanity check that page at @data_start is writable before * removing it. * * Start by writing MAGIC to test page. */ put_addr_op.value = MAGIC; put_addr_op.addr = data_start; put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Read memory that was just written to, confirming that data * previously written (MAGIC) is present. */ get_addr_op.value = 0; get_addr_op.addr = data_start; get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* Start page removal by requesting change of page type to PT_TRIM. 
*/ memset(&ioc, 0, sizeof(ioc)); ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE; ioc.length = PAGE_SIZE; ioc.page_type = SGX_PAGE_TYPE_TRIM; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc); errno_save = ret == -1 ? errno : 0; EXPECT_EQ(ret, 0); EXPECT_EQ(errno_save, 0); EXPECT_EQ(ioc.result, 0); EXPECT_EQ(ioc.count, 4096); /* * Read from page that was just removed. */ get_addr_op.value = 0; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); /* * From kernel perspective the page is present but according to SGX the * page should not be accessible so a #PF with SGX bit set is * expected. */ EXPECT_EQ(self->run.function, ERESUME); EXPECT_EQ(self->run.exception_vector, 14); EXPECT_EQ(self->run.exception_error_code, 0x8005); EXPECT_EQ(self->run.exception_addr, data_start); } /* * Request enclave page removal and correctly follow with * EACCEPT but do not follow with removal ioctl() but instead a read attempt * to removed page is made from within the enclave. */ TEST_F(enclave, remove_added_page_invalid_access_after_eaccept) { struct encl_op_get_from_addr get_addr_op; struct encl_op_put_to_addr put_addr_op; struct sgx_enclave_modify_types ioc; struct encl_op_eaccept eaccept_op; unsigned long data_start; int ret, errno_save; ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; /* * Hardware (SGX2) and kernel support is needed for this test. Start * with check that test has a chance of succeeding. */ memset(&ioc, 0, sizeof(ioc)); ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc); if (ret == -1) { if (errno == ENOTTY) SKIP(return, "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()"); else if (errno == ENODEV) SKIP(return, "System does not support SGX2"); } /* * Invalid parameters were provided during sanity check, * expect command to fail. */ EXPECT_EQ(ret, -1); /* * Page that will be removed is the second data page in the .data * segment. This forms part of the local encl_buffer within the * enclave. */ data_start = self->encl.encl_base + encl_get_data_offset(&self->encl) + PAGE_SIZE; /* * Sanity check that page at @data_start is writable before * removing it. * * Start by writing MAGIC to test page. */ put_addr_op.value = MAGIC; put_addr_op.addr = data_start; put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Read memory that was just written to, confirming that data * previously written (MAGIC) is present. */ get_addr_op.value = 0; get_addr_op.addr = data_start; get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* Start page removal by requesting change of page type to PT_TRIM. */ memset(&ioc, 0, sizeof(ioc)); ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE; ioc.length = PAGE_SIZE; ioc.page_type = SGX_PAGE_TYPE_TRIM; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc); errno_save = ret == -1 ? 
errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, 4096);

	eaccept_op.epc_addr = (unsigned long)data_start;
	eaccept_op.ret = 0;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Skip ioctl() to remove page. */

	/*
	 * Read from page that was just removed.
	 */
	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	/*
	 * From kernel perspective the page is present but according to SGX the
	 * page should not be accessible so a #PF with SGX bit set is
	 * expected.
	 */
	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x8005);
	EXPECT_EQ(self->run.exception_addr, data_start);
}

TEST_F(enclave, remove_untouched_page)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_eaccept eaccept_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
				    _metadata));

	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with check that test has a chance of succeeding.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	EXPECT_EQ(ret, -1);

	/* SGX2 is supported by kernel and hardware, test can proceed. */
	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	data_start = self->encl.encl_base + encl_get_data_offset(&self->encl) +
		     PAGE_SIZE;

	memset(&modt_ioc, 0, sizeof(modt_ioc));
	modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	modt_ioc.length = PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 4096);

	/*
	 * Enter enclave via TCS #1 and approve page removal by sending
	 * EACCEPT for removed page.
	 */
	eaccept_op.epc_addr = data_start;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	memset(&remove_ioc, 0, sizeof(remove_ioc));
	remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	remove_ioc.length = PAGE_SIZE;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, 4096);
}

TEST_HARNESS_MAIN
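A minimal standalone sketch of the EPC-size walk that get_total_epc_mem() and
sgx_calc_section_metric() perform in the selftest above, kept separate from the
harness so it can be compiled on its own. The EX_-prefixed constants are local
stand-ins for the SGX_CPUID* values the selftest pulls in from defines.h; they
follow the usual description of CPUID leaf 0x12 (sub-leaves 2 and up enumerate
EPC sections, EAX[3:0] giving the sub-leaf type), but treat them as assumptions
and check them against your headers. The two break conditions of the original
loop (invalid sub-leaf, non-section sub-leaf) are collapsed into one type check.

#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

#define EX_SGX_CPUID			0x12	/* assumed: SGX CPUID leaf */
#define EX_SGX_CPUID_EPC		2	/* assumed: first EPC sub-leaf */
#define EX_SGX_CPUID_EPC_MASK		0xf	/* EAX[3:0]: sub-leaf type */
#define EX_SGX_CPUID_EPC_SECTION	0x1	/* type 1: valid EPC section */

/*
 * Mirror of sgx_calc_section_metric(): bits 12-31 of the metric come from
 * the low register, bits 32-51 from the high register.
 */
static uint64_t ex_calc_section_metric(unsigned int low, unsigned int high)
{
	return (low & 0xfffff000u) +
	       ((uint64_t)(high & 0x000fffffu) << 32);
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	uint64_t total_size = 0;
	int section = 0;

	while (1) {
		__cpuid_count(EX_SGX_CPUID, section + EX_SGX_CPUID_EPC,
			      eax, ebx, ecx, edx);

		/* Stop at the first sub-leaf that is not an EPC section. */
		if ((eax & EX_SGX_CPUID_EPC_MASK) != EX_SGX_CPUID_EPC_SECTION)
			break;

		/* ECX/EDX encode the section size, EAX/EBX its base. */
		total_size += ex_calc_section_metric(ecx, edx);
		section++;
	}

	printf("total EPC: %llu bytes\n", (unsigned long long)total_size);
	return 0;
}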
// SPDX-License-Identifier: GPL-2.0
/*
 * ARM Ltd. Versatile Express
 *
 * LogicTile Express 20MG
 * V2F-1XV7
 *
 * Cortex-A53 (2 cores) Soft Macrocell Model
 *
 * HBI-0247C
 */

/dts-v1/;

#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "arm/arm/vexpress-v2m-rs1.dtsi"

/ {
	model = "V2F-1XV7 Cortex-A53x2 SMM";
	arm,hbi = <0x247>;
	arm,vexpress,site = <0xf>;
	compatible = "arm,vexpress,v2f-1xv7,ca53x2", "arm,vexpress,v2f-1xv7", "arm,vexpress";
	interrupt-parent = <&gic>;
	#address-cells = <2>;
	#size-cells = <2>;

	chosen {
		stdout-path = "serial0:38400n8";
	};

	aliases {
		serial0 = &v2m_serial0;
		serial1 = &v2m_serial1;
		serial2 = &v2m_serial2;
		serial3 = &v2m_serial3;
		i2c0 = &v2m_i2c_dvi;
		i2c1 = &v2m_i2c_pcie;
	};

	cpus {
		#address-cells = <2>;
		#size-cells = <0>;

		cpu@0 {
			device_type = "cpu";
			compatible = "arm,cortex-a53";
			reg = <0 0>;
			next-level-cache = <&L2_0>;
		};

		cpu@1 {
			device_type = "cpu";
			compatible = "arm,cortex-a53";
			reg = <0 1>;
			next-level-cache = <&L2_0>;
		};

		L2_0: l2-cache0 {
			compatible = "cache";
			cache-level = <2>;
			cache-unified;
		};
	};

	memory@80000000 {
		device_type = "memory";
		reg = <0 0x80000000 0 0x80000000>; /* 2GB @ 2GB */
	};

	reserved-memory {
		#address-cells = <2>;
		#size-cells = <2>;
		ranges;

		/* Chipselect 2 is physically at 0x18000000 */
		vram: vram@18000000 {
			/* 8 MB of designated video RAM */
			compatible = "shared-dma-pool";
			reg = <0 0x18000000 0 0x00800000>;
			no-map;
		};
	};

	gic: interrupt-controller@2c001000 {
		compatible = "arm,gic-400";
		#interrupt-cells = <3>;
		#address-cells = <0>;
		interrupt-controller;
		reg = <0 0x2c001000 0 0x1000>,
		      <0 0x2c002000 0 0x2000>,
		      <0 0x2c004000 0 0x2000>,
		      <0 0x2c006000 0 0x2000>;
		interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
	};

	timer {
		compatible = "arm,armv8-timer";
		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
	};

	pmu {
		compatible = "arm,cortex-a53-pmu";
		interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
	};

	dcc {
		compatible = "arm,vexpress,config-bus";
		arm,vexpress,config-bridge = <&v2m_sysreg>;

		smbclk: clock-controller {
			/* SMC clock */
			compatible = "arm,vexpress-osc";
			arm,vexpress-sysreg,func = <1 4>;
			freq-range = <40000000 40000000>;
			#clock-cells = <0>;
			clock-output-names = "smclk";
		};

		regulator-vio {
			/* VIO to expansion board above */
			compatible = "arm,vexpress-volt";
			arm,vexpress-sysreg,func = <2 0>;
			regulator-name = "VIO_UP";
			regulator-min-microvolt = <800000>;
			regulator-max-microvolt = <1800000>;
			regulator-always-on;
		};

		regulator-12v {
			/* 12V from power connector J6 */
			compatible = "arm,vexpress-volt";
			arm,vexpress-sysreg,func = <2 1>;
			regulator-name = "12";
			regulator-always-on;
		};

		temp-fpga {
			/* FPGA temperature */
			compatible = "arm,vexpress-temp";
			arm,vexpress-sysreg,func = <4 0>;
			label = "FPGA";
		};
	};

	smb: bus@8000000 {
		ranges = <0x8000000 0 0x8000000 0x18000000>;
	};
};
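The vram reserved-memory node above only carves the region out of system RAM;
a driver still has to look it up. Below is a hedged sketch of how a platform
driver might resolve it, assuming some consumer node carries a
memory-region = <&vram> phandle (that consumer is hypothetical here, not part
of this tree; upstream it would live in the included motherboard .dtsi).
of_parse_phandle()/of_address_to_resource() are the standard kernel APIs for
this, with error handling trimmed to the essentials.

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/ioport.h>

/*
 * Resolve the reserved-memory region referenced by @consumer's
 * "memory-region" property (e.g. memory-region = <&vram>; the consumer
 * node itself is an assumption, not shown in the tree above) and return
 * its physical base/size in @res.
 */
static int example_get_vram(struct device_node *consumer,
			    struct resource *res)
{
	struct device_node *np;
	int ret;

	np = of_parse_phandle(consumer, "memory-region", 0);
	if (!np)
		return -ENODEV;

	ret = of_address_to_resource(np, 0, res);
	of_node_put(np);

	return ret;
}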
/* * GMC_8_1 Register documentation * * Copyright (C) 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef GMC_8_1_ENUM_H #define GMC_8_1_ENUM_H typedef enum SurfaceEndian { ENDIAN_NONE = 0x0, ENDIAN_8IN16 = 0x1, ENDIAN_8IN32 = 0x2, ENDIAN_8IN64 = 0x3, } SurfaceEndian; typedef enum ArrayMode { ARRAY_LINEAR_GENERAL = 0x0, ARRAY_LINEAR_ALIGNED = 0x1, ARRAY_1D_TILED_THIN1 = 0x2, ARRAY_1D_TILED_THICK = 0x3, ARRAY_2D_TILED_THIN1 = 0x4, ARRAY_PRT_TILED_THIN1 = 0x5, ARRAY_PRT_2D_TILED_THIN1 = 0x6, ARRAY_2D_TILED_THICK = 0x7, ARRAY_2D_TILED_XTHICK = 0x8, ARRAY_PRT_TILED_THICK = 0x9, ARRAY_PRT_2D_TILED_THICK = 0xa, ARRAY_PRT_3D_TILED_THIN1 = 0xb, ARRAY_3D_TILED_THIN1 = 0xc, ARRAY_3D_TILED_THICK = 0xd, ARRAY_3D_TILED_XTHICK = 0xe, ARRAY_PRT_3D_TILED_THICK = 0xf, } ArrayMode; typedef enum PipeTiling { CONFIG_1_PIPE = 0x0, CONFIG_2_PIPE = 0x1, CONFIG_4_PIPE = 0x2, CONFIG_8_PIPE = 0x3, } PipeTiling; typedef enum BankTiling { CONFIG_4_BANK = 0x0, CONFIG_8_BANK = 0x1, } BankTiling; typedef enum GroupInterleave { CONFIG_256B_GROUP = 0x0, CONFIG_512B_GROUP = 0x1, } GroupInterleave; typedef enum RowTiling { CONFIG_1KB_ROW = 0x0, CONFIG_2KB_ROW = 0x1, CONFIG_4KB_ROW = 0x2, CONFIG_8KB_ROW = 0x3, CONFIG_1KB_ROW_OPT = 0x4, CONFIG_2KB_ROW_OPT = 0x5, CONFIG_4KB_ROW_OPT = 0x6, CONFIG_8KB_ROW_OPT = 0x7, } RowTiling; typedef enum BankSwapBytes { CONFIG_128B_SWAPS = 0x0, CONFIG_256B_SWAPS = 0x1, CONFIG_512B_SWAPS = 0x2, CONFIG_1KB_SWAPS = 0x3, } BankSwapBytes; typedef enum SampleSplitBytes { CONFIG_1KB_SPLIT = 0x0, CONFIG_2KB_SPLIT = 0x1, CONFIG_4KB_SPLIT = 0x2, CONFIG_8KB_SPLIT = 0x3, } SampleSplitBytes; typedef enum NumPipes { ADDR_CONFIG_1_PIPE = 0x0, ADDR_CONFIG_2_PIPE = 0x1, ADDR_CONFIG_4_PIPE = 0x2, ADDR_CONFIG_8_PIPE = 0x3, } NumPipes; typedef enum PipeInterleaveSize { ADDR_CONFIG_PIPE_INTERLEAVE_256B = 0x0, ADDR_CONFIG_PIPE_INTERLEAVE_512B = 0x1, } PipeInterleaveSize; typedef enum BankInterleaveSize { ADDR_CONFIG_BANK_INTERLEAVE_1 = 0x0, ADDR_CONFIG_BANK_INTERLEAVE_2 = 0x1, ADDR_CONFIG_BANK_INTERLEAVE_4 = 0x2, ADDR_CONFIG_BANK_INTERLEAVE_8 = 0x3, } BankInterleaveSize; typedef enum NumShaderEngines { ADDR_CONFIG_1_SHADER_ENGINE = 0x0, ADDR_CONFIG_2_SHADER_ENGINE = 0x1, } NumShaderEngines; typedef enum ShaderEngineTileSize { ADDR_CONFIG_SE_TILE_16 = 0x0, ADDR_CONFIG_SE_TILE_32 = 0x1, } ShaderEngineTileSize; typedef enum NumGPUs { ADDR_CONFIG_1_GPU = 0x0, ADDR_CONFIG_2_GPU = 0x1, ADDR_CONFIG_4_GPU = 0x2, } NumGPUs; typedef enum MultiGPUTileSize { ADDR_CONFIG_GPU_TILE_16 = 0x0, 
ADDR_CONFIG_GPU_TILE_32 = 0x1, ADDR_CONFIG_GPU_TILE_64 = 0x2, ADDR_CONFIG_GPU_TILE_128 = 0x3, } MultiGPUTileSize; typedef enum RowSize { ADDR_CONFIG_1KB_ROW = 0x0, ADDR_CONFIG_2KB_ROW = 0x1, ADDR_CONFIG_4KB_ROW = 0x2, } RowSize; typedef enum NumLowerPipes { ADDR_CONFIG_1_LOWER_PIPES = 0x0, ADDR_CONFIG_2_LOWER_PIPES = 0x1, } NumLowerPipes; typedef enum DebugBlockId { DBG_CLIENT_BLKID_RESERVED = 0x0, DBG_CLIENT_BLKID_dbg = 0x1, DBG_CLIENT_BLKID_scf2 = 0x2, DBG_CLIENT_BLKID_mcd5 = 0x3, DBG_CLIENT_BLKID_vmc = 0x4, DBG_CLIENT_BLKID_sx30 = 0x5, DBG_CLIENT_BLKID_mcd2 = 0x6, DBG_CLIENT_BLKID_bci1 = 0x7, DBG_CLIENT_BLKID_xdma_dbg_client_wrapper = 0x8, DBG_CLIENT_BLKID_mcc0 = 0x9, DBG_CLIENT_BLKID_uvdf_0 = 0xa, DBG_CLIENT_BLKID_uvdf_1 = 0xb, DBG_CLIENT_BLKID_uvdf_2 = 0xc, DBG_CLIENT_BLKID_uvdi_0 = 0xd, DBG_CLIENT_BLKID_bci0 = 0xe, DBG_CLIENT_BLKID_vcec0_0 = 0xf, DBG_CLIENT_BLKID_cb100 = 0x10, DBG_CLIENT_BLKID_cb001 = 0x11, DBG_CLIENT_BLKID_mcd4 = 0x12, DBG_CLIENT_BLKID_tmonw00 = 0x13, DBG_CLIENT_BLKID_cb101 = 0x14, DBG_CLIENT_BLKID_sx10 = 0x15, DBG_CLIENT_BLKID_cb301 = 0x16, DBG_CLIENT_BLKID_tmonw01 = 0x17, DBG_CLIENT_BLKID_vcea0_0 = 0x18, DBG_CLIENT_BLKID_vcea0_1 = 0x19, DBG_CLIENT_BLKID_vcea0_2 = 0x1a, DBG_CLIENT_BLKID_vcea0_3 = 0x1b, DBG_CLIENT_BLKID_scf1 = 0x1c, DBG_CLIENT_BLKID_sx20 = 0x1d, DBG_CLIENT_BLKID_spim1 = 0x1e, DBG_CLIENT_BLKID_pa10 = 0x1f, DBG_CLIENT_BLKID_pa00 = 0x20, DBG_CLIENT_BLKID_gmcon = 0x21, DBG_CLIENT_BLKID_mcb = 0x22, DBG_CLIENT_BLKID_vgt0 = 0x23, DBG_CLIENT_BLKID_pc0 = 0x24, DBG_CLIENT_BLKID_bci2 = 0x25, DBG_CLIENT_BLKID_uvdb_0 = 0x26, DBG_CLIENT_BLKID_spim3 = 0x27, DBG_CLIENT_BLKID_cpc_0 = 0x28, DBG_CLIENT_BLKID_cpc_1 = 0x29, DBG_CLIENT_BLKID_uvdm_0 = 0x2a, DBG_CLIENT_BLKID_uvdm_1 = 0x2b, DBG_CLIENT_BLKID_uvdm_2 = 0x2c, DBG_CLIENT_BLKID_uvdm_3 = 0x2d, DBG_CLIENT_BLKID_cb000 = 0x2e, DBG_CLIENT_BLKID_spim0 = 0x2f, DBG_CLIENT_BLKID_mcc2 = 0x30, DBG_CLIENT_BLKID_ds0 = 0x31, DBG_CLIENT_BLKID_srbm = 0x32, DBG_CLIENT_BLKID_ih = 0x33, DBG_CLIENT_BLKID_sem = 0x34, DBG_CLIENT_BLKID_sdma_0 = 0x35, DBG_CLIENT_BLKID_sdma_1 = 0x36, DBG_CLIENT_BLKID_hdp = 0x37, DBG_CLIENT_BLKID_acp_0 = 0x38, DBG_CLIENT_BLKID_acp_1 = 0x39, DBG_CLIENT_BLKID_cb200 = 0x3a, DBG_CLIENT_BLKID_scf3 = 0x3b, DBG_CLIENT_BLKID_vceb1_0 = 0x3c, DBG_CLIENT_BLKID_vcea1_0 = 0x3d, DBG_CLIENT_BLKID_vcea1_1 = 0x3e, DBG_CLIENT_BLKID_vcea1_2 = 0x3f, DBG_CLIENT_BLKID_vcea1_3 = 0x40, DBG_CLIENT_BLKID_bci3 = 0x41, DBG_CLIENT_BLKID_mcd0 = 0x42, DBG_CLIENT_BLKID_pa11 = 0x43, DBG_CLIENT_BLKID_pa01 = 0x44, DBG_CLIENT_BLKID_cb201 = 0x45, DBG_CLIENT_BLKID_spim2 = 0x46, DBG_CLIENT_BLKID_vgt2 = 0x47, DBG_CLIENT_BLKID_pc2 = 0x48, DBG_CLIENT_BLKID_smu_0 = 0x49, DBG_CLIENT_BLKID_smu_1 = 0x4a, DBG_CLIENT_BLKID_smu_2 = 0x4b, DBG_CLIENT_BLKID_cb1 = 0x4c, DBG_CLIENT_BLKID_ia0 = 0x4d, DBG_CLIENT_BLKID_wd = 0x4e, DBG_CLIENT_BLKID_ia1 = 0x4f, DBG_CLIENT_BLKID_vcec1_0 = 0x50, DBG_CLIENT_BLKID_scf0 = 0x51, DBG_CLIENT_BLKID_vgt1 = 0x52, DBG_CLIENT_BLKID_pc1 = 0x53, DBG_CLIENT_BLKID_cb0 = 0x54, DBG_CLIENT_BLKID_gdc_one_0 = 0x55, DBG_CLIENT_BLKID_gdc_one_1 = 0x56, DBG_CLIENT_BLKID_gdc_one_2 = 0x57, DBG_CLIENT_BLKID_gdc_one_3 = 0x58, DBG_CLIENT_BLKID_gdc_one_4 = 0x59, DBG_CLIENT_BLKID_gdc_one_5 = 0x5a, DBG_CLIENT_BLKID_gdc_one_6 = 0x5b, DBG_CLIENT_BLKID_gdc_one_7 = 0x5c, DBG_CLIENT_BLKID_gdc_one_8 = 0x5d, DBG_CLIENT_BLKID_gdc_one_9 = 0x5e, DBG_CLIENT_BLKID_gdc_one_10 = 0x5f, DBG_CLIENT_BLKID_gdc_one_11 = 0x60, DBG_CLIENT_BLKID_gdc_one_12 = 0x61, DBG_CLIENT_BLKID_gdc_one_13 = 0x62, DBG_CLIENT_BLKID_gdc_one_14 = 0x63, DBG_CLIENT_BLKID_gdc_one_15 = 0x64, 
DBG_CLIENT_BLKID_gdc_one_16 = 0x65, DBG_CLIENT_BLKID_gdc_one_17 = 0x66, DBG_CLIENT_BLKID_gdc_one_18 = 0x67, DBG_CLIENT_BLKID_gdc_one_19 = 0x68, DBG_CLIENT_BLKID_gdc_one_20 = 0x69, DBG_CLIENT_BLKID_gdc_one_21 = 0x6a, DBG_CLIENT_BLKID_gdc_one_22 = 0x6b, DBG_CLIENT_BLKID_gdc_one_23 = 0x6c, DBG_CLIENT_BLKID_gdc_one_24 = 0x6d, DBG_CLIENT_BLKID_gdc_one_25 = 0x6e, DBG_CLIENT_BLKID_gdc_one_26 = 0x6f, DBG_CLIENT_BLKID_gdc_one_27 = 0x70, DBG_CLIENT_BLKID_gdc_one_28 = 0x71, DBG_CLIENT_BLKID_gdc_one_29 = 0x72, DBG_CLIENT_BLKID_gdc_one_30 = 0x73, DBG_CLIENT_BLKID_gdc_one_31 = 0x74, DBG_CLIENT_BLKID_gdc_one_32 = 0x75, DBG_CLIENT_BLKID_gdc_one_33 = 0x76, DBG_CLIENT_BLKID_gdc_one_34 = 0x77, DBG_CLIENT_BLKID_gdc_one_35 = 0x78, DBG_CLIENT_BLKID_vceb0_0 = 0x79, DBG_CLIENT_BLKID_vgt3 = 0x7a, DBG_CLIENT_BLKID_pc3 = 0x7b, DBG_CLIENT_BLKID_mcd3 = 0x7c, DBG_CLIENT_BLKID_uvdu_0 = 0x7d, DBG_CLIENT_BLKID_uvdu_1 = 0x7e, DBG_CLIENT_BLKID_uvdu_2 = 0x7f, DBG_CLIENT_BLKID_uvdu_3 = 0x80, DBG_CLIENT_BLKID_uvdu_4 = 0x81, DBG_CLIENT_BLKID_uvdu_5 = 0x82, DBG_CLIENT_BLKID_uvdu_6 = 0x83, DBG_CLIENT_BLKID_cb300 = 0x84, DBG_CLIENT_BLKID_mcd1 = 0x85, DBG_CLIENT_BLKID_sx00 = 0x86, DBG_CLIENT_BLKID_uvdc_0 = 0x87, DBG_CLIENT_BLKID_uvdc_1 = 0x88, DBG_CLIENT_BLKID_mcc3 = 0x89, DBG_CLIENT_BLKID_cpg_0 = 0x8a, DBG_CLIENT_BLKID_cpg_1 = 0x8b, DBG_CLIENT_BLKID_gck = 0x8c, DBG_CLIENT_BLKID_mcc1 = 0x8d, DBG_CLIENT_BLKID_cpf_0 = 0x8e, DBG_CLIENT_BLKID_cpf_1 = 0x8f, DBG_CLIENT_BLKID_rlc = 0x90, DBG_CLIENT_BLKID_grbm = 0x91, DBG_CLIENT_BLKID_sammsp = 0x92, DBG_CLIENT_BLKID_dci_pg = 0x93, DBG_CLIENT_BLKID_dci_0 = 0x94, DBG_CLIENT_BLKID_dccg0_0 = 0x95, DBG_CLIENT_BLKID_dccg0_1 = 0x96, DBG_CLIENT_BLKID_dcfe01_0 = 0x97, DBG_CLIENT_BLKID_dcfe02_0 = 0x98, DBG_CLIENT_BLKID_dcfe03_0 = 0x99, DBG_CLIENT_BLKID_dcfe04_0 = 0x9a, DBG_CLIENT_BLKID_dcfe05_0 = 0x9b, DBG_CLIENT_BLKID_dcfe06_0 = 0x9c, DBG_CLIENT_BLKID_RESERVED_LAST = 0x9d, } DebugBlockId; typedef enum DebugBlockId_OLD { DBG_BLOCK_ID_RESERVED = 0x0, DBG_BLOCK_ID_DBG = 0x1, DBG_BLOCK_ID_VMC = 0x2, DBG_BLOCK_ID_PDMA = 0x3, DBG_BLOCK_ID_CG = 0x4, DBG_BLOCK_ID_SRBM = 0x5, DBG_BLOCK_ID_GRBM = 0x6, DBG_BLOCK_ID_RLC = 0x7, DBG_BLOCK_ID_CSC = 0x8, DBG_BLOCK_ID_SEM = 0x9, DBG_BLOCK_ID_IH = 0xa, DBG_BLOCK_ID_SC = 0xb, DBG_BLOCK_ID_SQ = 0xc, DBG_BLOCK_ID_AVP = 0xd, DBG_BLOCK_ID_GMCON = 0xe, DBG_BLOCK_ID_SMU = 0xf, DBG_BLOCK_ID_DMA0 = 0x10, DBG_BLOCK_ID_DMA1 = 0x11, DBG_BLOCK_ID_SPIM = 0x12, DBG_BLOCK_ID_GDS = 0x13, DBG_BLOCK_ID_SPIS = 0x14, DBG_BLOCK_ID_UNUSED0 = 0x15, DBG_BLOCK_ID_PA0 = 0x16, DBG_BLOCK_ID_PA1 = 0x17, DBG_BLOCK_ID_CP0 = 0x18, DBG_BLOCK_ID_CP1 = 0x19, DBG_BLOCK_ID_CP2 = 0x1a, DBG_BLOCK_ID_UNUSED1 = 0x1b, DBG_BLOCK_ID_UVDU = 0x1c, DBG_BLOCK_ID_UVDM = 0x1d, DBG_BLOCK_ID_VCE = 0x1e, DBG_BLOCK_ID_UNUSED2 = 0x1f, DBG_BLOCK_ID_VGT0 = 0x20, DBG_BLOCK_ID_VGT1 = 0x21, DBG_BLOCK_ID_IA = 0x22, DBG_BLOCK_ID_UNUSED3 = 0x23, DBG_BLOCK_ID_SCT0 = 0x24, DBG_BLOCK_ID_SCT1 = 0x25, DBG_BLOCK_ID_SPM0 = 0x26, DBG_BLOCK_ID_SPM1 = 0x27, DBG_BLOCK_ID_TCAA = 0x28, DBG_BLOCK_ID_TCAB = 0x29, DBG_BLOCK_ID_TCCA = 0x2a, DBG_BLOCK_ID_TCCB = 0x2b, DBG_BLOCK_ID_MCC0 = 0x2c, DBG_BLOCK_ID_MCC1 = 0x2d, DBG_BLOCK_ID_MCC2 = 0x2e, DBG_BLOCK_ID_MCC3 = 0x2f, DBG_BLOCK_ID_SX0 = 0x30, DBG_BLOCK_ID_SX1 = 0x31, DBG_BLOCK_ID_SX2 = 0x32, DBG_BLOCK_ID_SX3 = 0x33, DBG_BLOCK_ID_UNUSED4 = 0x34, DBG_BLOCK_ID_UNUSED5 = 0x35, DBG_BLOCK_ID_UNUSED6 = 0x36, DBG_BLOCK_ID_UNUSED7 = 0x37, DBG_BLOCK_ID_PC0 = 0x38, DBG_BLOCK_ID_PC1 = 0x39, DBG_BLOCK_ID_UNUSED8 = 0x3a, DBG_BLOCK_ID_UNUSED9 = 0x3b, DBG_BLOCK_ID_UNUSED10 = 0x3c, DBG_BLOCK_ID_UNUSED11 = 0x3d, 
DBG_BLOCK_ID_MCB = 0x3e, DBG_BLOCK_ID_UNUSED12 = 0x3f, DBG_BLOCK_ID_SCB0 = 0x40, DBG_BLOCK_ID_SCB1 = 0x41, DBG_BLOCK_ID_UNUSED13 = 0x42, DBG_BLOCK_ID_UNUSED14 = 0x43, DBG_BLOCK_ID_SCF0 = 0x44, DBG_BLOCK_ID_SCF1 = 0x45, DBG_BLOCK_ID_UNUSED15 = 0x46, DBG_BLOCK_ID_UNUSED16 = 0x47, DBG_BLOCK_ID_BCI0 = 0x48, DBG_BLOCK_ID_BCI1 = 0x49, DBG_BLOCK_ID_BCI2 = 0x4a, DBG_BLOCK_ID_BCI3 = 0x4b, DBG_BLOCK_ID_UNUSED17 = 0x4c, DBG_BLOCK_ID_UNUSED18 = 0x4d, DBG_BLOCK_ID_UNUSED19 = 0x4e, DBG_BLOCK_ID_UNUSED20 = 0x4f, DBG_BLOCK_ID_CB00 = 0x50, DBG_BLOCK_ID_CB01 = 0x51, DBG_BLOCK_ID_CB02 = 0x52, DBG_BLOCK_ID_CB03 = 0x53, DBG_BLOCK_ID_CB04 = 0x54, DBG_BLOCK_ID_UNUSED21 = 0x55, DBG_BLOCK_ID_UNUSED22 = 0x56, DBG_BLOCK_ID_UNUSED23 = 0x57, DBG_BLOCK_ID_CB10 = 0x58, DBG_BLOCK_ID_CB11 = 0x59, DBG_BLOCK_ID_CB12 = 0x5a, DBG_BLOCK_ID_CB13 = 0x5b, DBG_BLOCK_ID_CB14 = 0x5c, DBG_BLOCK_ID_UNUSED24 = 0x5d, DBG_BLOCK_ID_UNUSED25 = 0x5e, DBG_BLOCK_ID_UNUSED26 = 0x5f, DBG_BLOCK_ID_TCP0 = 0x60, DBG_BLOCK_ID_TCP1 = 0x61, DBG_BLOCK_ID_TCP2 = 0x62, DBG_BLOCK_ID_TCP3 = 0x63, DBG_BLOCK_ID_TCP4 = 0x64, DBG_BLOCK_ID_TCP5 = 0x65, DBG_BLOCK_ID_TCP6 = 0x66, DBG_BLOCK_ID_TCP7 = 0x67, DBG_BLOCK_ID_TCP8 = 0x68, DBG_BLOCK_ID_TCP9 = 0x69, DBG_BLOCK_ID_TCP10 = 0x6a, DBG_BLOCK_ID_TCP11 = 0x6b, DBG_BLOCK_ID_TCP12 = 0x6c, DBG_BLOCK_ID_TCP13 = 0x6d, DBG_BLOCK_ID_TCP14 = 0x6e, DBG_BLOCK_ID_TCP15 = 0x6f, DBG_BLOCK_ID_TCP16 = 0x70, DBG_BLOCK_ID_TCP17 = 0x71, DBG_BLOCK_ID_TCP18 = 0x72, DBG_BLOCK_ID_TCP19 = 0x73, DBG_BLOCK_ID_TCP20 = 0x74, DBG_BLOCK_ID_TCP21 = 0x75, DBG_BLOCK_ID_TCP22 = 0x76, DBG_BLOCK_ID_TCP23 = 0x77, DBG_BLOCK_ID_TCP_RESERVED0 = 0x78, DBG_BLOCK_ID_TCP_RESERVED1 = 0x79, DBG_BLOCK_ID_TCP_RESERVED2 = 0x7a, DBG_BLOCK_ID_TCP_RESERVED3 = 0x7b, DBG_BLOCK_ID_TCP_RESERVED4 = 0x7c, DBG_BLOCK_ID_TCP_RESERVED5 = 0x7d, DBG_BLOCK_ID_TCP_RESERVED6 = 0x7e, DBG_BLOCK_ID_TCP_RESERVED7 = 0x7f, DBG_BLOCK_ID_DB00 = 0x80, DBG_BLOCK_ID_DB01 = 0x81, DBG_BLOCK_ID_DB02 = 0x82, DBG_BLOCK_ID_DB03 = 0x83, DBG_BLOCK_ID_DB04 = 0x84, DBG_BLOCK_ID_UNUSED27 = 0x85, DBG_BLOCK_ID_UNUSED28 = 0x86, DBG_BLOCK_ID_UNUSED29 = 0x87, DBG_BLOCK_ID_DB10 = 0x88, DBG_BLOCK_ID_DB11 = 0x89, DBG_BLOCK_ID_DB12 = 0x8a, DBG_BLOCK_ID_DB13 = 0x8b, DBG_BLOCK_ID_DB14 = 0x8c, DBG_BLOCK_ID_UNUSED30 = 0x8d, DBG_BLOCK_ID_UNUSED31 = 0x8e, DBG_BLOCK_ID_UNUSED32 = 0x8f, DBG_BLOCK_ID_TCC0 = 0x90, DBG_BLOCK_ID_TCC1 = 0x91, DBG_BLOCK_ID_TCC2 = 0x92, DBG_BLOCK_ID_TCC3 = 0x93, DBG_BLOCK_ID_TCC4 = 0x94, DBG_BLOCK_ID_TCC5 = 0x95, DBG_BLOCK_ID_TCC6 = 0x96, DBG_BLOCK_ID_TCC7 = 0x97, DBG_BLOCK_ID_SPS00 = 0x98, DBG_BLOCK_ID_SPS01 = 0x99, DBG_BLOCK_ID_SPS02 = 0x9a, DBG_BLOCK_ID_SPS10 = 0x9b, DBG_BLOCK_ID_SPS11 = 0x9c, DBG_BLOCK_ID_SPS12 = 0x9d, DBG_BLOCK_ID_UNUSED33 = 0x9e, DBG_BLOCK_ID_UNUSED34 = 0x9f, DBG_BLOCK_ID_TA00 = 0xa0, DBG_BLOCK_ID_TA01 = 0xa1, DBG_BLOCK_ID_TA02 = 0xa2, DBG_BLOCK_ID_TA03 = 0xa3, DBG_BLOCK_ID_TA04 = 0xa4, DBG_BLOCK_ID_TA05 = 0xa5, DBG_BLOCK_ID_TA06 = 0xa6, DBG_BLOCK_ID_TA07 = 0xa7, DBG_BLOCK_ID_TA08 = 0xa8, DBG_BLOCK_ID_TA09 = 0xa9, DBG_BLOCK_ID_TA0A = 0xaa, DBG_BLOCK_ID_TA0B = 0xab, DBG_BLOCK_ID_UNUSED35 = 0xac, DBG_BLOCK_ID_UNUSED36 = 0xad, DBG_BLOCK_ID_UNUSED37 = 0xae, DBG_BLOCK_ID_UNUSED38 = 0xaf, DBG_BLOCK_ID_TA10 = 0xb0, DBG_BLOCK_ID_TA11 = 0xb1, DBG_BLOCK_ID_TA12 = 0xb2, DBG_BLOCK_ID_TA13 = 0xb3, DBG_BLOCK_ID_TA14 = 0xb4, DBG_BLOCK_ID_TA15 = 0xb5, DBG_BLOCK_ID_TA16 = 0xb6, DBG_BLOCK_ID_TA17 = 0xb7, DBG_BLOCK_ID_TA18 = 0xb8, DBG_BLOCK_ID_TA19 = 0xb9, DBG_BLOCK_ID_TA1A = 0xba, DBG_BLOCK_ID_TA1B = 0xbb, DBG_BLOCK_ID_UNUSED39 = 0xbc, DBG_BLOCK_ID_UNUSED40 = 0xbd, 
DBG_BLOCK_ID_UNUSED41 = 0xbe, DBG_BLOCK_ID_UNUSED42 = 0xbf, DBG_BLOCK_ID_TD00 = 0xc0, DBG_BLOCK_ID_TD01 = 0xc1, DBG_BLOCK_ID_TD02 = 0xc2, DBG_BLOCK_ID_TD03 = 0xc3, DBG_BLOCK_ID_TD04 = 0xc4, DBG_BLOCK_ID_TD05 = 0xc5, DBG_BLOCK_ID_TD06 = 0xc6, DBG_BLOCK_ID_TD07 = 0xc7, DBG_BLOCK_ID_TD08 = 0xc8, DBG_BLOCK_ID_TD09 = 0xc9, DBG_BLOCK_ID_TD0A = 0xca, DBG_BLOCK_ID_TD0B = 0xcb, DBG_BLOCK_ID_UNUSED43 = 0xcc, DBG_BLOCK_ID_UNUSED44 = 0xcd, DBG_BLOCK_ID_UNUSED45 = 0xce, DBG_BLOCK_ID_UNUSED46 = 0xcf, DBG_BLOCK_ID_TD10 = 0xd0, DBG_BLOCK_ID_TD11 = 0xd1, DBG_BLOCK_ID_TD12 = 0xd2, DBG_BLOCK_ID_TD13 = 0xd3, DBG_BLOCK_ID_TD14 = 0xd4, DBG_BLOCK_ID_TD15 = 0xd5, DBG_BLOCK_ID_TD16 = 0xd6, DBG_BLOCK_ID_TD17 = 0xd7, DBG_BLOCK_ID_TD18 = 0xd8, DBG_BLOCK_ID_TD19 = 0xd9, DBG_BLOCK_ID_TD1A = 0xda, DBG_BLOCK_ID_TD1B = 0xdb, DBG_BLOCK_ID_UNUSED47 = 0xdc, DBG_BLOCK_ID_UNUSED48 = 0xdd, DBG_BLOCK_ID_UNUSED49 = 0xde, DBG_BLOCK_ID_UNUSED50 = 0xdf, DBG_BLOCK_ID_MCD0 = 0xe0, DBG_BLOCK_ID_MCD1 = 0xe1, DBG_BLOCK_ID_MCD2 = 0xe2, DBG_BLOCK_ID_MCD3 = 0xe3, DBG_BLOCK_ID_MCD4 = 0xe4, DBG_BLOCK_ID_MCD5 = 0xe5, DBG_BLOCK_ID_UNUSED51 = 0xe6, DBG_BLOCK_ID_UNUSED52 = 0xe7, } DebugBlockId_OLD; typedef enum DebugBlockId_BY2 { DBG_BLOCK_ID_RESERVED_BY2 = 0x0, DBG_BLOCK_ID_VMC_BY2 = 0x1, DBG_BLOCK_ID_CG_BY2 = 0x2, DBG_BLOCK_ID_GRBM_BY2 = 0x3, DBG_BLOCK_ID_CSC_BY2 = 0x4, DBG_BLOCK_ID_IH_BY2 = 0x5, DBG_BLOCK_ID_SQ_BY2 = 0x6, DBG_BLOCK_ID_GMCON_BY2 = 0x7, DBG_BLOCK_ID_DMA0_BY2 = 0x8, DBG_BLOCK_ID_SPIM_BY2 = 0x9, DBG_BLOCK_ID_SPIS_BY2 = 0xa, DBG_BLOCK_ID_PA0_BY2 = 0xb, DBG_BLOCK_ID_CP0_BY2 = 0xc, DBG_BLOCK_ID_CP2_BY2 = 0xd, DBG_BLOCK_ID_UVDU_BY2 = 0xe, DBG_BLOCK_ID_VCE_BY2 = 0xf, DBG_BLOCK_ID_VGT0_BY2 = 0x10, DBG_BLOCK_ID_IA_BY2 = 0x11, DBG_BLOCK_ID_SCT0_BY2 = 0x12, DBG_BLOCK_ID_SPM0_BY2 = 0x13, DBG_BLOCK_ID_TCAA_BY2 = 0x14, DBG_BLOCK_ID_TCCA_BY2 = 0x15, DBG_BLOCK_ID_MCC0_BY2 = 0x16, DBG_BLOCK_ID_MCC2_BY2 = 0x17, DBG_BLOCK_ID_SX0_BY2 = 0x18, DBG_BLOCK_ID_SX2_BY2 = 0x19, DBG_BLOCK_ID_UNUSED4_BY2 = 0x1a, DBG_BLOCK_ID_UNUSED6_BY2 = 0x1b, DBG_BLOCK_ID_PC0_BY2 = 0x1c, DBG_BLOCK_ID_UNUSED8_BY2 = 0x1d, DBG_BLOCK_ID_UNUSED10_BY2 = 0x1e, DBG_BLOCK_ID_MCB_BY2 = 0x1f, DBG_BLOCK_ID_SCB0_BY2 = 0x20, DBG_BLOCK_ID_UNUSED13_BY2 = 0x21, DBG_BLOCK_ID_SCF0_BY2 = 0x22, DBG_BLOCK_ID_UNUSED15_BY2 = 0x23, DBG_BLOCK_ID_BCI0_BY2 = 0x24, DBG_BLOCK_ID_BCI2_BY2 = 0x25, DBG_BLOCK_ID_UNUSED17_BY2 = 0x26, DBG_BLOCK_ID_UNUSED19_BY2 = 0x27, DBG_BLOCK_ID_CB00_BY2 = 0x28, DBG_BLOCK_ID_CB02_BY2 = 0x29, DBG_BLOCK_ID_CB04_BY2 = 0x2a, DBG_BLOCK_ID_UNUSED22_BY2 = 0x2b, DBG_BLOCK_ID_CB10_BY2 = 0x2c, DBG_BLOCK_ID_CB12_BY2 = 0x2d, DBG_BLOCK_ID_CB14_BY2 = 0x2e, DBG_BLOCK_ID_UNUSED25_BY2 = 0x2f, DBG_BLOCK_ID_TCP0_BY2 = 0x30, DBG_BLOCK_ID_TCP2_BY2 = 0x31, DBG_BLOCK_ID_TCP4_BY2 = 0x32, DBG_BLOCK_ID_TCP6_BY2 = 0x33, DBG_BLOCK_ID_TCP8_BY2 = 0x34, DBG_BLOCK_ID_TCP10_BY2 = 0x35, DBG_BLOCK_ID_TCP12_BY2 = 0x36, DBG_BLOCK_ID_TCP14_BY2 = 0x37, DBG_BLOCK_ID_TCP16_BY2 = 0x38, DBG_BLOCK_ID_TCP18_BY2 = 0x39, DBG_BLOCK_ID_TCP20_BY2 = 0x3a, DBG_BLOCK_ID_TCP22_BY2 = 0x3b, DBG_BLOCK_ID_TCP_RESERVED0_BY2 = 0x3c, DBG_BLOCK_ID_TCP_RESERVED2_BY2 = 0x3d, DBG_BLOCK_ID_TCP_RESERVED4_BY2 = 0x3e, DBG_BLOCK_ID_TCP_RESERVED6_BY2 = 0x3f, DBG_BLOCK_ID_DB00_BY2 = 0x40, DBG_BLOCK_ID_DB02_BY2 = 0x41, DBG_BLOCK_ID_DB04_BY2 = 0x42, DBG_BLOCK_ID_UNUSED28_BY2 = 0x43, DBG_BLOCK_ID_DB10_BY2 = 0x44, DBG_BLOCK_ID_DB12_BY2 = 0x45, DBG_BLOCK_ID_DB14_BY2 = 0x46, DBG_BLOCK_ID_UNUSED31_BY2 = 0x47, DBG_BLOCK_ID_TCC0_BY2 = 0x48, DBG_BLOCK_ID_TCC2_BY2 = 0x49, DBG_BLOCK_ID_TCC4_BY2 = 0x4a, DBG_BLOCK_ID_TCC6_BY2 = 0x4b, DBG_BLOCK_ID_SPS00_BY2 
= 0x4c, DBG_BLOCK_ID_SPS02_BY2 = 0x4d, DBG_BLOCK_ID_SPS11_BY2 = 0x4e, DBG_BLOCK_ID_UNUSED33_BY2 = 0x4f, DBG_BLOCK_ID_TA00_BY2 = 0x50, DBG_BLOCK_ID_TA02_BY2 = 0x51, DBG_BLOCK_ID_TA04_BY2 = 0x52, DBG_BLOCK_ID_TA06_BY2 = 0x53, DBG_BLOCK_ID_TA08_BY2 = 0x54, DBG_BLOCK_ID_TA0A_BY2 = 0x55, DBG_BLOCK_ID_UNUSED35_BY2 = 0x56, DBG_BLOCK_ID_UNUSED37_BY2 = 0x57, DBG_BLOCK_ID_TA10_BY2 = 0x58, DBG_BLOCK_ID_TA12_BY2 = 0x59, DBG_BLOCK_ID_TA14_BY2 = 0x5a, DBG_BLOCK_ID_TA16_BY2 = 0x5b, DBG_BLOCK_ID_TA18_BY2 = 0x5c, DBG_BLOCK_ID_TA1A_BY2 = 0x5d, DBG_BLOCK_ID_UNUSED39_BY2 = 0x5e, DBG_BLOCK_ID_UNUSED41_BY2 = 0x5f, DBG_BLOCK_ID_TD00_BY2 = 0x60, DBG_BLOCK_ID_TD02_BY2 = 0x61, DBG_BLOCK_ID_TD04_BY2 = 0x62, DBG_BLOCK_ID_TD06_BY2 = 0x63, DBG_BLOCK_ID_TD08_BY2 = 0x64, DBG_BLOCK_ID_TD0A_BY2 = 0x65, DBG_BLOCK_ID_UNUSED43_BY2 = 0x66, DBG_BLOCK_ID_UNUSED45_BY2 = 0x67, DBG_BLOCK_ID_TD10_BY2 = 0x68, DBG_BLOCK_ID_TD12_BY2 = 0x69, DBG_BLOCK_ID_TD14_BY2 = 0x6a, DBG_BLOCK_ID_TD16_BY2 = 0x6b, DBG_BLOCK_ID_TD18_BY2 = 0x6c, DBG_BLOCK_ID_TD1A_BY2 = 0x6d, DBG_BLOCK_ID_UNUSED47_BY2 = 0x6e, DBG_BLOCK_ID_UNUSED49_BY2 = 0x6f, DBG_BLOCK_ID_MCD0_BY2 = 0x70, DBG_BLOCK_ID_MCD2_BY2 = 0x71, DBG_BLOCK_ID_MCD4_BY2 = 0x72, DBG_BLOCK_ID_UNUSED51_BY2 = 0x73, } DebugBlockId_BY2; typedef enum DebugBlockId_BY4 { DBG_BLOCK_ID_RESERVED_BY4 = 0x0, DBG_BLOCK_ID_CG_BY4 = 0x1, DBG_BLOCK_ID_CSC_BY4 = 0x2, DBG_BLOCK_ID_SQ_BY4 = 0x3, DBG_BLOCK_ID_DMA0_BY4 = 0x4, DBG_BLOCK_ID_SPIS_BY4 = 0x5, DBG_BLOCK_ID_CP0_BY4 = 0x6, DBG_BLOCK_ID_UVDU_BY4 = 0x7, DBG_BLOCK_ID_VGT0_BY4 = 0x8, DBG_BLOCK_ID_SCT0_BY4 = 0x9, DBG_BLOCK_ID_TCAA_BY4 = 0xa, DBG_BLOCK_ID_MCC0_BY4 = 0xb, DBG_BLOCK_ID_SX0_BY4 = 0xc, DBG_BLOCK_ID_UNUSED4_BY4 = 0xd, DBG_BLOCK_ID_PC0_BY4 = 0xe, DBG_BLOCK_ID_UNUSED10_BY4 = 0xf, DBG_BLOCK_ID_SCB0_BY4 = 0x10, DBG_BLOCK_ID_SCF0_BY4 = 0x11, DBG_BLOCK_ID_BCI0_BY4 = 0x12, DBG_BLOCK_ID_UNUSED17_BY4 = 0x13, DBG_BLOCK_ID_CB00_BY4 = 0x14, DBG_BLOCK_ID_CB04_BY4 = 0x15, DBG_BLOCK_ID_CB10_BY4 = 0x16, DBG_BLOCK_ID_CB14_BY4 = 0x17, DBG_BLOCK_ID_TCP0_BY4 = 0x18, DBG_BLOCK_ID_TCP4_BY4 = 0x19, DBG_BLOCK_ID_TCP8_BY4 = 0x1a, DBG_BLOCK_ID_TCP12_BY4 = 0x1b, DBG_BLOCK_ID_TCP16_BY4 = 0x1c, DBG_BLOCK_ID_TCP20_BY4 = 0x1d, DBG_BLOCK_ID_TCP_RESERVED0_BY4 = 0x1e, DBG_BLOCK_ID_TCP_RESERVED4_BY4 = 0x1f, DBG_BLOCK_ID_DB_BY4 = 0x20, DBG_BLOCK_ID_DB04_BY4 = 0x21, DBG_BLOCK_ID_DB10_BY4 = 0x22, DBG_BLOCK_ID_DB14_BY4 = 0x23, DBG_BLOCK_ID_TCC0_BY4 = 0x24, DBG_BLOCK_ID_TCC4_BY4 = 0x25, DBG_BLOCK_ID_SPS00_BY4 = 0x26, DBG_BLOCK_ID_SPS11_BY4 = 0x27, DBG_BLOCK_ID_TA00_BY4 = 0x28, DBG_BLOCK_ID_TA04_BY4 = 0x29, DBG_BLOCK_ID_TA08_BY4 = 0x2a, DBG_BLOCK_ID_UNUSED35_BY4 = 0x2b, DBG_BLOCK_ID_TA10_BY4 = 0x2c, DBG_BLOCK_ID_TA14_BY4 = 0x2d, DBG_BLOCK_ID_TA18_BY4 = 0x2e, DBG_BLOCK_ID_UNUSED39_BY4 = 0x2f, DBG_BLOCK_ID_TD00_BY4 = 0x30, DBG_BLOCK_ID_TD04_BY4 = 0x31, DBG_BLOCK_ID_TD08_BY4 = 0x32, DBG_BLOCK_ID_UNUSED43_BY4 = 0x33, DBG_BLOCK_ID_TD10_BY4 = 0x34, DBG_BLOCK_ID_TD14_BY4 = 0x35, DBG_BLOCK_ID_TD18_BY4 = 0x36, DBG_BLOCK_ID_UNUSED47_BY4 = 0x37, DBG_BLOCK_ID_MCD0_BY4 = 0x38, DBG_BLOCK_ID_MCD4_BY4 = 0x39, } DebugBlockId_BY4; typedef enum DebugBlockId_BY8 { DBG_BLOCK_ID_RESERVED_BY8 = 0x0, DBG_BLOCK_ID_CSC_BY8 = 0x1, DBG_BLOCK_ID_DMA0_BY8 = 0x2, DBG_BLOCK_ID_CP0_BY8 = 0x3, DBG_BLOCK_ID_VGT0_BY8 = 0x4, DBG_BLOCK_ID_TCAA_BY8 = 0x5, DBG_BLOCK_ID_SX0_BY8 = 0x6, DBG_BLOCK_ID_PC0_BY8 = 0x7, DBG_BLOCK_ID_SCB0_BY8 = 0x8, DBG_BLOCK_ID_BCI0_BY8 = 0x9, DBG_BLOCK_ID_CB00_BY8 = 0xa, DBG_BLOCK_ID_CB10_BY8 = 0xb, DBG_BLOCK_ID_TCP0_BY8 = 0xc, DBG_BLOCK_ID_TCP8_BY8 = 0xd, DBG_BLOCK_ID_TCP16_BY8 = 0xe, 
DBG_BLOCK_ID_TCP_RESERVED0_BY8 = 0xf, DBG_BLOCK_ID_DB00_BY8 = 0x10, DBG_BLOCK_ID_DB10_BY8 = 0x11, DBG_BLOCK_ID_TCC0_BY8 = 0x12, DBG_BLOCK_ID_SPS00_BY8 = 0x13, DBG_BLOCK_ID_TA00_BY8 = 0x14, DBG_BLOCK_ID_TA08_BY8 = 0x15, DBG_BLOCK_ID_TA10_BY8 = 0x16, DBG_BLOCK_ID_TA18_BY8 = 0x17, DBG_BLOCK_ID_TD00_BY8 = 0x18, DBG_BLOCK_ID_TD08_BY8 = 0x19, DBG_BLOCK_ID_TD10_BY8 = 0x1a, DBG_BLOCK_ID_TD18_BY8 = 0x1b, DBG_BLOCK_ID_MCD0_BY8 = 0x1c, } DebugBlockId_BY8; typedef enum DebugBlockId_BY16 { DBG_BLOCK_ID_RESERVED_BY16 = 0x0, DBG_BLOCK_ID_DMA0_BY16 = 0x1, DBG_BLOCK_ID_VGT0_BY16 = 0x2, DBG_BLOCK_ID_SX0_BY16 = 0x3, DBG_BLOCK_ID_SCB0_BY16 = 0x4, DBG_BLOCK_ID_CB00_BY16 = 0x5, DBG_BLOCK_ID_TCP0_BY16 = 0x6, DBG_BLOCK_ID_TCP16_BY16 = 0x7, DBG_BLOCK_ID_DB00_BY16 = 0x8, DBG_BLOCK_ID_TCC0_BY16 = 0x9, DBG_BLOCK_ID_TA00_BY16 = 0xa, DBG_BLOCK_ID_TA10_BY16 = 0xb, DBG_BLOCK_ID_TD00_BY16 = 0xc, DBG_BLOCK_ID_TD10_BY16 = 0xd, DBG_BLOCK_ID_MCD0_BY16 = 0xe, } DebugBlockId_BY16; typedef enum ColorTransform { DCC_CT_AUTO = 0x0, DCC_CT_NONE = 0x1, ABGR_TO_A_BG_G_RB = 0x2, BGRA_TO_BG_G_RB_A = 0x3, } ColorTransform; typedef enum CompareRef { REF_NEVER = 0x0, REF_LESS = 0x1, REF_EQUAL = 0x2, REF_LEQUAL = 0x3, REF_GREATER = 0x4, REF_NOTEQUAL = 0x5, REF_GEQUAL = 0x6, REF_ALWAYS = 0x7, } CompareRef; typedef enum ReadSize { READ_256_BITS = 0x0, READ_512_BITS = 0x1, } ReadSize; typedef enum DepthFormat { DEPTH_INVALID = 0x0, DEPTH_16 = 0x1, DEPTH_X8_24 = 0x2, DEPTH_8_24 = 0x3, DEPTH_X8_24_FLOAT = 0x4, DEPTH_8_24_FLOAT = 0x5, DEPTH_32_FLOAT = 0x6, DEPTH_X24_8_32_FLOAT = 0x7, } DepthFormat; typedef enum ZFormat { Z_INVALID = 0x0, Z_16 = 0x1, Z_24 = 0x2, Z_32_FLOAT = 0x3, } ZFormat; typedef enum StencilFormat { STENCIL_INVALID = 0x0, STENCIL_8 = 0x1, } StencilFormat; typedef enum CmaskMode { CMASK_CLEAR_NONE = 0x0, CMASK_CLEAR_ONE = 0x1, CMASK_CLEAR_ALL = 0x2, CMASK_ANY_EXPANDED = 0x3, CMASK_ALPHA0_FRAG1 = 0x4, CMASK_ALPHA0_FRAG2 = 0x5, CMASK_ALPHA0_FRAG4 = 0x6, CMASK_ALPHA0_FRAGS = 0x7, CMASK_ALPHA1_FRAG1 = 0x8, CMASK_ALPHA1_FRAG2 = 0x9, CMASK_ALPHA1_FRAG4 = 0xa, CMASK_ALPHA1_FRAGS = 0xb, CMASK_ALPHAX_FRAG1 = 0xc, CMASK_ALPHAX_FRAG2 = 0xd, CMASK_ALPHAX_FRAG4 = 0xe, CMASK_ALPHAX_FRAGS = 0xf, } CmaskMode; typedef enum QuadExportFormat { EXPORT_UNUSED = 0x0, EXPORT_32_R = 0x1, EXPORT_32_GR = 0x2, EXPORT_32_AR = 0x3, EXPORT_FP16_ABGR = 0x4, EXPORT_UNSIGNED16_ABGR = 0x5, EXPORT_SIGNED16_ABGR = 0x6, EXPORT_32_ABGR = 0x7, } QuadExportFormat; typedef enum QuadExportFormatOld { EXPORT_4P_32BPC_ABGR = 0x0, EXPORT_4P_16BPC_ABGR = 0x1, EXPORT_4P_32BPC_GR = 0x2, EXPORT_4P_32BPC_AR = 0x3, EXPORT_2P_32BPC_ABGR = 0x4, EXPORT_8P_32BPC_R = 0x5, } QuadExportFormatOld; typedef enum ColorFormat { COLOR_INVALID = 0x0, COLOR_8 = 0x1, COLOR_16 = 0x2, COLOR_8_8 = 0x3, COLOR_32 = 0x4, COLOR_16_16 = 0x5, COLOR_10_11_11 = 0x6, COLOR_11_11_10 = 0x7, COLOR_10_10_10_2 = 0x8, COLOR_2_10_10_10 = 0x9, COLOR_8_8_8_8 = 0xa, COLOR_32_32 = 0xb, COLOR_16_16_16_16 = 0xc, COLOR_RESERVED_13 = 0xd, COLOR_32_32_32_32 = 0xe, COLOR_RESERVED_15 = 0xf, COLOR_5_6_5 = 0x10, COLOR_1_5_5_5 = 0x11, COLOR_5_5_5_1 = 0x12, COLOR_4_4_4_4 = 0x13, COLOR_8_24 = 0x14, COLOR_24_8 = 0x15, COLOR_X24_8_32_FLOAT = 0x16, COLOR_RESERVED_23 = 0x17, } ColorFormat; typedef enum SurfaceFormat { FMT_INVALID = 0x0, FMT_8 = 0x1, FMT_16 = 0x2, FMT_8_8 = 0x3, FMT_32 = 0x4, FMT_16_16 = 0x5, FMT_10_11_11 = 0x6, FMT_11_11_10 = 0x7, FMT_10_10_10_2 = 0x8, FMT_2_10_10_10 = 0x9, FMT_8_8_8_8 = 0xa, FMT_32_32 = 0xb, FMT_16_16_16_16 = 0xc, FMT_32_32_32 = 0xd, FMT_32_32_32_32 = 0xe, FMT_RESERVED_4 = 0xf, FMT_5_6_5 = 0x10, 
FMT_1_5_5_5 = 0x11, FMT_5_5_5_1 = 0x12, FMT_4_4_4_4 = 0x13, FMT_8_24 = 0x14, FMT_24_8 = 0x15, FMT_X24_8_32_FLOAT = 0x16, FMT_RESERVED_33 = 0x17, FMT_11_11_10_FLOAT = 0x18, FMT_16_FLOAT = 0x19, FMT_32_FLOAT = 0x1a, FMT_16_16_FLOAT = 0x1b, FMT_8_24_FLOAT = 0x1c, FMT_24_8_FLOAT = 0x1d, FMT_32_32_FLOAT = 0x1e, FMT_10_11_11_FLOAT = 0x1f, FMT_16_16_16_16_FLOAT = 0x20, FMT_3_3_2 = 0x21, FMT_6_5_5 = 0x22, FMT_32_32_32_32_FLOAT = 0x23, FMT_RESERVED_36 = 0x24, FMT_1 = 0x25, FMT_1_REVERSED = 0x26, FMT_GB_GR = 0x27, FMT_BG_RG = 0x28, FMT_32_AS_8 = 0x29, FMT_32_AS_8_8 = 0x2a, FMT_5_9_9_9_SHAREDEXP = 0x2b, FMT_8_8_8 = 0x2c, FMT_16_16_16 = 0x2d, FMT_16_16_16_FLOAT = 0x2e, FMT_4_4 = 0x2f, FMT_32_32_32_FLOAT = 0x30, FMT_BC1 = 0x31, FMT_BC2 = 0x32, FMT_BC3 = 0x33, FMT_BC4 = 0x34, FMT_BC5 = 0x35, FMT_BC6 = 0x36, FMT_BC7 = 0x37, FMT_32_AS_32_32_32_32 = 0x38, FMT_APC3 = 0x39, FMT_APC4 = 0x3a, FMT_APC5 = 0x3b, FMT_APC6 = 0x3c, FMT_APC7 = 0x3d, FMT_CTX1 = 0x3e, FMT_RESERVED_63 = 0x3f, } SurfaceFormat; typedef enum BUF_DATA_FORMAT { BUF_DATA_FORMAT_INVALID = 0x0, BUF_DATA_FORMAT_8 = 0x1, BUF_DATA_FORMAT_16 = 0x2, BUF_DATA_FORMAT_8_8 = 0x3, BUF_DATA_FORMAT_32 = 0x4, BUF_DATA_FORMAT_16_16 = 0x5, BUF_DATA_FORMAT_10_11_11 = 0x6, BUF_DATA_FORMAT_11_11_10 = 0x7, BUF_DATA_FORMAT_10_10_10_2 = 0x8, BUF_DATA_FORMAT_2_10_10_10 = 0x9, BUF_DATA_FORMAT_8_8_8_8 = 0xa, BUF_DATA_FORMAT_32_32 = 0xb, BUF_DATA_FORMAT_16_16_16_16 = 0xc, BUF_DATA_FORMAT_32_32_32 = 0xd, BUF_DATA_FORMAT_32_32_32_32 = 0xe, BUF_DATA_FORMAT_RESERVED_15 = 0xf, } BUF_DATA_FORMAT; typedef enum IMG_DATA_FORMAT { IMG_DATA_FORMAT_INVALID = 0x0, IMG_DATA_FORMAT_8 = 0x1, IMG_DATA_FORMAT_16 = 0x2, IMG_DATA_FORMAT_8_8 = 0x3, IMG_DATA_FORMAT_32 = 0x4, IMG_DATA_FORMAT_16_16 = 0x5, IMG_DATA_FORMAT_10_11_11 = 0x6, IMG_DATA_FORMAT_11_11_10 = 0x7, IMG_DATA_FORMAT_10_10_10_2 = 0x8, IMG_DATA_FORMAT_2_10_10_10 = 0x9, IMG_DATA_FORMAT_8_8_8_8 = 0xa, IMG_DATA_FORMAT_32_32 = 0xb, IMG_DATA_FORMAT_16_16_16_16 = 0xc, IMG_DATA_FORMAT_32_32_32 = 0xd, IMG_DATA_FORMAT_32_32_32_32 = 0xe, IMG_DATA_FORMAT_RESERVED_15 = 0xf, IMG_DATA_FORMAT_5_6_5 = 0x10, IMG_DATA_FORMAT_1_5_5_5 = 0x11, IMG_DATA_FORMAT_5_5_5_1 = 0x12, IMG_DATA_FORMAT_4_4_4_4 = 0x13, IMG_DATA_FORMAT_8_24 = 0x14, IMG_DATA_FORMAT_24_8 = 0x15, IMG_DATA_FORMAT_X24_8_32 = 0x16, IMG_DATA_FORMAT_RESERVED_23 = 0x17, IMG_DATA_FORMAT_RESERVED_24 = 0x18, IMG_DATA_FORMAT_RESERVED_25 = 0x19, IMG_DATA_FORMAT_RESERVED_26 = 0x1a, IMG_DATA_FORMAT_RESERVED_27 = 0x1b, IMG_DATA_FORMAT_RESERVED_28 = 0x1c, IMG_DATA_FORMAT_RESERVED_29 = 0x1d, IMG_DATA_FORMAT_RESERVED_30 = 0x1e, IMG_DATA_FORMAT_RESERVED_31 = 0x1f, IMG_DATA_FORMAT_GB_GR = 0x20, IMG_DATA_FORMAT_BG_RG = 0x21, IMG_DATA_FORMAT_5_9_9_9 = 0x22, IMG_DATA_FORMAT_BC1 = 0x23, IMG_DATA_FORMAT_BC2 = 0x24, IMG_DATA_FORMAT_BC3 = 0x25, IMG_DATA_FORMAT_BC4 = 0x26, IMG_DATA_FORMAT_BC5 = 0x27, IMG_DATA_FORMAT_BC6 = 0x28, IMG_DATA_FORMAT_BC7 = 0x29, IMG_DATA_FORMAT_RESERVED_42 = 0x2a, IMG_DATA_FORMAT_RESERVED_43 = 0x2b, IMG_DATA_FORMAT_FMASK8_S2_F1 = 0x2c, IMG_DATA_FORMAT_FMASK8_S4_F1 = 0x2d, IMG_DATA_FORMAT_FMASK8_S8_F1 = 0x2e, IMG_DATA_FORMAT_FMASK8_S2_F2 = 0x2f, IMG_DATA_FORMAT_FMASK8_S4_F2 = 0x30, IMG_DATA_FORMAT_FMASK8_S4_F4 = 0x31, IMG_DATA_FORMAT_FMASK16_S16_F1 = 0x32, IMG_DATA_FORMAT_FMASK16_S8_F2 = 0x33, IMG_DATA_FORMAT_FMASK32_S16_F2 = 0x34, IMG_DATA_FORMAT_FMASK32_S8_F4 = 0x35, IMG_DATA_FORMAT_FMASK32_S8_F8 = 0x36, IMG_DATA_FORMAT_FMASK64_S16_F4 = 0x37, IMG_DATA_FORMAT_FMASK64_S16_F8 = 0x38, IMG_DATA_FORMAT_4_4 = 0x39, IMG_DATA_FORMAT_6_5_5 = 0x3a, IMG_DATA_FORMAT_1 = 0x3b, 
IMG_DATA_FORMAT_1_REVERSED = 0x3c, IMG_DATA_FORMAT_32_AS_8 = 0x3d, IMG_DATA_FORMAT_32_AS_8_8 = 0x3e, IMG_DATA_FORMAT_32_AS_32_32_32_32 = 0x3f, } IMG_DATA_FORMAT; typedef enum BUF_NUM_FORMAT { BUF_NUM_FORMAT_UNORM = 0x0, BUF_NUM_FORMAT_SNORM = 0x1, BUF_NUM_FORMAT_USCALED = 0x2, BUF_NUM_FORMAT_SSCALED = 0x3, BUF_NUM_FORMAT_UINT = 0x4, BUF_NUM_FORMAT_SINT = 0x5, BUF_NUM_FORMAT_RESERVED_6 = 0x6, BUF_NUM_FORMAT_FLOAT = 0x7, } BUF_NUM_FORMAT; typedef enum IMG_NUM_FORMAT { IMG_NUM_FORMAT_UNORM = 0x0, IMG_NUM_FORMAT_SNORM = 0x1, IMG_NUM_FORMAT_USCALED = 0x2, IMG_NUM_FORMAT_SSCALED = 0x3, IMG_NUM_FORMAT_UINT = 0x4, IMG_NUM_FORMAT_SINT = 0x5, IMG_NUM_FORMAT_RESERVED_6 = 0x6, IMG_NUM_FORMAT_FLOAT = 0x7, IMG_NUM_FORMAT_RESERVED_8 = 0x8, IMG_NUM_FORMAT_SRGB = 0x9, IMG_NUM_FORMAT_RESERVED_10 = 0xa, IMG_NUM_FORMAT_RESERVED_11 = 0xb, IMG_NUM_FORMAT_RESERVED_12 = 0xc, IMG_NUM_FORMAT_RESERVED_13 = 0xd, IMG_NUM_FORMAT_RESERVED_14 = 0xe, IMG_NUM_FORMAT_RESERVED_15 = 0xf, } IMG_NUM_FORMAT; typedef enum TileType { ARRAY_COLOR_TILE = 0x0, ARRAY_DEPTH_TILE = 0x1, } TileType; typedef enum NonDispTilingOrder { ADDR_SURF_MICRO_TILING_DISPLAY = 0x0, ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1, } NonDispTilingOrder; typedef enum MicroTileMode { ADDR_SURF_DISPLAY_MICRO_TILING = 0x0, ADDR_SURF_THIN_MICRO_TILING = 0x1, ADDR_SURF_DEPTH_MICRO_TILING = 0x2, ADDR_SURF_ROTATED_MICRO_TILING = 0x3, ADDR_SURF_THICK_MICRO_TILING = 0x4, } MicroTileMode; typedef enum TileSplit { ADDR_SURF_TILE_SPLIT_64B = 0x0, ADDR_SURF_TILE_SPLIT_128B = 0x1, ADDR_SURF_TILE_SPLIT_256B = 0x2, ADDR_SURF_TILE_SPLIT_512B = 0x3, ADDR_SURF_TILE_SPLIT_1KB = 0x4, ADDR_SURF_TILE_SPLIT_2KB = 0x5, ADDR_SURF_TILE_SPLIT_4KB = 0x6, } TileSplit; typedef enum SampleSplit { ADDR_SURF_SAMPLE_SPLIT_1 = 0x0, ADDR_SURF_SAMPLE_SPLIT_2 = 0x1, ADDR_SURF_SAMPLE_SPLIT_4 = 0x2, ADDR_SURF_SAMPLE_SPLIT_8 = 0x3, } SampleSplit; typedef enum PipeConfig { ADDR_SURF_P2 = 0x0, ADDR_SURF_P2_RESERVED0 = 0x1, ADDR_SURF_P2_RESERVED1 = 0x2, ADDR_SURF_P2_RESERVED2 = 0x3, ADDR_SURF_P4_8x16 = 0x4, ADDR_SURF_P4_16x16 = 0x5, ADDR_SURF_P4_16x32 = 0x6, ADDR_SURF_P4_32x32 = 0x7, ADDR_SURF_P8_16x16_8x16 = 0x8, ADDR_SURF_P8_16x32_8x16 = 0x9, ADDR_SURF_P8_32x32_8x16 = 0xa, ADDR_SURF_P8_16x32_16x16 = 0xb, ADDR_SURF_P8_32x32_16x16 = 0xc, ADDR_SURF_P8_32x32_16x32 = 0xd, ADDR_SURF_P8_32x64_32x32 = 0xe, ADDR_SURF_P8_RESERVED0 = 0xf, ADDR_SURF_P16_32x32_8x16 = 0x10, ADDR_SURF_P16_32x32_16x16 = 0x11, } PipeConfig; typedef enum NumBanks { ADDR_SURF_2_BANK = 0x0, ADDR_SURF_4_BANK = 0x1, ADDR_SURF_8_BANK = 0x2, ADDR_SURF_16_BANK = 0x3, } NumBanks; typedef enum BankWidth { ADDR_SURF_BANK_WIDTH_1 = 0x0, ADDR_SURF_BANK_WIDTH_2 = 0x1, ADDR_SURF_BANK_WIDTH_4 = 0x2, ADDR_SURF_BANK_WIDTH_8 = 0x3, } BankWidth; typedef enum BankHeight { ADDR_SURF_BANK_HEIGHT_1 = 0x0, ADDR_SURF_BANK_HEIGHT_2 = 0x1, ADDR_SURF_BANK_HEIGHT_4 = 0x2, ADDR_SURF_BANK_HEIGHT_8 = 0x3, } BankHeight; typedef enum BankWidthHeight { ADDR_SURF_BANK_WH_1 = 0x0, ADDR_SURF_BANK_WH_2 = 0x1, ADDR_SURF_BANK_WH_4 = 0x2, ADDR_SURF_BANK_WH_8 = 0x3, } BankWidthHeight; typedef enum MacroTileAspect { ADDR_SURF_MACRO_ASPECT_1 = 0x0, ADDR_SURF_MACRO_ASPECT_2 = 0x1, ADDR_SURF_MACRO_ASPECT_4 = 0x2, ADDR_SURF_MACRO_ASPECT_8 = 0x3, } MacroTileAspect; typedef enum GATCL1RequestType { GATCL1_TYPE_NORMAL = 0x0, GATCL1_TYPE_SHOOTDOWN = 0x1, GATCL1_TYPE_BYPASS = 0x2, } GATCL1RequestType; typedef enum TCC_CACHE_POLICIES { TCC_CACHE_POLICY_LRU = 0x0, TCC_CACHE_POLICY_STREAM = 0x1, } TCC_CACHE_POLICIES; typedef enum MTYPE { MTYPE_NC_NV = 0x0, MTYPE_NC = 0x1, MTYPE_CC = 0x2, 
MTYPE_UC = 0x3, } MTYPE; typedef enum PERFMON_COUNTER_MODE { PERFMON_COUNTER_MODE_ACCUM = 0x0, PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1, PERFMON_COUNTER_MODE_MAX = 0x2, PERFMON_COUNTER_MODE_DIRTY = 0x3, PERFMON_COUNTER_MODE_SAMPLE = 0x4, PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT = 0x5, PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT = 0x6, PERFMON_COUNTER_MODE_CYCLES_GE_HI = 0x7, PERFMON_COUNTER_MODE_CYCLES_EQ_HI = 0x8, PERFMON_COUNTER_MODE_INACTIVE_CYCLES = 0x9, PERFMON_COUNTER_MODE_RESERVED = 0xf, } PERFMON_COUNTER_MODE; typedef enum PERFMON_SPM_MODE { PERFMON_SPM_MODE_OFF = 0x0, PERFMON_SPM_MODE_16BIT_CLAMP = 0x1, PERFMON_SPM_MODE_16BIT_NO_CLAMP = 0x2, PERFMON_SPM_MODE_32BIT_CLAMP = 0x3, PERFMON_SPM_MODE_32BIT_NO_CLAMP = 0x4, PERFMON_SPM_MODE_RESERVED_5 = 0x5, PERFMON_SPM_MODE_RESERVED_6 = 0x6, PERFMON_SPM_MODE_RESERVED_7 = 0x7, PERFMON_SPM_MODE_TEST_MODE_0 = 0x8, PERFMON_SPM_MODE_TEST_MODE_1 = 0x9, PERFMON_SPM_MODE_TEST_MODE_2 = 0xa, } PERFMON_SPM_MODE; typedef enum SurfaceTiling { ARRAY_LINEAR = 0x0, ARRAY_TILED = 0x1, } SurfaceTiling; typedef enum SurfaceArray { ARRAY_1D = 0x0, ARRAY_2D = 0x1, ARRAY_3D = 0x2, ARRAY_3D_SLICE = 0x3, } SurfaceArray; typedef enum ColorArray { ARRAY_2D_ALT_COLOR = 0x0, ARRAY_2D_COLOR = 0x1, ARRAY_3D_SLICE_COLOR = 0x3, } ColorArray; typedef enum DepthArray { ARRAY_2D_ALT_DEPTH = 0x0, ARRAY_2D_DEPTH = 0x1, } DepthArray; typedef enum ENUM_NUM_SIMD_PER_CU { NUM_SIMD_PER_CU = 0x4, } ENUM_NUM_SIMD_PER_CU; typedef enum MEM_PWR_FORCE_CTRL { NO_FORCE_REQUEST = 0x0, FORCE_LIGHT_SLEEP_REQUEST = 0x1, FORCE_DEEP_SLEEP_REQUEST = 0x2, FORCE_SHUT_DOWN_REQUEST = 0x3, } MEM_PWR_FORCE_CTRL; typedef enum MEM_PWR_FORCE_CTRL2 { NO_FORCE_REQ = 0x0, FORCE_LIGHT_SLEEP_REQ = 0x1, } MEM_PWR_FORCE_CTRL2; typedef enum MEM_PWR_DIS_CTRL { ENABLE_MEM_PWR_CTRL = 0x0, DISABLE_MEM_PWR_CTRL = 0x1, } MEM_PWR_DIS_CTRL; typedef enum MEM_PWR_SEL_CTRL { DYNAMIC_SHUT_DOWN_ENABLE = 0x0, DYNAMIC_DEEP_SLEEP_ENABLE = 0x1, DYNAMIC_LIGHT_SLEEP_ENABLE = 0x2, } MEM_PWR_SEL_CTRL; typedef enum MEM_PWR_SEL_CTRL2 { DYNAMIC_DEEP_SLEEP_EN = 0x0, DYNAMIC_LIGHT_SLEEP_EN = 0x1, } MEM_PWR_SEL_CTRL2; #endif /* GMC_8_1_ENUM_H */
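/*
 * Note on the DebugBlockId_BY2/_BY4/_BY8/_BY16 enums above: each
 * enumerant is the corresponding DebugBlockId_OLD value shifted right
 * by log2 of the grouping factor, e.g.:
 *
 *   DBG_BLOCK_ID_TCP0      = 0x60
 *   DBG_BLOCK_ID_TCP0_BY2  = 0x60 >> 1   (0x30)
 *   DBG_BLOCK_ID_TCP0_BY4  = 0x60 >> 2   (0x18)
 *   DBG_BLOCK_ID_TCP0_BY8  = 0x60 >> 3   (0x0c)
 *   DBG_BLOCK_ID_TCP0_BY16 = 0x60 >> 4   (0x06)
 */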
// SPDX-License-Identifier: GPL-2.0+ // // AMD ALSA SoC PCM Driver // // Copyright (C) 2021 Advanced Micro Devices, Inc. All rights reserved. #include <linux/platform_device.h> #include <linux/module.h> #include <linux/err.h> #include <linux/io.h> #include <linux/pm_runtime.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dai.h> #include "acp5x.h" #define DRV_NAME "acp5x_i2s_dma" static const struct snd_pcm_hardware acp5x_pcm_hardware_playback = { .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE, .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .rate_min = 8000, .rate_max = 96000, .buffer_bytes_max = PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE, .period_bytes_min = PLAYBACK_MIN_PERIOD_SIZE, .period_bytes_max = PLAYBACK_MAX_PERIOD_SIZE, .periods_min = PLAYBACK_MIN_NUM_PERIODS, .periods_max = PLAYBACK_MAX_NUM_PERIODS, }; static const struct snd_pcm_hardware acp5x_pcm_hardware_capture = { .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE, .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .rate_min = 8000, .rate_max = 96000, .buffer_bytes_max = CAPTURE_MAX_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE, .period_bytes_min = CAPTURE_MIN_PERIOD_SIZE, .period_bytes_max = CAPTURE_MAX_PERIOD_SIZE, .periods_min = CAPTURE_MIN_NUM_PERIODS, .periods_max = CAPTURE_MAX_NUM_PERIODS, }; static irqreturn_t i2s_irq_handler(int irq, void *dev_id) { struct i2s_dev_data *vg_i2s_data; u16 irq_flag; u32 val; vg_i2s_data = dev_id; if (!vg_i2s_data) return IRQ_NONE; irq_flag = 0; val = acp_readl(vg_i2s_data->acp5x_base + ACP_EXTERNAL_INTR_STAT); if ((val & BIT(HS_TX_THRESHOLD)) && vg_i2s_data->play_stream) { acp_writel(BIT(HS_TX_THRESHOLD), vg_i2s_data->acp5x_base + ACP_EXTERNAL_INTR_STAT); snd_pcm_period_elapsed(vg_i2s_data->play_stream); irq_flag = 1; } if ((val & BIT(I2S_TX_THRESHOLD)) && vg_i2s_data->i2ssp_play_stream) { acp_writel(BIT(I2S_TX_THRESHOLD), vg_i2s_data->acp5x_base + ACP_EXTERNAL_INTR_STAT); snd_pcm_period_elapsed(vg_i2s_data->i2ssp_play_stream); irq_flag = 1; } if ((val & BIT(HS_RX_THRESHOLD)) && vg_i2s_data->capture_stream) { acp_writel(BIT(HS_RX_THRESHOLD), vg_i2s_data->acp5x_base + ACP_EXTERNAL_INTR_STAT); snd_pcm_period_elapsed(vg_i2s_data->capture_stream); irq_flag = 1; } if ((val & BIT(I2S_RX_THRESHOLD)) && vg_i2s_data->i2ssp_capture_stream) { acp_writel(BIT(I2S_RX_THRESHOLD), vg_i2s_data->acp5x_base + ACP_EXTERNAL_INTR_STAT); snd_pcm_period_elapsed(vg_i2s_data->i2ssp_capture_stream); irq_flag = 1; } if (irq_flag) return IRQ_HANDLED; else return IRQ_NONE; } static void config_acp5x_dma(struct i2s_stream_instance *rtd, int direction) { u16 page_idx; u32 low, high, val, acp_fifo_addr, reg_fifo_addr; u32 reg_dma_size, reg_fifo_size; dma_addr_t addr; addr = rtd->dma_addr; if (direction == SNDRV_PCM_STREAM_PLAYBACK) { switch (rtd->i2s_instance) { case I2S_HS_INSTANCE: val = ACP_SRAM_HS_PB_PTE_OFFSET; break; case I2S_SP_INSTANCE: default: val = ACP_SRAM_SP_PB_PTE_OFFSET; } } else { switch (rtd->i2s_instance) { case I2S_HS_INSTANCE: val = ACP_SRAM_HS_CP_PTE_OFFSET; break; 
case I2S_SP_INSTANCE: default: val = ACP_SRAM_SP_CP_PTE_OFFSET; } } /* Group Enable */ acp_writel(ACP_SRAM_PTE_OFFSET | BIT(31), rtd->acp5x_base + ACPAXI2AXI_ATU_BASE_ADDR_GRP_1); acp_writel(PAGE_SIZE_4K_ENABLE, rtd->acp5x_base + ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1); for (page_idx = 0; page_idx < rtd->num_pages; page_idx++) { /* Load the low address of page into ACP SRAM through SRBM */ low = lower_32_bits(addr); high = upper_32_bits(addr); acp_writel(low, rtd->acp5x_base + ACP_SCRATCH_REG_0 + val); high |= BIT(31); acp_writel(high, rtd->acp5x_base + ACP_SCRATCH_REG_0 + val + 4); /* Move to next physically contiguous page */ val += 8; addr += PAGE_SIZE; } if (direction == SNDRV_PCM_STREAM_PLAYBACK) { switch (rtd->i2s_instance) { case I2S_HS_INSTANCE: reg_dma_size = ACP_HS_TX_DMA_SIZE; acp_fifo_addr = ACP_SRAM_PTE_OFFSET + HS_PB_FIFO_ADDR_OFFSET; reg_fifo_addr = ACP_HS_TX_FIFOADDR; reg_fifo_size = ACP_HS_TX_FIFOSIZE; acp_writel(I2S_HS_TX_MEM_WINDOW_START, rtd->acp5x_base + ACP_HS_TX_RINGBUFADDR); break; case I2S_SP_INSTANCE: default: reg_dma_size = ACP_I2S_TX_DMA_SIZE; acp_fifo_addr = ACP_SRAM_PTE_OFFSET + SP_PB_FIFO_ADDR_OFFSET; reg_fifo_addr = ACP_I2S_TX_FIFOADDR; reg_fifo_size = ACP_I2S_TX_FIFOSIZE; acp_writel(I2S_SP_TX_MEM_WINDOW_START, rtd->acp5x_base + ACP_I2S_TX_RINGBUFADDR); } } else { switch (rtd->i2s_instance) { case I2S_HS_INSTANCE: reg_dma_size = ACP_HS_RX_DMA_SIZE; acp_fifo_addr = ACP_SRAM_PTE_OFFSET + HS_CAPT_FIFO_ADDR_OFFSET; reg_fifo_addr = ACP_HS_RX_FIFOADDR; reg_fifo_size = ACP_HS_RX_FIFOSIZE; acp_writel(I2S_HS_RX_MEM_WINDOW_START, rtd->acp5x_base + ACP_HS_RX_RINGBUFADDR); break; case I2S_SP_INSTANCE: default: reg_dma_size = ACP_I2S_RX_DMA_SIZE; acp_fifo_addr = ACP_SRAM_PTE_OFFSET + SP_CAPT_FIFO_ADDR_OFFSET; reg_fifo_addr = ACP_I2S_RX_FIFOADDR; reg_fifo_size = ACP_I2S_RX_FIFOSIZE; acp_writel(I2S_SP_RX_MEM_WINDOW_START, rtd->acp5x_base + ACP_I2S_RX_RINGBUFADDR); } } acp_writel(DMA_SIZE, rtd->acp5x_base + reg_dma_size); acp_writel(acp_fifo_addr, rtd->acp5x_base + reg_fifo_addr); acp_writel(FIFO_SIZE, rtd->acp5x_base + reg_fifo_size); acp_writel(BIT(I2S_RX_THRESHOLD) | BIT(HS_RX_THRESHOLD) | BIT(I2S_TX_THRESHOLD) | BIT(HS_TX_THRESHOLD), rtd->acp5x_base + ACP_EXTERNAL_INTR_CNTL); } static int acp5x_dma_open(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; struct snd_soc_pcm_runtime *prtd; struct i2s_dev_data *adata; struct i2s_stream_instance *i2s_data; int ret; runtime = substream->runtime; prtd = snd_soc_substream_to_rtd(substream); component = snd_soc_rtdcom_lookup(prtd, DRV_NAME); adata = dev_get_drvdata(component->dev); i2s_data = kzalloc(sizeof(*i2s_data), GFP_KERNEL); if (!i2s_data) return -ENOMEM; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) runtime->hw = acp5x_pcm_hardware_playback; else runtime->hw = acp5x_pcm_hardware_capture; ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) { dev_err(component->dev, "set integer constraint failed\n"); kfree(i2s_data); return ret; } i2s_data->acp5x_base = adata->acp5x_base; runtime->private_data = i2s_data; return ret; } static int acp5x_dma_hw_params(struct snd_soc_component *component, struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct i2s_stream_instance *rtd; struct snd_soc_pcm_runtime *prtd; struct snd_soc_card *card; struct acp5x_platform_info *pinfo; struct i2s_dev_data *adata; u64 size; prtd = snd_soc_substream_to_rtd(substream); card = prtd->card; pinfo = snd_soc_card_get_drvdata(card); adata =
dev_get_drvdata(component->dev); rtd = substream->runtime->private_data; if (!rtd) return -EINVAL; if (pinfo) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { rtd->i2s_instance = pinfo->play_i2s_instance; switch (rtd->i2s_instance) { case I2S_HS_INSTANCE: adata->play_stream = substream; break; case I2S_SP_INSTANCE: default: adata->i2ssp_play_stream = substream; } } else { rtd->i2s_instance = pinfo->cap_i2s_instance; switch (rtd->i2s_instance) { case I2S_HS_INSTANCE: adata->capture_stream = substream; break; case I2S_SP_INSTANCE: default: adata->i2ssp_capture_stream = substream; } } } else { dev_err(component->dev, "pinfo failed\n"); return -EINVAL; } size = params_buffer_bytes(params); rtd->dma_addr = substream->runtime->dma_addr; rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT); config_acp5x_dma(rtd, substream->stream); return 0; } static snd_pcm_uframes_t acp5x_dma_pointer(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct i2s_stream_instance *rtd; u32 pos; u32 buffersize; u64 bytescount; rtd = substream->runtime->private_data; buffersize = frames_to_bytes(substream->runtime, substream->runtime->buffer_size); bytescount = acp_get_byte_count(rtd, substream->stream); if (bytescount > rtd->bytescount) bytescount -= rtd->bytescount; pos = do_div(bytescount, buffersize); return bytes_to_frames(substream->runtime, pos); } static int acp5x_dma_new(struct snd_soc_component *component, struct snd_soc_pcm_runtime *rtd) { struct device *parent = component->dev->parent; snd_pcm_set_managed_buffer_all(rtd->pcm, SNDRV_DMA_TYPE_DEV, parent, MIN_BUFFER, MAX_BUFFER); return 0; } static int acp5x_dma_close(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *prtd; struct i2s_dev_data *adata; struct i2s_stream_instance *ins; prtd = snd_soc_substream_to_rtd(substream); component = snd_soc_rtdcom_lookup(prtd, DRV_NAME); adata = dev_get_drvdata(component->dev); ins = substream->runtime->private_data; if (!ins) return -EINVAL; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { switch (ins->i2s_instance) { case I2S_HS_INSTANCE: adata->play_stream = NULL; break; case I2S_SP_INSTANCE: default: adata->i2ssp_play_stream = NULL; } } else { switch (ins->i2s_instance) { case I2S_HS_INSTANCE: adata->capture_stream = NULL; break; case I2S_SP_INSTANCE: default: adata->i2ssp_capture_stream = NULL; } } kfree(ins); return 0; } static const struct snd_soc_component_driver acp5x_i2s_component = { .name = DRV_NAME, .open = acp5x_dma_open, .close = acp5x_dma_close, .hw_params = acp5x_dma_hw_params, .pointer = acp5x_dma_pointer, .pcm_construct = acp5x_dma_new, }; static int acp5x_audio_probe(struct platform_device *pdev) { struct resource *res; struct i2s_dev_data *adata; unsigned int irqflags; int status; if (!pdev->dev.platform_data) { dev_err(&pdev->dev, "platform_data not retrieved\n"); return -ENODEV; } irqflags = *((unsigned int *)(pdev->dev.platform_data)); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "IORESOURCE_MEM FAILED\n"); return -ENODEV; } adata = devm_kzalloc(&pdev->dev, sizeof(*adata), GFP_KERNEL); if (!adata) return -ENOMEM; adata->acp5x_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!adata->acp5x_base) return -ENOMEM; status = platform_get_irq(pdev, 0); if (status < 0) return status; adata->i2s_irq = status; dev_set_drvdata(&pdev->dev, adata); status = devm_snd_soc_register_component(&pdev->dev, &acp5x_i2s_component, NULL, 0); if (status) { 
dev_err(&pdev->dev, "Fail to register acp i2s component\n"); return status; } status = devm_request_irq(&pdev->dev, adata->i2s_irq, i2s_irq_handler, irqflags, "ACP5x_I2S_IRQ", adata); if (status) { dev_err(&pdev->dev, "ACP5x I2S IRQ request failed\n"); return status; } pm_runtime_set_autosuspend_delay(&pdev->dev, 2000); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); return 0; } static void acp5x_audio_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); } static int __maybe_unused acp5x_pcm_resume(struct device *dev) { struct i2s_dev_data *adata; struct i2s_stream_instance *rtd; u32 val; adata = dev_get_drvdata(dev); if (adata->play_stream && adata->play_stream->runtime) { rtd = adata->play_stream->runtime->private_data; config_acp5x_dma(rtd, SNDRV_PCM_STREAM_PLAYBACK); acp_writel((rtd->xfer_resolution << 3), rtd->acp5x_base + ACP_HSTDM_ITER); if (adata->tdm_mode == TDM_ENABLE) { acp_writel(adata->tdm_fmt, adata->acp5x_base + ACP_HSTDM_TXFRMT); val = acp_readl(adata->acp5x_base + ACP_HSTDM_ITER); acp_writel(val | 0x2, adata->acp5x_base + ACP_HSTDM_ITER); } } if (adata->i2ssp_play_stream && adata->i2ssp_play_stream->runtime) { rtd = adata->i2ssp_play_stream->runtime->private_data; config_acp5x_dma(rtd, SNDRV_PCM_STREAM_PLAYBACK); acp_writel((rtd->xfer_resolution << 3), rtd->acp5x_base + ACP_I2STDM_ITER); if (adata->tdm_mode == TDM_ENABLE) { acp_writel(adata->tdm_fmt, adata->acp5x_base + ACP_I2STDM_TXFRMT); val = acp_readl(adata->acp5x_base + ACP_I2STDM_ITER); acp_writel(val | 0x2, adata->acp5x_base + ACP_I2STDM_ITER); } } if (adata->capture_stream && adata->capture_stream->runtime) { rtd = adata->capture_stream->runtime->private_data; config_acp5x_dma(rtd, SNDRV_PCM_STREAM_CAPTURE); acp_writel((rtd->xfer_resolution << 3), rtd->acp5x_base + ACP_HSTDM_IRER); if (adata->tdm_mode == TDM_ENABLE) { acp_writel(adata->tdm_fmt, adata->acp5x_base + ACP_HSTDM_RXFRMT); val = acp_readl(adata->acp5x_base + ACP_HSTDM_IRER); acp_writel(val | 0x2, adata->acp5x_base + ACP_HSTDM_IRER); } } if (adata->i2ssp_capture_stream && adata->i2ssp_capture_stream->runtime) { rtd = adata->i2ssp_capture_stream->runtime->private_data; config_acp5x_dma(rtd, SNDRV_PCM_STREAM_CAPTURE); acp_writel((rtd->xfer_resolution << 3), rtd->acp5x_base + ACP_I2STDM_IRER); if (adata->tdm_mode == TDM_ENABLE) { acp_writel(adata->tdm_fmt, adata->acp5x_base + ACP_I2STDM_RXFRMT); val = acp_readl(adata->acp5x_base + ACP_I2STDM_IRER); acp_writel(val | 0x2, adata->acp5x_base + ACP_I2STDM_IRER); } } acp_writel(1, adata->acp5x_base + ACP_EXTERNAL_INTR_ENB); return 0; } static int __maybe_unused acp5x_pcm_suspend(struct device *dev) { struct i2s_dev_data *adata; adata = dev_get_drvdata(dev); acp_writel(0, adata->acp5x_base + ACP_EXTERNAL_INTR_ENB); return 0; } static int __maybe_unused acp5x_pcm_runtime_resume(struct device *dev) { struct i2s_dev_data *adata; adata = dev_get_drvdata(dev); acp_writel(1, adata->acp5x_base + ACP_EXTERNAL_INTR_ENB); return 0; } static const struct dev_pm_ops acp5x_pm_ops = { SET_RUNTIME_PM_OPS(acp5x_pcm_suspend, acp5x_pcm_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(acp5x_pcm_suspend, acp5x_pcm_resume) }; static struct platform_driver acp5x_dma_driver = { .probe = acp5x_audio_probe, .remove = acp5x_audio_remove, .driver = { .name = "acp5x_i2s_dma", .pm = &acp5x_pm_ops, }, }; module_platform_driver(acp5x_dma_driver); MODULE_AUTHOR("[email protected]"); MODULE_DESCRIPTION("AMD ACP 5.x PCM Driver"); 
MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRV_NAME);
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2003, 04, 05 Ralf Baechle ([email protected]) * Copyright (C) 2007 Maciej W. Rozycki * Copyright (C) 2008 Thiemo Seufer * Copyright (C) 2012 MIPS Technologies, Inc. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <asm/bugs.h> #include <asm/cacheops.h> #include <asm/cpu-type.h> #include <asm/inst.h> #include <asm/io.h> #include <asm/page.h> #include <asm/prefetch.h> #include <asm/bootinfo.h> #include <asm/mipsregs.h> #include <asm/mmu_context.h> #include <asm/regdef.h> #include <asm/cpu.h> #ifdef CONFIG_SIBYTE_DMA_PAGEOPS #include <asm/sibyte/sb1250.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_dma.h> #endif #include <asm/uasm.h> /* Handle labels (which must be positive integers). */ enum label_id { label_clear_nopref = 1, label_clear_pref, label_copy_nopref, label_copy_pref_both, label_copy_pref_store, }; UASM_L_LA(_clear_nopref) UASM_L_LA(_clear_pref) UASM_L_LA(_copy_nopref) UASM_L_LA(_copy_pref_both) UASM_L_LA(_copy_pref_store) /* We need one branch and therefore one relocation per target label. */ static struct uasm_label labels[5]; static struct uasm_reloc relocs[5]; #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) /* * R6 has a limited offset of the pref instruction. * Skip it if the offset is more than 9 bits. */ #define _uasm_i_pref(a, b, c, d) \ do { \ if (cpu_has_mips_r6) { \ if (c <= 0xff && c >= -0x100) \ uasm_i_pref(a, b, c, d);\ } else { \ uasm_i_pref(a, b, c, d); \ } \ } while(0) static int pref_bias_clear_store; static int pref_bias_copy_load; static int pref_bias_copy_store; static u32 pref_src_mode; static u32 pref_dst_mode; static int clear_word_size; static int copy_word_size; static int half_clear_loop_size; static int half_copy_loop_size; static int cache_line_size; #define cache_line_mask() (cache_line_size - 1) static inline void pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off) { if (cpu_has_64bit_gp_regs && IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) && r4k_daddiu_bug()) { if (off > 0x7fff) { uasm_i_lui(buf, GPR_T9, uasm_rel_hi(off)); uasm_i_addiu(buf, GPR_T9, GPR_T9, uasm_rel_lo(off)); } else uasm_i_addiu(buf, GPR_T9, GPR_ZERO, off); uasm_i_daddu(buf, reg1, reg2, GPR_T9); } else { if (off > 0x7fff) { uasm_i_lui(buf, GPR_T9, uasm_rel_hi(off)); uasm_i_addiu(buf, GPR_T9, GPR_T9, uasm_rel_lo(off)); UASM_i_ADDU(buf, reg1, reg2, GPR_T9); } else UASM_i_ADDIU(buf, reg1, reg2, off); } } static void set_prefetch_parameters(void) { if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) clear_word_size = 8; else clear_word_size = 4; if (cpu_has_64bit_gp_regs) copy_word_size = 8; else copy_word_size = 4; /* * The pref's used here are using "streaming" hints, which cause the * copied data to be kicked out of the cache sooner. A page copy often * ends up copying a lot more data than is commonly used, so this seems * to make sense in terms of reducing cache pollution, but I've no real * performance data to back this up. */ if (cpu_has_prefetch) { /* * XXX: Most prefetch bias values in here are based on * guesswork. */ cache_line_size = cpu_dcache_line_size(); switch (current_cpu_type()) { case CPU_R5500: case CPU_TX49XX: /* These processors only support the Pref_Load. 
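 * Leaving the store-side prefetch biases at zero makes
 * build_clear_pref() and build_copy_store_pref() below fall back to
 * the cache-create-dirty ops (or emit nothing) instead of store
 * prefetches.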
*/ pref_bias_copy_load = 256; break; case CPU_R10000: case CPU_R12000: case CPU_R14000: case CPU_R16000: /* * Those values have been experimentally tuned for an * Origin 200. */ pref_bias_clear_store = 512; pref_bias_copy_load = 256; pref_bias_copy_store = 256; pref_src_mode = Pref_LoadStreamed; pref_dst_mode = Pref_StoreStreamed; break; case CPU_SB1: case CPU_SB1A: pref_bias_clear_store = 128; pref_bias_copy_load = 128; pref_bias_copy_store = 128; /* * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed * hints are broken. */ if (current_cpu_type() == CPU_SB1 && (current_cpu_data.processor_id & 0xff) < 0x02) { pref_src_mode = Pref_Load; pref_dst_mode = Pref_Store; } else { pref_src_mode = Pref_LoadStreamed; pref_dst_mode = Pref_StoreStreamed; } break; case CPU_LOONGSON64: /* Loongson-3 only supports the Pref_Load/Pref_Store. */ pref_bias_clear_store = 128; pref_bias_copy_load = 128; pref_bias_copy_store = 128; pref_src_mode = Pref_Load; pref_dst_mode = Pref_Store; break; default: pref_bias_clear_store = 128; pref_bias_copy_load = 256; pref_bias_copy_store = 128; pref_src_mode = Pref_LoadStreamed; if (cpu_has_mips_r6) /* * Bit 30 (Pref_PrepareForStore) has been * removed from MIPS R6. Use bit 5 * (Pref_StoreStreamed). */ pref_dst_mode = Pref_StoreStreamed; else pref_dst_mode = Pref_PrepareForStore; break; } } else { if (cpu_has_cache_cdex_s) cache_line_size = cpu_scache_line_size(); else if (cpu_has_cache_cdex_p) cache_line_size = cpu_dcache_line_size(); } /* * Too much unrolling will overflow the available space in * clear_space_array / copy_page_array. */ half_clear_loop_size = min(16 * clear_word_size, max(cache_line_size >> 1, 4 * clear_word_size)); half_copy_loop_size = min(16 * copy_word_size, max(cache_line_size >> 1, 4 * copy_word_size)); } static void build_clear_store(u32 **buf, int off) { if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) { uasm_i_sd(buf, GPR_ZERO, off, GPR_A0); } else { uasm_i_sw(buf, GPR_ZERO, off, GPR_A0); } } static inline void build_clear_pref(u32 **buf, int off) { if (off & cache_line_mask()) return; if (pref_bias_clear_store) { _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, GPR_A0); } else if (cache_line_size == (half_clear_loop_size << 1)) { if (cpu_has_cache_cdex_s) { uasm_i_cache(buf, Create_Dirty_Excl_SD, off, GPR_A0); } else if (cpu_has_cache_cdex_p) { if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) && cpu_is_r4600_v1_x()) { uasm_i_nop(buf); uasm_i_nop(buf); uasm_i_nop(buf); uasm_i_nop(buf); } if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x()) uasm_i_lw(buf, GPR_ZERO, GPR_ZERO, GPR_AT); uasm_i_cache(buf, Create_Dirty_Excl_D, off, GPR_A0); } } } extern u32 __clear_page_start; extern u32 __clear_page_end; extern u32 __copy_page_start; extern u32 __copy_page_end; void build_clear_page(void) { int off; u32 *buf = &__clear_page_start; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; int i; static atomic_t run_once = ATOMIC_INIT(0); if (atomic_xchg(&run_once, 1)) { return; } memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); set_prefetch_parameters(); /* * This algorithm makes the following assumptions: * - The prefetch bias is a multiple of 2 words. * - The prefetch bias is less than one page.
*/ BUG_ON(pref_bias_clear_store % (2 * clear_word_size)); BUG_ON(PAGE_SIZE < pref_bias_clear_store); off = PAGE_SIZE - pref_bias_clear_store; if (off > 0xffff || !pref_bias_clear_store) pg_addiu(&buf, GPR_A2, GPR_A0, off); else uasm_i_ori(&buf, GPR_A2, GPR_A0, off); if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x()) uasm_i_lui(&buf, GPR_AT, uasm_rel_hi(0xa0000000)); off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size) * cache_line_size : 0; while (off) { build_clear_pref(&buf, -off); off -= cache_line_size; } uasm_l_clear_pref(&l, buf); do { build_clear_pref(&buf, off); build_clear_store(&buf, off); off += clear_word_size; } while (off < half_clear_loop_size); pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); off = -off; do { build_clear_pref(&buf, off); if (off == -clear_word_size) uasm_il_bne(&buf, &r, GPR_A0, GPR_A2, label_clear_pref); build_clear_store(&buf, off); off += clear_word_size; } while (off < 0); if (pref_bias_clear_store) { pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_clear_store); uasm_l_clear_nopref(&l, buf); off = 0; do { build_clear_store(&buf, off); off += clear_word_size; } while (off < half_clear_loop_size); pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); off = -off; do { if (off == -clear_word_size) uasm_il_bne(&buf, &r, GPR_A0, GPR_A2, label_clear_nopref); build_clear_store(&buf, off); off += clear_word_size; } while (off < 0); } uasm_i_jr(&buf, GPR_RA); uasm_i_nop(&buf); BUG_ON(buf > &__clear_page_end); uasm_resolve_relocs(relocs, labels); pr_debug("Synthesized clear page handler (%u instructions).\n", (u32)(buf - &__clear_page_start)); pr_debug("\t.set push\n"); pr_debug("\t.set noreorder\n"); for (i = 0; i < (buf - &__clear_page_start); i++) pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]); pr_debug("\t.set pop\n"); } static void build_copy_load(u32 **buf, int reg, int off) { if (cpu_has_64bit_gp_regs) { uasm_i_ld(buf, reg, off, GPR_A1); } else { uasm_i_lw(buf, reg, off, GPR_A1); } } static void build_copy_store(u32 **buf, int reg, int off) { if (cpu_has_64bit_gp_regs) { uasm_i_sd(buf, reg, off, GPR_A0); } else { uasm_i_sw(buf, reg, off, GPR_A0); } } static inline void build_copy_load_pref(u32 **buf, int off) { if (off & cache_line_mask()) return; if (pref_bias_copy_load) _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, GPR_A1); } static inline void build_copy_store_pref(u32 **buf, int off) { if (off & cache_line_mask()) return; if (pref_bias_copy_store) { _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, GPR_A0); } else if (cache_line_size == (half_copy_loop_size << 1)) { if (cpu_has_cache_cdex_s) { uasm_i_cache(buf, Create_Dirty_Excl_SD, off, GPR_A0); } else if (cpu_has_cache_cdex_p) { if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) && cpu_is_r4600_v1_x()) { uasm_i_nop(buf); uasm_i_nop(buf); uasm_i_nop(buf); uasm_i_nop(buf); } if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x()) uasm_i_lw(buf, GPR_ZERO, GPR_ZERO, GPR_AT); uasm_i_cache(buf, Create_Dirty_Excl_D, off, GPR_A0); } } } void build_copy_page(void) { int off; u32 *buf = &__copy_page_start; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; int i; static atomic_t run_once = ATOMIC_INIT(0); if (atomic_xchg(&run_once, 1)) { return; } memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); set_prefetch_parameters(); /* * This algorithm makes the following assumptions: * - All prefetch biases are multiples of 8 words. * - The prefetch biases are less than one page. 
* - The store prefetch bias isn't greater than the load * prefetch bias. */ BUG_ON(pref_bias_copy_load % (8 * copy_word_size)); BUG_ON(pref_bias_copy_store % (8 * copy_word_size)); BUG_ON(PAGE_SIZE < pref_bias_copy_load); BUG_ON(pref_bias_copy_store > pref_bias_copy_load); off = PAGE_SIZE - pref_bias_copy_load; if (off > 0xffff || !pref_bias_copy_load) pg_addiu(&buf, GPR_A2, GPR_A0, off); else uasm_i_ori(&buf, GPR_A2, GPR_A0, off); if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x()) uasm_i_lui(&buf, GPR_AT, uasm_rel_hi(0xa0000000)); off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) * cache_line_size : 0; while (off) { build_copy_load_pref(&buf, -off); off -= cache_line_size; } off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) * cache_line_size : 0; while (off) { build_copy_store_pref(&buf, -off); off -= cache_line_size; } uasm_l_copy_pref_both(&l, buf); do { build_copy_load_pref(&buf, off); build_copy_load(&buf, GPR_T0, off); build_copy_load_pref(&buf, off + copy_word_size); build_copy_load(&buf, GPR_T1, off + copy_word_size); build_copy_load_pref(&buf, off + 2 * copy_word_size); build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_load_pref(&buf, off + 3 * copy_word_size); build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); build_copy_store_pref(&buf, off); build_copy_store(&buf, GPR_T0, off); build_copy_store_pref(&buf, off + copy_word_size); build_copy_store(&buf, GPR_T1, off + copy_word_size); build_copy_store_pref(&buf, off + 2 * copy_word_size); build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_store_pref(&buf, off + 3 * copy_word_size); build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); off += 4 * copy_word_size; } while (off < half_copy_loop_size); pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off); pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); off = -off; do { build_copy_load_pref(&buf, off); build_copy_load(&buf, GPR_T0, off); build_copy_load_pref(&buf, off + copy_word_size); build_copy_load(&buf, GPR_T1, off + copy_word_size); build_copy_load_pref(&buf, off + 2 * copy_word_size); build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_load_pref(&buf, off + 3 * copy_word_size); build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); build_copy_store_pref(&buf, off); build_copy_store(&buf, GPR_T0, off); build_copy_store_pref(&buf, off + copy_word_size); build_copy_store(&buf, GPR_T1, off + copy_word_size); build_copy_store_pref(&buf, off + 2 * copy_word_size); build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_store_pref(&buf, off + 3 * copy_word_size); if (off == -(4 * copy_word_size)) uasm_il_bne(&buf, &r, GPR_A2, GPR_A0, label_copy_pref_both); build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); off += 4 * copy_word_size; } while (off < 0); if (pref_bias_copy_load - pref_bias_copy_store) { pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_copy_load - pref_bias_copy_store); uasm_l_copy_pref_store(&l, buf); off = 0; do { build_copy_load(&buf, GPR_T0, off); build_copy_load(&buf, GPR_T1, off + copy_word_size); build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); build_copy_store_pref(&buf, off); build_copy_store(&buf, GPR_T0, off); build_copy_store_pref(&buf, off + copy_word_size); build_copy_store(&buf, GPR_T1, off + copy_word_size); build_copy_store_pref(&buf, off + 2 * copy_word_size); build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_store_pref(&buf, off + 3 * copy_word_size); 
build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); off += 4 * copy_word_size; } while (off < half_copy_loop_size); pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off); pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); off = -off; do { build_copy_load(&buf, GPR_T0, off); build_copy_load(&buf, GPR_T1, off + copy_word_size); build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); build_copy_store_pref(&buf, off); build_copy_store(&buf, GPR_T0, off); build_copy_store_pref(&buf, off + copy_word_size); build_copy_store(&buf, GPR_T1, off + copy_word_size); build_copy_store_pref(&buf, off + 2 * copy_word_size); build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_store_pref(&buf, off + 3 * copy_word_size); if (off == -(4 * copy_word_size)) uasm_il_bne(&buf, &r, GPR_A2, GPR_A0, label_copy_pref_store); build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); off += 4 * copy_word_size; } while (off < 0); } if (pref_bias_copy_store) { pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_copy_store); uasm_l_copy_nopref(&l, buf); off = 0; do { build_copy_load(&buf, GPR_T0, off); build_copy_load(&buf, GPR_T1, off + copy_word_size); build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); build_copy_store(&buf, GPR_T0, off); build_copy_store(&buf, GPR_T1, off + copy_word_size); build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); off += 4 * copy_word_size; } while (off < half_copy_loop_size); pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off); pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); off = -off; do { build_copy_load(&buf, GPR_T0, off); build_copy_load(&buf, GPR_T1, off + copy_word_size); build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); build_copy_store(&buf, GPR_T0, off); build_copy_store(&buf, GPR_T1, off + copy_word_size); build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); if (off == -(4 * copy_word_size)) uasm_il_bne(&buf, &r, GPR_A2, GPR_A0, label_copy_nopref); build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); off += 4 * copy_word_size; } while (off < 0); } uasm_i_jr(&buf, GPR_RA); uasm_i_nop(&buf); BUG_ON(buf > &__copy_page_end); uasm_resolve_relocs(relocs, labels); pr_debug("Synthesized copy page handler (%u instructions).\n", (u32)(buf - &__copy_page_start)); pr_debug("\t.set push\n"); pr_debug("\t.set noreorder\n"); for (i = 0; i < (buf - &__copy_page_start); i++) pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]); pr_debug("\t.set pop\n"); } #ifdef CONFIG_SIBYTE_DMA_PAGEOPS extern void clear_page_cpu(void *page); extern void copy_page_cpu(void *to, void *from); /* * Pad descriptors to cacheline, since each is exclusively owned by a * particular CPU. */ struct dmadscr { u64 dscr_a; u64 dscr_b; u64 pad_a; u64 pad_b; } ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS]; void clear_page(void *page) { u64 to_phys = CPHYSADDR((unsigned long)page); unsigned int cpu = smp_processor_id(); /* if the page is not in KSEG0, use old way */ if ((long)KSEGX((unsigned long)page) != (long)CKSEG0) return clear_page_cpu(page); page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT; page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE); __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT))); /* * Don't really want to do it this way, but there's no * reliable way to delay completion detection. 
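 * Instead, spin until the data mover raises the interrupt status bit
 * in the descriptor-base debug register, then read back the
 * descriptor base before returning.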
*/ while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG))) & M_DM_DSCR_BASE_INTERRUPT)) ; __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE))); } EXPORT_SYMBOL(clear_page); void copy_page(void *to, void *from) { u64 from_phys = CPHYSADDR((unsigned long)from); u64 to_phys = CPHYSADDR((unsigned long)to); unsigned int cpu = smp_processor_id(); /* if any page is not in KSEG0, use old way */ if ((long)KSEGX((unsigned long)to) != (long)CKSEG0 || (long)KSEGX((unsigned long)from) != (long)CKSEG0) return copy_page_cpu(to, from); page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT; page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE); __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT))); /* * Don't really want to do it this way, but there's no * reliable way to delay completion detection. */ while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG))) & M_DM_DSCR_BASE_INTERRUPT)) ; __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE))); } EXPORT_SYMBOL(copy_page); #endif /* CONFIG_SIBYTE_DMA_PAGEOPS */
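/*
 * Editor's note: an illustrative sketch, not part of the kernel sources.
 * The uasm sequences above synthesize an unrolled copy loop that advances
 * the destination (GPR_A0) and source (GPR_A1) base registers past the end
 * of each half of the page and then indexes backwards with negative
 * offsets, so the terminating bne can simply compare GPR_A0 against the
 * precomputed end address held in GPR_A2. Ignoring the prefetches and the
 * two-half split, the generated code is roughly equivalent to the
 * following freestanding C loop (demo_copy_page and DEMO_PAGE_SIZE are
 * hypothetical names used only for this sketch):
 */
#define DEMO_PAGE_SIZE 4096UL

static void demo_copy_page(void *to, const void *from)
{
	/* Point both bases one page past the start of the buffers ... */
	unsigned long *dst = (unsigned long *)((char *)to + DEMO_PAGE_SIZE);
	const unsigned long *src =
		(const unsigned long *)((const char *)from + DEMO_PAGE_SIZE);
	long words = (long)(DEMO_PAGE_SIZE / sizeof(unsigned long));
	long off;

	/*
	 * ... and walk up from a negative offset towards zero, four words
	 * per iteration, mirroring the GPR_T0..GPR_T3 load/store groups.
	 */
	for (off = -words; off < 0; off += 4) {
		dst[off + 0] = src[off + 0];
		dst[off + 1] = src[off + 1];
		dst[off + 2] = src[off + 2];
		dst[off + 3] = src[off + 3];
	}
}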
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BH_H #define _LINUX_BH_H #include <linux/instruction_pointer.h> #include <linux/preempt.h> #if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS) extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); #else static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) { preempt_count_add(cnt); barrier(); } #endif static inline void local_bh_disable(void) { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); } extern void _local_bh_enable(void); extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt); static inline void local_bh_enable_ip(unsigned long ip) { __local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET); } static inline void local_bh_enable(void) { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); } #ifdef CONFIG_PREEMPT_RT extern bool local_bh_blocked(void); #else static inline bool local_bh_blocked(void) { return false; } #endif #endif /* _LINUX_BH_H */
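/*
 * Editor's note: an illustrative usage sketch, not part of the kernel
 * sources. Code that shares per-CPU state with softirq handlers (timers,
 * NET_RX, tasklets, ...) brackets the critical section with the pair
 * declared above; on PREEMPT_RT or with CONFIG_TRACE_IRQFLAGS the same
 * calls resolve to the out-of-line implementations instead of the bare
 * preempt_count adjustment. The demo_* names are hypothetical:
 */
#include <linux/bh.h>

static unsigned long demo_event_count;

static void demo_count_event(void)
{
	local_bh_disable();	/* softirqs can no longer run on this CPU */
	demo_event_count++;	/* safe against e.g. a timer softirq reader */
	local_bh_enable();	/* re-enable; pending softirqs may run now */
}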
/* SPDX-License-Identifier: GPL-2.0-only */ /* * ALSA SoC TPA6130A2 amplifier driver * * Copyright (C) Nokia Corporation * * Author: Peter Ujfalusi <[email protected]> */ #ifndef __TPA6130A2_H__ #define __TPA6130A2_H__ /* Register addresses */ #define TPA6130A2_REG_CONTROL 0x01 #define TPA6130A2_REG_VOL_MUTE 0x02 #define TPA6130A2_REG_OUT_IMPEDANCE 0x03 #define TPA6130A2_REG_VERSION 0x04 /* Register bits */ /* TPA6130A2_REG_CONTROL (0x01) */ #define TPA6130A2_SWS_SHIFT 0 #define TPA6130A2_SWS (0x01 << TPA6130A2_SWS_SHIFT) #define TPA6130A2_TERMAL (0x01 << 1) #define TPA6130A2_MODE(x) (x << 4) #define TPA6130A2_MODE_STEREO (0x00) #define TPA6130A2_MODE_DUAL_MONO (0x01) #define TPA6130A2_MODE_BRIDGE (0x02) #define TPA6130A2_MODE_MASK (0x03) #define TPA6130A2_HP_EN_R_SHIFT 6 #define TPA6130A2_HP_EN_R (0x01 << TPA6130A2_HP_EN_R_SHIFT) #define TPA6130A2_HP_EN_L_SHIFT 7 #define TPA6130A2_HP_EN_L (0x01 << TPA6130A2_HP_EN_L_SHIFT) /* TPA6130A2_REG_VOL_MUTE (0x02) */ #define TPA6130A2_VOLUME(x) ((x & 0x3f) << 0) #define TPA6130A2_MUTE_R (0x01 << 6) #define TPA6130A2_MUTE_L (0x01 << 7) /* TPA6130A2_REG_OUT_IMPEDANCE (0x03) */ #define TPA6130A2_HIZ_R (0x01 << 0) #define TPA6130A2_HIZ_L (0x01 << 1) /* TPA6130A2_REG_VERSION (0x04) */ #define TPA6130A2_VERSION_MASK (0x0f) #endif /* __TPA6130A2_H__ */
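/*
 * Editor's note: an illustrative sketch, not part of the driver. It shows
 * how the bit definitions above compose into register values: enabling
 * both headphone channels in stereo mode for TPA6130A2_REG_CONTROL, and
 * programming a gain with both mute bits clear for TPA6130A2_REG_VOL_MUTE.
 * The demo_* helper names are hypothetical:
 */
static inline unsigned char demo_tpa6130a2_control_stereo_on(void)
{
	/* HP_EN_L | HP_EN_R with the mode field set to stereo (0) */
	return TPA6130A2_HP_EN_L | TPA6130A2_HP_EN_R |
	       TPA6130A2_MODE(TPA6130A2_MODE_STEREO);
}

static inline unsigned char demo_tpa6130a2_vol_unmuted(unsigned char gain)
{
	/* Only the low 6 bits carry gain; MUTE_L/MUTE_R stay clear. */
	return TPA6130A2_VOLUME(gain);
}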
// SPDX-License-Identifier: GPL-2.0-or-later /* * Digital Audio (PCM) abstract layer * Copyright (c) by Jaroslav Kysela <[email protected]> * Abramo Bagnara <[email protected]> */ #include <linux/slab.h> #include <linux/sched/signal.h> #include <linux/time.h> #include <linux/math64.h> #include <linux/export.h> #include <sound/core.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/info.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/timer.h> #include "pcm_local.h" #ifdef CONFIG_SND_PCM_XRUN_DEBUG #define CREATE_TRACE_POINTS #include "pcm_trace.h" #else #define trace_hwptr(substream, pos, in_interrupt) #define trace_xrun(substream) #define trace_hw_ptr_error(substream, reason) #define trace_applptr(substream, prev, curr) #endif static int fill_silence_frames(struct snd_pcm_substream *substream, snd_pcm_uframes_t off, snd_pcm_uframes_t frames); static inline void update_silence_vars(struct snd_pcm_runtime *runtime, snd_pcm_uframes_t ptr, snd_pcm_uframes_t new_ptr) { snd_pcm_sframes_t delta; delta = new_ptr - ptr; if (delta == 0) return; if (delta < 0) delta += runtime->boundary; if ((snd_pcm_uframes_t)delta < runtime->silence_filled) runtime->silence_filled -= delta; else runtime->silence_filled = 0; runtime->silence_start = new_ptr; } /* * fill ring buffer with silence * runtime->silence_start: starting pointer to silence area * runtime->silence_filled: size filled with silence * runtime->silence_threshold: threshold from application * runtime->silence_size: maximal size from application * * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately */ void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t frames, ofs, transfer; int err; if (runtime->silence_size < runtime->boundary) { snd_pcm_sframes_t noise_dist; snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr); update_silence_vars(runtime, runtime->silence_start, appl_ptr); /* initialization outside pointer updates */ if (new_hw_ptr == ULONG_MAX) new_hw_ptr = runtime->status->hw_ptr; /* get hw_avail with the boundary crossing */ noise_dist = appl_ptr - new_hw_ptr; if (noise_dist < 0) noise_dist += runtime->boundary; /* total noise distance */ noise_dist += runtime->silence_filled; if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold) return; frames = runtime->silence_threshold - noise_dist; if (frames > runtime->silence_size) frames = runtime->silence_size; } else { /* * This filling mode aims at free-running mode (used for example by dmix), * which doesn't update the application pointer. */ snd_pcm_uframes_t hw_ptr = runtime->status->hw_ptr; if (new_hw_ptr == ULONG_MAX) { /* * Initialization, fill the whole unused buffer with silence. * * Usually, this is entered while stopped, before data is queued, * so both pointers are expected to be zero. */ snd_pcm_sframes_t avail = runtime->control->appl_ptr - hw_ptr; if (avail < 0) avail += runtime->boundary; /* * In free-running mode, appl_ptr will be zero even while running, * so we end up with a huge number. There is no useful way to * handle this, so we just clear the whole buffer. */ runtime->silence_filled = avail > runtime->buffer_size ? 
0 : avail; runtime->silence_start = hw_ptr; } else { /* Silence the just played area immediately */ update_silence_vars(runtime, hw_ptr, new_hw_ptr); } /* * In this mode, silence_filled actually includes the valid * sample data from the user. */ frames = runtime->buffer_size - runtime->silence_filled; } if (snd_BUG_ON(frames > runtime->buffer_size)) return; if (frames == 0) return; ofs = (runtime->silence_start + runtime->silence_filled) % runtime->buffer_size; do { transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames; err = fill_silence_frames(substream, ofs, transfer); snd_BUG_ON(err < 0); runtime->silence_filled += transfer; frames -= transfer; ofs = 0; } while (frames > 0); snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE); } #ifdef CONFIG_SND_DEBUG void snd_pcm_debug_name(struct snd_pcm_substream *substream, char *name, size_t len) { snprintf(name, len, "pcmC%dD%d%c:%d", substream->pcm->card->number, substream->pcm->device, substream->stream ? 'c' : 'p', substream->number); } EXPORT_SYMBOL(snd_pcm_debug_name); #endif #define XRUN_DEBUG_BASIC (1<<0) #define XRUN_DEBUG_STACK (1<<1) /* dump also stack */ #define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */ #ifdef CONFIG_SND_PCM_XRUN_DEBUG #define xrun_debug(substream, mask) \ ((substream)->pstr->xrun_debug & (mask)) #else #define xrun_debug(substream, mask) 0 #endif #define dump_stack_on_xrun(substream) do { \ if (xrun_debug(substream, XRUN_DEBUG_STACK)) \ dump_stack(); \ } while (0) /* call with stream lock held */ void __snd_pcm_xrun(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; trace_xrun(substream); if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) { struct timespec64 tstamp; snd_pcm_gettime(runtime, &tstamp); runtime->status->tstamp.tv_sec = tstamp.tv_sec; runtime->status->tstamp.tv_nsec = tstamp.tv_nsec; } snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { char name[16]; snd_pcm_debug_name(substream, name, sizeof(name)); pcm_warn(substream->pcm, "XRUN: %s\n", name); dump_stack_on_xrun(substream); } #ifdef CONFIG_SND_PCM_XRUN_DEBUG substream->xrun_counter++; #endif } #ifdef CONFIG_SND_PCM_XRUN_DEBUG #define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) \ do { \ trace_hw_ptr_error(substream, reason); \ if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \ pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \ (in_interrupt) ? 'Q' : 'P', ##args); \ dump_stack_on_xrun(substream); \ } \ } while (0) #else /* ! CONFIG_SND_PCM_XRUN_DEBUG */ #define hw_ptr_error(substream, fmt, args...) 
do { } while (0) #endif int snd_pcm_update_state(struct snd_pcm_substream *substream, struct snd_pcm_runtime *runtime) { snd_pcm_uframes_t avail; avail = snd_pcm_avail(substream); if (avail > runtime->avail_max) runtime->avail_max = avail; if (runtime->state == SNDRV_PCM_STATE_DRAINING) { if (avail >= runtime->buffer_size) { snd_pcm_drain_done(substream); return -EPIPE; } } else { if (avail >= runtime->stop_threshold) { __snd_pcm_xrun(substream); return -EPIPE; } } if (runtime->twake) { if (avail >= runtime->twake) wake_up(&runtime->tsleep); } else if (avail >= runtime->control->avail_min) wake_up(&runtime->sleep); return 0; } static void update_audio_tstamp(struct snd_pcm_substream *substream, struct timespec64 *curr_tstamp, struct timespec64 *audio_tstamp) { struct snd_pcm_runtime *runtime = substream->runtime; u64 audio_frames, audio_nsecs; struct timespec64 driver_tstamp; if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE) return; if (!(substream->ops->get_time_info) || (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) { /* * provide audio timestamp derived from pointer position * add delay only if requested */ audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr; if (runtime->audio_tstamp_config.report_delay) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) audio_frames -= runtime->delay; else audio_frames += runtime->delay; } audio_nsecs = div_u64(audio_frames * 1000000000LL, runtime->rate); *audio_tstamp = ns_to_timespec64(audio_nsecs); } if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec || runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) { runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec; runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec; runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec; runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec; } /* * re-take a driver timestamp to let apps detect if the reference tstamp * read by low-level hardware was provided with a delay */ snd_pcm_gettime(substream->runtime, &driver_tstamp); runtime->driver_tstamp = driver_tstamp; } static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, unsigned int in_interrupt) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t pos; snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base; snd_pcm_sframes_t hdelta, delta; unsigned long jdelta; unsigned long curr_jiffies; struct timespec64 curr_tstamp; struct timespec64 audio_tstamp; int crossed_boundary = 0; old_hw_ptr = runtime->status->hw_ptr; /* * group pointer, time and jiffies reads to allow for more * accurate correlations/corrections. 
* The values are stored at the end of this routine after * corrections for hw_ptr position */ pos = substream->ops->pointer(substream); curr_jiffies = jiffies; if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) { if ((substream->ops->get_time_info) && (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) { substream->ops->get_time_info(substream, &curr_tstamp, &audio_tstamp, &runtime->audio_tstamp_config, &runtime->audio_tstamp_report); /* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */ if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT) snd_pcm_gettime(runtime, &curr_tstamp); } else snd_pcm_gettime(runtime, &curr_tstamp); } if (pos == SNDRV_PCM_POS_XRUN) { __snd_pcm_xrun(substream); return -EPIPE; } if (pos >= runtime->buffer_size) { if (printk_ratelimit()) { char name[16]; snd_pcm_debug_name(substream, name, sizeof(name)); pcm_err(substream->pcm, "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n", name, pos, runtime->buffer_size, runtime->period_size); } pos = 0; } pos -= pos % runtime->min_align; trace_hwptr(substream, pos, in_interrupt); hw_base = runtime->hw_ptr_base; new_hw_ptr = hw_base + pos; if (in_interrupt) { /* we know that one period was processed */ /* delta = "expected next hw_ptr" for in_interrupt != 0 */ delta = runtime->hw_ptr_interrupt + runtime->period_size; if (delta > new_hw_ptr) { /* check for double acknowledged interrupts */ hdelta = curr_jiffies - runtime->hw_ptr_jiffies; if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) { hw_base += runtime->buffer_size; if (hw_base >= runtime->boundary) { hw_base = 0; crossed_boundary++; } new_hw_ptr = hw_base + pos; goto __delta; } } } /* new_hw_ptr might be lower than old_hw_ptr in case when */ /* pointer crosses the end of the ring buffer */ if (new_hw_ptr < old_hw_ptr) { hw_base += runtime->buffer_size; if (hw_base >= runtime->boundary) { hw_base = 0; crossed_boundary++; } new_hw_ptr = hw_base + pos; } __delta: delta = new_hw_ptr - old_hw_ptr; if (delta < 0) delta += runtime->boundary; if (runtime->no_period_wakeup) { snd_pcm_sframes_t xrun_threshold; /* * Without regular period interrupts, we have to check * the elapsed time to detect xruns. */ jdelta = curr_jiffies - runtime->hw_ptr_jiffies; if (jdelta < runtime->hw_ptr_buffer_jiffies / 2) goto no_delta_check; hdelta = jdelta - delta * HZ / runtime->rate; xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1; while (hdelta > xrun_threshold) { delta += runtime->buffer_size; hw_base += runtime->buffer_size; if (hw_base >= runtime->boundary) { hw_base = 0; crossed_boundary++; } new_hw_ptr = hw_base + pos; hdelta -= runtime->hw_ptr_buffer_jiffies; } goto no_delta_check; } /* something must be really wrong */ if (delta >= runtime->buffer_size + runtime->period_size) { hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr", "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n", substream->stream, (long)pos, (long)new_hw_ptr, (long)old_hw_ptr); return 0; } /* Do jiffies check only in xrun_debug mode */ if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK)) goto no_jiffies_check; /* Skip the jiffies check for hardwares with BATCH flag. * Such hardware usually just increases the position at each IRQ, * thus it can't give any strange position. 
*/ if (runtime->hw.info & SNDRV_PCM_INFO_BATCH) goto no_jiffies_check; hdelta = delta; if (hdelta < runtime->delay) goto no_jiffies_check; hdelta -= runtime->delay; jdelta = curr_jiffies - runtime->hw_ptr_jiffies; if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) { delta = jdelta / (((runtime->period_size * HZ) / runtime->rate) + HZ/100); /* move new_hw_ptr according jiffies not pos variable */ new_hw_ptr = old_hw_ptr; hw_base = delta; /* use loop to avoid checks for delta overflows */ /* the delta value is small or zero in most cases */ while (delta > 0) { new_hw_ptr += runtime->period_size; if (new_hw_ptr >= runtime->boundary) { new_hw_ptr -= runtime->boundary; crossed_boundary--; } delta--; } /* align hw_base to buffer_size */ hw_ptr_error(substream, in_interrupt, "hw_ptr skipping", "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n", (long)pos, (long)hdelta, (long)runtime->period_size, jdelta, ((hdelta * HZ) / runtime->rate), hw_base, (unsigned long)old_hw_ptr, (unsigned long)new_hw_ptr); /* reset values to proper state */ delta = 0; hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size); } no_jiffies_check: if (delta > runtime->period_size + runtime->period_size / 2) { hw_ptr_error(substream, in_interrupt, "Lost interrupts?", "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n", substream->stream, (long)delta, (long)new_hw_ptr, (long)old_hw_ptr); } no_delta_check: if (runtime->status->hw_ptr == new_hw_ptr) { runtime->hw_ptr_jiffies = curr_jiffies; update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp); return 0; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && runtime->silence_size > 0) snd_pcm_playback_silence(substream, new_hw_ptr); if (in_interrupt) { delta = new_hw_ptr - runtime->hw_ptr_interrupt; if (delta < 0) delta += runtime->boundary; delta -= (snd_pcm_uframes_t)delta % runtime->period_size; runtime->hw_ptr_interrupt += delta; if (runtime->hw_ptr_interrupt >= runtime->boundary) runtime->hw_ptr_interrupt -= runtime->boundary; } runtime->hw_ptr_base = hw_base; runtime->status->hw_ptr = new_hw_ptr; runtime->hw_ptr_jiffies = curr_jiffies; if (crossed_boundary) { snd_BUG_ON(crossed_boundary != 1); runtime->hw_ptr_wrap += runtime->boundary; } update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp); return snd_pcm_update_state(substream, runtime); } /* CAUTION: call it with irq disabled */ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream) { return snd_pcm_update_hw_ptr0(substream, 0); } /** * snd_pcm_set_ops - set the PCM operators * @pcm: the pcm instance * @direction: stream direction, SNDRV_PCM_STREAM_XXX * @ops: the operator table * * Sets the given PCM operators to the pcm instance. */ void snd_pcm_set_ops(struct snd_pcm *pcm, int direction, const struct snd_pcm_ops *ops) { struct snd_pcm_str *stream = &pcm->streams[direction]; struct snd_pcm_substream *substream; for (substream = stream->substream; substream != NULL; substream = substream->next) substream->ops = ops; } EXPORT_SYMBOL(snd_pcm_set_ops); /** * snd_pcm_set_sync_per_card - set the PCM sync id with card number * @substream: the pcm substream * @params: modified hardware parameters * @id: identifier (max 12 bytes) * @len: identifier length (max 12 bytes) * * Sets the PCM sync identifier for the card with zero padding. * * User space or any user should use this 16-byte identifier for a comparison only * to check if two IDs are similar or different. Special case is the identifier * containing only zeros. 
This combination is interpreted as empty (not set).
 * The contents of the identifier should not be interpreted in any other way.
 *
 * The synchronization ID must be unique per clock source (usually one sound
 * card, but multiple sound cards may share one PCM word clock source, which
 * means that they are fully synchronized).
 *
 * This routine composes the ID from the card number in the first four bytes
 * and a 12-byte additional ID. When another ID composition is used (e.g. for
 * multiple sound cards), make sure that the composition does not clash with
 * this composition scheme.
 */
void snd_pcm_set_sync_per_card(struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params,
			       const unsigned char *id, unsigned int len)
{
	*(__u32 *)params->sync = cpu_to_le32(substream->pcm->card->number);
	len = min(12, len);
	memcpy(params->sync + 4, id, len);
	memset(params->sync + 4 + len, 0, 12 - len);
}
EXPORT_SYMBOL_GPL(snd_pcm_set_sync_per_card);

/*
 * Standard ioctl routine
 */

static inline unsigned int div32(unsigned int a, unsigned int b,
				 unsigned int *r)
{
	if (b == 0) {
		*r = 0;
		return UINT_MAX;
	}
	*r = a % b;
	return a / b;
}

static inline unsigned int div_down(unsigned int a, unsigned int b)
{
	if (b == 0)
		return UINT_MAX;
	return a / b;
}

static inline unsigned int div_up(unsigned int a, unsigned int b)
{
	unsigned int r;
	unsigned int q;

	if (b == 0)
		return UINT_MAX;
	q = div32(a, b, &r);
	if (r)
		++q;
	return q;
}

static inline unsigned int mul(unsigned int a, unsigned int b)
{
	if (a == 0)
		return 0;
	if (div_down(UINT_MAX, a) < b)
		return UINT_MAX;
	return a * b;
}

static inline unsigned int muldiv32(unsigned int a, unsigned int b,
				    unsigned int c, unsigned int *r)
{
	u_int64_t n = (u_int64_t) a * b;
	if (c == 0) {
		*r = 0;
		return UINT_MAX;
	}
	n = div_u64_rem(n, c, r);
	if (n >= UINT_MAX) {
		*r = 0;
		return UINT_MAX;
	}
	return n;
}

/**
 * snd_interval_refine - refine the interval value of the configurator
 * @i: the interval value to refine
 * @v: the interval value to refer to
 *
 * Refines the interval value with the reference value.
 * The interval is changed to the range satisfying both intervals.
 * The interval status (min, max, integer, etc.) is evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
*/ int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v) { int changed = 0; if (snd_BUG_ON(snd_interval_empty(i))) return -EINVAL; if (i->min < v->min) { i->min = v->min; i->openmin = v->openmin; changed = 1; } else if (i->min == v->min && !i->openmin && v->openmin) { i->openmin = 1; changed = 1; } if (i->max > v->max) { i->max = v->max; i->openmax = v->openmax; changed = 1; } else if (i->max == v->max && !i->openmax && v->openmax) { i->openmax = 1; changed = 1; } if (!i->integer && v->integer) { i->integer = 1; changed = 1; } if (i->integer) { if (i->openmin) { i->min++; i->openmin = 0; } if (i->openmax) { i->max--; i->openmax = 0; } } else if (!i->openmin && !i->openmax && i->min == i->max) i->integer = 1; if (snd_interval_checkempty(i)) { snd_interval_none(i); return -EINVAL; } return changed; } EXPORT_SYMBOL(snd_interval_refine); static int snd_interval_refine_first(struct snd_interval *i) { const unsigned int last_max = i->max; if (snd_BUG_ON(snd_interval_empty(i))) return -EINVAL; if (snd_interval_single(i)) return 0; i->max = i->min; if (i->openmin) i->max++; /* only exclude max value if also excluded before refine */ i->openmax = (i->openmax && i->max >= last_max); return 1; } static int snd_interval_refine_last(struct snd_interval *i) { const unsigned int last_min = i->min; if (snd_BUG_ON(snd_interval_empty(i))) return -EINVAL; if (snd_interval_single(i)) return 0; i->min = i->max; if (i->openmax) i->min--; /* only exclude min value if also excluded before refine */ i->openmin = (i->openmin && i->min <= last_min); return 1; } void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c) { if (a->empty || b->empty) { snd_interval_none(c); return; } c->empty = 0; c->min = mul(a->min, b->min); c->openmin = (a->openmin || b->openmin); c->max = mul(a->max, b->max); c->openmax = (a->openmax || b->openmax); c->integer = (a->integer && b->integer); } /** * snd_interval_div - refine the interval value with division * @a: dividend * @b: divisor * @c: quotient * * c = a / b * * Returns non-zero if the value is changed, zero if not changed. */ void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c) { unsigned int r; if (a->empty || b->empty) { snd_interval_none(c); return; } c->empty = 0; c->min = div32(a->min, b->max, &r); c->openmin = (r || a->openmin || b->openmax); if (b->min > 0) { c->max = div32(a->max, b->min, &r); if (r) { c->max++; c->openmax = 1; } else c->openmax = (a->openmax || b->openmin); } else { c->max = UINT_MAX; c->openmax = 0; } c->integer = 0; } /** * snd_interval_muldivk - refine the interval value * @a: dividend 1 * @b: dividend 2 * @k: divisor (as integer) * @c: result * * c = a * b / k * * Returns non-zero if the value is changed, zero if not changed. */ void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b, unsigned int k, struct snd_interval *c) { unsigned int r; if (a->empty || b->empty) { snd_interval_none(c); return; } c->empty = 0; c->min = muldiv32(a->min, b->min, k, &r); c->openmin = (r || a->openmin || b->openmin); c->max = muldiv32(a->max, b->max, k, &r); if (r) { c->max++; c->openmax = 1; } else c->openmax = (a->openmax || b->openmax); c->integer = 0; } /** * snd_interval_mulkdiv - refine the interval value * @a: dividend 1 * @k: dividend 2 (as integer) * @b: divisor * @c: result * * c = a * k / b * * Returns non-zero if the value is changed, zero if not changed. 
*/ void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k, const struct snd_interval *b, struct snd_interval *c) { unsigned int r; if (a->empty || b->empty) { snd_interval_none(c); return; } c->empty = 0; c->min = muldiv32(a->min, k, b->max, &r); c->openmin = (r || a->openmin || b->openmax); if (b->min > 0) { c->max = muldiv32(a->max, k, b->min, &r); if (r) { c->max++; c->openmax = 1; } else c->openmax = (a->openmax || b->openmin); } else { c->max = UINT_MAX; c->openmax = 0; } c->integer = 0; } /* ---- */ /** * snd_interval_ratnum - refine the interval value * @i: interval to refine * @rats_count: number of ratnum_t * @rats: ratnum_t array * @nump: pointer to store the resultant numerator * @denp: pointer to store the resultant denominator * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. */ int snd_interval_ratnum(struct snd_interval *i, unsigned int rats_count, const struct snd_ratnum *rats, unsigned int *nump, unsigned int *denp) { unsigned int best_num, best_den; int best_diff; unsigned int k; struct snd_interval t; int err; unsigned int result_num, result_den; int result_diff; best_num = best_den = best_diff = 0; for (k = 0; k < rats_count; ++k) { unsigned int num = rats[k].num; unsigned int den; unsigned int q = i->min; int diff; if (q == 0) q = 1; den = div_up(num, q); if (den < rats[k].den_min) continue; if (den > rats[k].den_max) den = rats[k].den_max; else { unsigned int r; r = (den - rats[k].den_min) % rats[k].den_step; if (r != 0) den -= r; } diff = num - q * den; if (diff < 0) diff = -diff; if (best_num == 0 || diff * best_den < best_diff * den) { best_diff = diff; best_den = den; best_num = num; } } if (best_den == 0) { i->empty = 1; return -EINVAL; } t.min = div_down(best_num, best_den); t.openmin = !!(best_num % best_den); result_num = best_num; result_diff = best_diff; result_den = best_den; best_num = best_den = best_diff = 0; for (k = 0; k < rats_count; ++k) { unsigned int num = rats[k].num; unsigned int den; unsigned int q = i->max; int diff; if (q == 0) { i->empty = 1; return -EINVAL; } den = div_down(num, q); if (den > rats[k].den_max) continue; if (den < rats[k].den_min) den = rats[k].den_min; else { unsigned int r; r = (den - rats[k].den_min) % rats[k].den_step; if (r != 0) den += rats[k].den_step - r; } diff = q * den - num; if (diff < 0) diff = -diff; if (best_num == 0 || diff * best_den < best_diff * den) { best_diff = diff; best_den = den; best_num = num; } } if (best_den == 0) { i->empty = 1; return -EINVAL; } t.max = div_up(best_num, best_den); t.openmax = !!(best_num % best_den); t.integer = 0; err = snd_interval_refine(i, &t); if (err < 0) return err; if (snd_interval_single(i)) { if (best_diff * result_den < result_diff * best_den) { result_num = best_num; result_den = best_den; } if (nump) *nump = result_num; if (denp) *denp = result_den; } return err; } EXPORT_SYMBOL(snd_interval_ratnum); /** * snd_interval_ratden - refine the interval value * @i: interval to refine * @rats_count: number of struct ratden * @rats: struct ratden array * @nump: pointer to store the resultant numerator * @denp: pointer to store the resultant denominator * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. 
*/ static int snd_interval_ratden(struct snd_interval *i, unsigned int rats_count, const struct snd_ratden *rats, unsigned int *nump, unsigned int *denp) { unsigned int best_num, best_diff, best_den; unsigned int k; struct snd_interval t; int err; best_num = best_den = best_diff = 0; for (k = 0; k < rats_count; ++k) { unsigned int num; unsigned int den = rats[k].den; unsigned int q = i->min; int diff; num = mul(q, den); if (num > rats[k].num_max) continue; if (num < rats[k].num_min) num = rats[k].num_max; else { unsigned int r; r = (num - rats[k].num_min) % rats[k].num_step; if (r != 0) num += rats[k].num_step - r; } diff = num - q * den; if (best_num == 0 || diff * best_den < best_diff * den) { best_diff = diff; best_den = den; best_num = num; } } if (best_den == 0) { i->empty = 1; return -EINVAL; } t.min = div_down(best_num, best_den); t.openmin = !!(best_num % best_den); best_num = best_den = best_diff = 0; for (k = 0; k < rats_count; ++k) { unsigned int num; unsigned int den = rats[k].den; unsigned int q = i->max; int diff; num = mul(q, den); if (num < rats[k].num_min) continue; if (num > rats[k].num_max) num = rats[k].num_max; else { unsigned int r; r = (num - rats[k].num_min) % rats[k].num_step; if (r != 0) num -= r; } diff = q * den - num; if (best_num == 0 || diff * best_den < best_diff * den) { best_diff = diff; best_den = den; best_num = num; } } if (best_den == 0) { i->empty = 1; return -EINVAL; } t.max = div_up(best_num, best_den); t.openmax = !!(best_num % best_den); t.integer = 0; err = snd_interval_refine(i, &t); if (err < 0) return err; if (snd_interval_single(i)) { if (nump) *nump = best_num; if (denp) *denp = best_den; } return err; } /** * snd_interval_list - refine the interval value from the list * @i: the interval value to refine * @count: the number of elements in the list * @list: the value list * @mask: the bit-mask to evaluate * * Refines the interval value from the list. * When mask is non-zero, only the elements corresponding to bit 1 are * evaluated. * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. */ int snd_interval_list(struct snd_interval *i, unsigned int count, const unsigned int *list, unsigned int mask) { unsigned int k; struct snd_interval list_range; if (!count) { i->empty = 1; return -EINVAL; } snd_interval_any(&list_range); list_range.min = UINT_MAX; list_range.max = 0; for (k = 0; k < count; k++) { if (mask && !(mask & (1 << k))) continue; if (!snd_interval_test(i, list[k])) continue; list_range.min = min(list_range.min, list[k]); list_range.max = max(list_range.max, list[k]); } return snd_interval_refine(i, &list_range); } EXPORT_SYMBOL(snd_interval_list); /** * snd_interval_ranges - refine the interval value from the list of ranges * @i: the interval value to refine * @count: the number of elements in the list of ranges * @ranges: the ranges list * @mask: the bit-mask to evaluate * * Refines the interval value from the list of ranges. * When mask is non-zero, only the elements corresponding to bit 1 are * evaluated. * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. 
*/ int snd_interval_ranges(struct snd_interval *i, unsigned int count, const struct snd_interval *ranges, unsigned int mask) { unsigned int k; struct snd_interval range_union; struct snd_interval range; if (!count) { snd_interval_none(i); return -EINVAL; } snd_interval_any(&range_union); range_union.min = UINT_MAX; range_union.max = 0; for (k = 0; k < count; k++) { if (mask && !(mask & (1 << k))) continue; snd_interval_copy(&range, &ranges[k]); if (snd_interval_refine(&range, i) < 0) continue; if (snd_interval_empty(&range)) continue; if (range.min < range_union.min) { range_union.min = range.min; range_union.openmin = 1; } if (range.min == range_union.min && !range.openmin) range_union.openmin = 0; if (range.max > range_union.max) { range_union.max = range.max; range_union.openmax = 1; } if (range.max == range_union.max && !range.openmax) range_union.openmax = 0; } return snd_interval_refine(i, &range_union); } EXPORT_SYMBOL(snd_interval_ranges); static int snd_interval_step(struct snd_interval *i, unsigned int step) { unsigned int n; int changed = 0; n = i->min % step; if (n != 0 || i->openmin) { i->min += step - n; i->openmin = 0; changed = 1; } n = i->max % step; if (n != 0 || i->openmax) { i->max -= n; i->openmax = 0; changed = 1; } if (snd_interval_checkempty(i)) { i->empty = 1; return -EINVAL; } return changed; } /* Info constraints helpers */ /** * snd_pcm_hw_rule_add - add the hw-constraint rule * @runtime: the pcm runtime instance * @cond: condition bits * @var: the variable to evaluate * @func: the evaluation function * @private: the private data pointer passed to function * @dep: the dependent variables * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond, int var, snd_pcm_hw_rule_func_t func, void *private, int dep, ...) { struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; struct snd_pcm_hw_rule *c; unsigned int k; va_list args; va_start(args, dep); if (constrs->rules_num >= constrs->rules_all) { struct snd_pcm_hw_rule *new; unsigned int new_rules = constrs->rules_all + 16; new = krealloc_array(constrs->rules, new_rules, sizeof(*c), GFP_KERNEL); if (!new) { va_end(args); return -ENOMEM; } constrs->rules = new; constrs->rules_all = new_rules; } c = &constrs->rules[constrs->rules_num]; c->cond = cond; c->func = func; c->var = var; c->private = private; k = 0; while (1) { if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) { va_end(args); return -EINVAL; } c->deps[k++] = dep; if (dep < 0) break; dep = va_arg(args, int); } constrs->rules_num++; va_end(args); return 0; } EXPORT_SYMBOL(snd_pcm_hw_rule_add); /** * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint * @runtime: PCM runtime instance * @var: hw_params variable to apply the mask * @mask: the bitmap mask * * Apply the constraint of the given bitmap mask to a 32-bit mask parameter. * * Return: Zero if successful, or a negative error code on failure. 
*/ int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, u_int32_t mask) { struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; struct snd_mask *maskp = constrs_mask(constrs, var); *maskp->bits &= mask; memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */ if (*maskp->bits == 0) return -EINVAL; return 0; } /** * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint * @runtime: PCM runtime instance * @var: hw_params variable to apply the mask * @mask: the 64bit bitmap mask * * Apply the constraint of the given bitmap mask to a 64-bit mask parameter. * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, u_int64_t mask) { struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; struct snd_mask *maskp = constrs_mask(constrs, var); maskp->bits[0] &= (u_int32_t)mask; maskp->bits[1] &= (u_int32_t)(mask >> 32); memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */ if (! maskp->bits[0] && ! maskp->bits[1]) return -EINVAL; return 0; } EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64); /** * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval * @runtime: PCM runtime instance * @var: hw_params variable to apply the integer constraint * * Apply the constraint of integer to an interval parameter. * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. */ int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var) { struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; return snd_interval_setinteger(constrs_interval(constrs, var)); } EXPORT_SYMBOL(snd_pcm_hw_constraint_integer); /** * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval * @runtime: PCM runtime instance * @var: hw_params variable to apply the range * @min: the minimal value * @max: the maximal value * * Apply the min/max range constraint to an interval parameter. * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. */ int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, unsigned int min, unsigned int max) { struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; struct snd_interval t; t.min = min; t.max = max; t.openmin = t.openmax = 0; t.integer = 0; return snd_interval_refine(constrs_interval(constrs, var), &t); } EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax); static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_pcm_hw_constraint_list *list = rule->private; return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask); } /** * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter * @runtime: PCM runtime instance * @cond: condition bits * @var: hw_params variable to apply the list constraint * @l: list * * Apply the list of constraints to an interval parameter. * * Return: Zero if successful, or a negative error code on failure. 
 */
int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       const struct snd_pcm_hw_constraint_list *l)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_list, (void *)l,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_list);

static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_ranges *r = rule->private;
	return snd_interval_ranges(hw_param_interval(params, rule->var),
				   r->count, r->ranges, r->mask);
}

/**
 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the list of range constraints
 * @r: ranges
 *
 * Apply the list of range constraints to an interval parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
				 unsigned int cond,
				 snd_pcm_hw_param_t var,
				 const struct snd_pcm_hw_constraint_ranges *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ranges, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);

static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
	unsigned int num = 0, den = 0;
	int err;

	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
				  r->nrats, r->rats, &num, &den);
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratnums constraint
 * @r: struct snd_ratnums constraints
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  const struct snd_pcm_hw_constraint_ratnums *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratnums, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);

static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
	unsigned int num = 0, den = 0;
	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
				      r->nrats, r->rats, &num, &den);
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratdens constraint
 * @r: struct snd_ratdens constraints
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  const struct snd_pcm_hw_constraint_ratdens *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratdens, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);

static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	unsigned int l = (unsigned long) rule->private;
	int width = l & 0xffff;
	unsigned int msbits = l >> 16;
	const struct snd_interval *i =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);

	if (!snd_interval_single(i))
		return 0;

	if ((snd_interval_value(i) == width) ||
	    (width == 0 && snd_interval_value(i) > msbits))
		params->msbits = min_not_zero(params->msbits, msbits);

	return 0;
}

/**
 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @width: sample bits width
 * @msbits: msbits width
 *
 * This constraint will set the number of most significant bits (msbits) if a
 * sample format with the specified width has been selected. If width is set
 * to 0, the msbits will be set for any sample format with a width larger
 * than the specified msbits.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
				 unsigned int cond,
				 unsigned int width,
				 unsigned int msbits)
{
	unsigned long l = (msbits << 16) | width;
	return snd_pcm_hw_rule_add(runtime, cond, -1,
				   snd_pcm_hw_rule_msbits,
				   (void*) l,
				   SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);

static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	unsigned long step = (unsigned long) rule->private;
	return snd_interval_step(hw_param_interval(params, rule->var), step);
}

/**
 * snd_pcm_hw_constraint_step - add a hw constraint step rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the step constraint
 * @step: step size
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       unsigned long step)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_step, (void *) step,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_step);

static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	static const unsigned int pow2_sizes[] = {
		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30 };
	return snd_interval_list(hw_param_interval(params, rule->var),
				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
}

/**
 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the power-of-2 constraint
 *
 * Return: Zero if successful, or a negative error code on failure.
*/ int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var) { return snd_pcm_hw_rule_add(runtime, cond, var, snd_pcm_hw_rule_pow2, NULL, var, -1); } EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2); static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { unsigned int base_rate = (unsigned int)(uintptr_t)rule->private; struct snd_interval *rate; rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); return snd_interval_list(rate, 1, &base_rate, 0); } /** * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling * @runtime: PCM runtime instance * @base_rate: the rate at which the hardware does not resample * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime, unsigned int base_rate) { return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE, SNDRV_PCM_HW_PARAM_RATE, snd_pcm_hw_rule_noresample_func, (void *)(uintptr_t)base_rate, SNDRV_PCM_HW_PARAM_RATE, -1); } EXPORT_SYMBOL(snd_pcm_hw_rule_noresample); static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { if (hw_is_mask(var)) { snd_mask_any(hw_param_mask(params, var)); params->cmask |= 1 << var; params->rmask |= 1 << var; return; } if (hw_is_interval(var)) { snd_interval_any(hw_param_interval(params, var)); params->cmask |= 1 << var; params->rmask |= 1 << var; return; } snd_BUG(); } void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params) { unsigned int k; memset(params, 0, sizeof(*params)); for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) _snd_pcm_hw_param_any(params, k); for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) _snd_pcm_hw_param_any(params, k); params->info = ~0U; } EXPORT_SYMBOL(_snd_pcm_hw_params_any); /** * snd_pcm_hw_param_value - return @params field @var value * @params: the hw_params instance * @var: parameter to retrieve * @dir: pointer to the direction (-1,0,1) or %NULL * * Return: The value for field @var if it's fixed in configuration space * defined by @params. -%EINVAL otherwise. 
*/ int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { if (hw_is_mask(var)) { const struct snd_mask *mask = hw_param_mask_c(params, var); if (!snd_mask_single(mask)) return -EINVAL; if (dir) *dir = 0; return snd_mask_value(mask); } if (hw_is_interval(var)) { const struct snd_interval *i = hw_param_interval_c(params, var); if (!snd_interval_single(i)) return -EINVAL; if (dir) *dir = i->openmin; return snd_interval_value(i); } return -EINVAL; } EXPORT_SYMBOL(snd_pcm_hw_param_value); void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { if (hw_is_mask(var)) { snd_mask_none(hw_param_mask(params, var)); params->cmask |= 1 << var; params->rmask |= 1 << var; } else if (hw_is_interval(var)) { snd_interval_none(hw_param_interval(params, var)); params->cmask |= 1 << var; params->rmask |= 1 << var; } else { snd_BUG(); } } EXPORT_SYMBOL(_snd_pcm_hw_param_setempty); static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { int changed; if (hw_is_mask(var)) changed = snd_mask_refine_first(hw_param_mask(params, var)); else if (hw_is_interval(var)) changed = snd_interval_refine_first(hw_param_interval(params, var)); else return -EINVAL; if (changed > 0) { params->cmask |= 1 << var; params->rmask |= 1 << var; } return changed; } /** * snd_pcm_hw_param_first - refine config space and return minimum value * @pcm: PCM instance * @params: the hw_params instance * @var: parameter to retrieve * @dir: pointer to the direction (-1,0,1) or %NULL * * Inside configuration space defined by @params remove from @var all * values > minimum. Reduce configuration space accordingly. * * Return: The minimum, or a negative error code on failure. */ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { int changed = _snd_pcm_hw_param_first(params, var); if (changed < 0) return changed; if (params->rmask) { int err = snd_pcm_hw_refine(pcm, params); if (err < 0) return err; } return snd_pcm_hw_param_value(params, var, dir); } EXPORT_SYMBOL(snd_pcm_hw_param_first); static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { int changed; if (hw_is_mask(var)) changed = snd_mask_refine_last(hw_param_mask(params, var)); else if (hw_is_interval(var)) changed = snd_interval_refine_last(hw_param_interval(params, var)); else return -EINVAL; if (changed > 0) { params->cmask |= 1 << var; params->rmask |= 1 << var; } return changed; } /** * snd_pcm_hw_param_last - refine config space and return maximum value * @pcm: PCM instance * @params: the hw_params instance * @var: parameter to retrieve * @dir: pointer to the direction (-1,0,1) or %NULL * * Inside configuration space defined by @params remove from @var all * values < maximum. Reduce configuration space accordingly. * * Return: The maximum, or a negative error code on failure. */ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { int changed = _snd_pcm_hw_param_last(params, var); if (changed < 0) return changed; if (params->rmask) { int err = snd_pcm_hw_refine(pcm, params); if (err < 0) return err; } return snd_pcm_hw_param_value(params, var, dir); } EXPORT_SYMBOL(snd_pcm_hw_param_last); /** * snd_pcm_hw_params_bits - Get the number of bits per the sample. 
* @p: hardware parameters * * Return: The number of bits per sample based on the format, * subformat and msbits the specified hw params has. */ int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p) { snd_pcm_subformat_t subformat = params_subformat(p); snd_pcm_format_t format = params_format(p); switch (format) { case SNDRV_PCM_FORMAT_S32_LE: case SNDRV_PCM_FORMAT_U32_LE: case SNDRV_PCM_FORMAT_S32_BE: case SNDRV_PCM_FORMAT_U32_BE: switch (subformat) { case SNDRV_PCM_SUBFORMAT_MSBITS_20: return 20; case SNDRV_PCM_SUBFORMAT_MSBITS_24: return 24; case SNDRV_PCM_SUBFORMAT_MSBITS_MAX: case SNDRV_PCM_SUBFORMAT_STD: default: break; } fallthrough; default: return snd_pcm_format_width(format); } } EXPORT_SYMBOL(snd_pcm_hw_params_bits); static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream, void *arg) { struct snd_pcm_runtime *runtime = substream->runtime; guard(pcm_stream_lock_irqsave)(substream); if (snd_pcm_running(substream) && snd_pcm_update_hw_ptr(substream) >= 0) runtime->status->hw_ptr %= runtime->buffer_size; else { runtime->status->hw_ptr = 0; runtime->hw_ptr_wrap = 0; } return 0; } static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream, void *arg) { struct snd_pcm_channel_info *info = arg; struct snd_pcm_runtime *runtime = substream->runtime; int width; if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) { info->offset = -1; return 0; } width = snd_pcm_format_physical_width(runtime->format); if (width < 0) return width; info->offset = 0; switch (runtime->access) { case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED: case SNDRV_PCM_ACCESS_RW_INTERLEAVED: info->first = info->channel * width; info->step = runtime->channels * width; break; case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED: case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED: { size_t size = runtime->dma_bytes / runtime->channels; info->first = info->channel * size * 8; info->step = width; break; } default: snd_BUG(); break; } return 0; } static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream, void *arg) { struct snd_pcm_hw_params *params = arg; snd_pcm_format_t format; int channels; ssize_t frame_size; params->fifo_size = substream->runtime->hw.fifo_size; if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) { format = params_format(params); channels = params_channels(params); frame_size = snd_pcm_format_size(format, channels); if (frame_size > 0) params->fifo_size /= frame_size; } return 0; } static int snd_pcm_lib_ioctl_sync_id(struct snd_pcm_substream *substream, void *arg) { static const unsigned char id[12] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; if (substream->runtime->std_sync_id) snd_pcm_set_sync_per_card(substream, arg, id, sizeof(id)); return 0; } /** * snd_pcm_lib_ioctl - a generic PCM ioctl callback * @substream: the pcm substream instance * @cmd: ioctl command * @arg: ioctl argument * * Processes the generic ioctl commands for PCM. * Can be passed as the ioctl callback for PCM ops. * * Return: Zero if successful, or a negative error code on failure. 
 */
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
		      unsigned int cmd, void *arg)
{
	switch (cmd) {
	case SNDRV_PCM_IOCTL1_RESET:
		return snd_pcm_lib_ioctl_reset(substream, arg);
	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
		return snd_pcm_lib_ioctl_channel_info(substream, arg);
	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
	case SNDRV_PCM_IOCTL1_SYNC_ID:
		return snd_pcm_lib_ioctl_sync_id(substream, arg);
	}
	return -ENXIO;
}
EXPORT_SYMBOL(snd_pcm_lib_ioctl);

/**
 * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
 *						under the acquired lock of the PCM substream.
 * @substream: the instance of the PCM substream.
 *
 * This function is called when a batch of audio data frames the same size as
 * the buffer period has been processed in audio data transmission.
 *
 * The call updates the runtime status with the latest position of audio data
 * transmission, checks for overrun and underrun of the buffer, wakes up user
 * processes waiting for available audio data frames, samples the audio
 * timestamp, and stops or drains the PCM substream according to the
 * configured thresholds.
 *
 * The function is intended for the case in which the PCM driver operates on
 * audio data frames under the acquired lock of the PCM substream, e.g. in a
 * callback of any operation of &snd_pcm_ops in process context. In interrupt
 * context, it's preferable to use ``snd_pcm_period_elapsed()`` instead, since
 * this function requires the lock of the PCM substream to be held in advance.
 *
 * Developers should note that some callbacks in &snd_pcm_ops may be invoked
 * by the call of this function:
 *
 * - .pointer - to retrieve the current position of audio data transmission by
 *   frame count or the XRUN state.
 * - .trigger - with SNDRV_PCM_TRIGGER_STOP at the XRUN or DRAINING state.
 * - .get_time_info - to retrieve the audio timestamp if needed.
 *
 * Even if more than one period has elapsed since the last call, call this
 * only once.
 */
void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;

	if (PCM_RUNTIME_CHECK(substream))
		return;
	runtime = substream->runtime;

	if (!snd_pcm_running(substream) ||
	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
		goto _end;

#ifdef CONFIG_SND_PCM_TIMER
	if (substream->timer_running)
		snd_timer_interrupt(substream->timer, 1);
#endif
 _end:
	snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
}
EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);

/**
 * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring the
 *			      lock of the PCM substream.
 * @substream: the instance of the PCM substream.
 *
 * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()``
 * except that it acquires the lock of the PCM substream by itself.
 *
 * It's typically called from an IRQ handler, when a hardware IRQ occurs, to
 * notify that a batch of audio data frames the same size as the buffer period
 * has been processed in audio data transmission.
 */
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
{
	if (snd_BUG_ON(!substream))
		return;

	guard(pcm_stream_lock_irqsave)(substream);
	snd_pcm_period_elapsed_under_stream_lock(substream);
}
EXPORT_SYMBOL(snd_pcm_period_elapsed);

/*
 * Wait until avail_min data becomes available
 * Returns a negative error code if any error occurs during operation.
 * The available space is stored in availp. When err = 0 and avail = 0
 * on the capture stream, it indicates the stream is in DRAINING state.
 */
static int wait_for_avail(struct snd_pcm_substream *substream,
			  snd_pcm_uframes_t *availp)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	wait_queue_entry_t wait;
	int err = 0;
	snd_pcm_uframes_t avail = 0;
	long wait_time, tout;

	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&runtime->tsleep, &wait);

	if (runtime->no_period_wakeup)
		wait_time = MAX_SCHEDULE_TIMEOUT;
	else {
		/* use wait time from substream if available */
		if (substream->wait_time) {
			wait_time = substream->wait_time;
		} else {
			wait_time = 100;
			if (runtime->rate) {
				long t = runtime->buffer_size * 1100 / runtime->rate;
				wait_time = max(t, wait_time);
			}
		}
		wait_time = msecs_to_jiffies(wait_time);
	}

	for (;;) {
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}

		/*
		 * We need to check first whether space has already become
		 * available (and thus the wakeup has already happened), to
		 * close the race of space becoming available right before
		 * we sleep. This check must happen after we have been added
		 * to the wait queue and after the current state has been
		 * set to TASK_INTERRUPTIBLE.
		 */
		avail = snd_pcm_avail(substream);
		if (avail >= runtime->twake)
			break;
		snd_pcm_stream_unlock_irq(substream);

		tout = schedule_timeout(wait_time);

		snd_pcm_stream_lock_irq(substream);
		set_current_state(TASK_INTERRUPTIBLE);
		switch (runtime->state) {
		case SNDRV_PCM_STATE_SUSPENDED:
			err = -ESTRPIPE;
			goto _endloop;
		case SNDRV_PCM_STATE_XRUN:
			err = -EPIPE;
			goto _endloop;
		case SNDRV_PCM_STATE_DRAINING:
			if (is_playback)
				err = -EPIPE;
			else
				avail = 0; /* indicate draining */
			goto _endloop;
		case SNDRV_PCM_STATE_OPEN:
		case SNDRV_PCM_STATE_SETUP:
		case SNDRV_PCM_STATE_DISCONNECTED:
			err = -EBADFD;
			goto _endloop;
		case SNDRV_PCM_STATE_PAUSED:
			continue;
		}
		if (!tout) {
			pcm_dbg(substream->pcm,
				"%s timeout (DMA or IRQ trouble?)\n",
				is_playback ?
"playback write" : "capture read"); err = -EIO; break; } } _endloop: set_current_state(TASK_RUNNING); remove_wait_queue(&runtime->tsleep, &wait); *availp = avail; return err; } typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream, int channel, unsigned long hwoff, struct iov_iter *iter, unsigned long bytes); typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *, snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f, bool); /* calculate the target DMA-buffer position to be written/read */ static void *get_dma_ptr(struct snd_pcm_runtime *runtime, int channel, unsigned long hwoff) { return runtime->dma_area + hwoff + channel * (runtime->dma_bytes / runtime->channels); } /* default copy ops for write; used for both interleaved and non- modes */ static int default_write_copy(struct snd_pcm_substream *substream, int channel, unsigned long hwoff, struct iov_iter *iter, unsigned long bytes) { if (copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff), bytes, iter) != bytes) return -EFAULT; return 0; } /* fill silence instead of copy data; called as a transfer helper * from __snd_pcm_lib_write() or directly from noninterleaved_copy() when * a NULL buffer is passed */ static int fill_silence(struct snd_pcm_substream *substream, int channel, unsigned long hwoff, struct iov_iter *iter, unsigned long bytes) { struct snd_pcm_runtime *runtime = substream->runtime; if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) return 0; if (substream->ops->fill_silence) return substream->ops->fill_silence(substream, channel, hwoff, bytes); snd_pcm_format_set_silence(runtime->format, get_dma_ptr(runtime, channel, hwoff), bytes_to_samples(runtime, bytes)); return 0; } /* default copy ops for read; used for both interleaved and non- modes */ static int default_read_copy(struct snd_pcm_substream *substream, int channel, unsigned long hwoff, struct iov_iter *iter, unsigned long bytes) { if (copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff), bytes, iter) != bytes) return -EFAULT; return 0; } /* call transfer with the filled iov_iter */ static int do_transfer(struct snd_pcm_substream *substream, int c, unsigned long hwoff, void *data, unsigned long bytes, pcm_transfer_f transfer, bool in_kernel) { struct iov_iter iter; int err, type; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) type = ITER_SOURCE; else type = ITER_DEST; if (in_kernel) { struct kvec kvec = { data, bytes }; iov_iter_kvec(&iter, type, &kvec, 1, bytes); return transfer(substream, c, hwoff, &iter, bytes); } err = import_ubuf(type, (__force void __user *)data, bytes, &iter); if (err) return err; return transfer(substream, c, hwoff, &iter, bytes); } /* call transfer function with the converted pointers and sizes; * for interleaved mode, it's one shot for all samples */ static int interleaved_copy(struct snd_pcm_substream *substream, snd_pcm_uframes_t hwoff, void *data, snd_pcm_uframes_t off, snd_pcm_uframes_t frames, pcm_transfer_f transfer, bool in_kernel) { struct snd_pcm_runtime *runtime = substream->runtime; /* convert to bytes */ hwoff = frames_to_bytes(runtime, hwoff); off = frames_to_bytes(runtime, off); frames = frames_to_bytes(runtime, frames); return do_transfer(substream, 0, hwoff, data + off, frames, transfer, in_kernel); } /* call transfer function with the converted pointers and sizes for each * non-interleaved channel; when buffer is NULL, silencing instead of copying */ static int noninterleaved_copy(struct snd_pcm_substream *substream, snd_pcm_uframes_t hwoff, void *data, 
snd_pcm_uframes_t off, snd_pcm_uframes_t frames, pcm_transfer_f transfer, bool in_kernel) { struct snd_pcm_runtime *runtime = substream->runtime; int channels = runtime->channels; void **bufs = data; int c, err; /* convert to bytes; note that it's not frames_to_bytes() here. * in non-interleaved mode, we copy for each channel, thus * each copy is n_samples bytes x channels = whole frames. */ off = samples_to_bytes(runtime, off); frames = samples_to_bytes(runtime, frames); hwoff = samples_to_bytes(runtime, hwoff); for (c = 0; c < channels; ++c, ++bufs) { if (!data || !*bufs) err = fill_silence(substream, c, hwoff, NULL, frames); else err = do_transfer(substream, c, hwoff, *bufs + off, frames, transfer, in_kernel); if (err < 0) return err; } return 0; } /* fill silence on the given buffer position; * called from snd_pcm_playback_silence() */ static int fill_silence_frames(struct snd_pcm_substream *substream, snd_pcm_uframes_t off, snd_pcm_uframes_t frames) { if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED || substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED) return interleaved_copy(substream, off, NULL, 0, frames, fill_silence, true); else return noninterleaved_copy(substream, off, NULL, 0, frames, fill_silence, true); } /* sanity-check for read/write methods */ static int pcm_sanity_check(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area)) return -EINVAL; if (runtime->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; return 0; } static int pcm_accessible_state(struct snd_pcm_runtime *runtime) { switch (runtime->state) { case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_RUNNING: case SNDRV_PCM_STATE_PAUSED: return 0; case SNDRV_PCM_STATE_XRUN: return -EPIPE; case SNDRV_PCM_STATE_SUSPENDED: return -ESTRPIPE; default: return -EBADFD; } } /* update to the given appl_ptr and call ack callback if needed; * when an error is returned, take back to the original value */ int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream, snd_pcm_uframes_t appl_ptr) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr; snd_pcm_sframes_t diff; int ret; if (old_appl_ptr == appl_ptr) return 0; if (appl_ptr >= runtime->boundary) return -EINVAL; /* * check if a rewind is requested by the application */ if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) { diff = appl_ptr - old_appl_ptr; if (diff >= 0) { if (diff > runtime->buffer_size) return -EINVAL; } else { if (runtime->boundary + diff > runtime->buffer_size) return -EINVAL; } } runtime->control->appl_ptr = appl_ptr; if (substream->ops->ack) { ret = substream->ops->ack(substream); if (ret < 0) { runtime->control->appl_ptr = old_appl_ptr; if (ret == -EPIPE) __snd_pcm_xrun(substream); return ret; } } trace_applptr(substream, old_appl_ptr, appl_ptr); return 0; } /* the common loop for read/write data */ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, void *data, bool interleaved, snd_pcm_uframes_t size, bool in_kernel) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t xfer = 0; snd_pcm_uframes_t offset = 0; snd_pcm_uframes_t avail; pcm_copy_f writer; pcm_transfer_f transfer; bool nonblock; bool is_playback; int err; err = pcm_sanity_check(substream); if (err < 0) return err; is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; if (interleaved) { 
if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED && runtime->channels > 1) return -EINVAL; writer = interleaved_copy; } else { if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) return -EINVAL; writer = noninterleaved_copy; } if (!data) { if (is_playback) transfer = fill_silence; else return -EINVAL; } else { if (substream->ops->copy) transfer = substream->ops->copy; else transfer = is_playback ? default_write_copy : default_read_copy; } if (size == 0) return 0; nonblock = !!(substream->f_flags & O_NONBLOCK); snd_pcm_stream_lock_irq(substream); err = pcm_accessible_state(runtime); if (err < 0) goto _end_unlock; runtime->twake = runtime->control->avail_min ? : 1; if (runtime->state == SNDRV_PCM_STATE_RUNNING) snd_pcm_update_hw_ptr(substream); /* * If size < start_threshold, wait indefinitely. Another * thread may start capture */ if (!is_playback && runtime->state == SNDRV_PCM_STATE_PREPARED && size >= runtime->start_threshold) { err = snd_pcm_start(substream); if (err < 0) goto _end_unlock; } avail = snd_pcm_avail(substream); while (size > 0) { snd_pcm_uframes_t frames, appl_ptr, appl_ofs; snd_pcm_uframes_t cont; if (!avail) { if (!is_playback && runtime->state == SNDRV_PCM_STATE_DRAINING) { snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); goto _end_unlock; } if (nonblock) { err = -EAGAIN; goto _end_unlock; } runtime->twake = min_t(snd_pcm_uframes_t, size, runtime->control->avail_min ? : 1); err = wait_for_avail(substream, &avail); if (err < 0) goto _end_unlock; if (!avail) continue; /* draining */ } frames = size > avail ? avail : size; appl_ptr = READ_ONCE(runtime->control->appl_ptr); appl_ofs = appl_ptr % runtime->buffer_size; cont = runtime->buffer_size - appl_ofs; if (frames > cont) frames = cont; if (snd_BUG_ON(!frames)) { err = -EINVAL; goto _end_unlock; } if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) { err = -EBUSY; goto _end_unlock; } snd_pcm_stream_unlock_irq(substream); if (!is_playback) snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU); err = writer(substream, appl_ofs, data, offset, frames, transfer, in_kernel); if (is_playback) snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE); snd_pcm_stream_lock_irq(substream); atomic_dec(&runtime->buffer_accessing); if (err < 0) goto _end_unlock; err = pcm_accessible_state(runtime); if (err < 0) goto _end_unlock; appl_ptr += frames; if (appl_ptr >= runtime->boundary) appl_ptr -= runtime->boundary; err = pcm_lib_apply_appl_ptr(substream, appl_ptr); if (err < 0) goto _end_unlock; offset += frames; size -= frames; xfer += frames; avail -= frames; if (is_playback && runtime->state == SNDRV_PCM_STATE_PREPARED && snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) { err = snd_pcm_start(substream); if (err < 0) goto _end_unlock; } } _end_unlock: runtime->twake = 0; if (xfer > 0 && err >= 0) snd_pcm_update_state(substream, runtime); snd_pcm_stream_unlock_irq(substream); return xfer > 0 ? 
(snd_pcm_sframes_t)xfer : err; } EXPORT_SYMBOL(__snd_pcm_lib_xfer); /* * standard channel mapping helpers */ /* default channel maps for multi-channel playbacks, up to 8 channels */ const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = { { .channels = 1, .map = { SNDRV_CHMAP_MONO } }, { .channels = 2, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, { .channels = 4, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, { .channels = 6, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR, SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } }, { .channels = 8, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR, SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE, SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } }, { } }; EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps); /* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */ const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = { { .channels = 1, .map = { SNDRV_CHMAP_MONO } }, { .channels = 2, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, { .channels = 4, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, { .channels = 6, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, { .channels = 8, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR, SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } }, { } }; EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps); static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch) { if (ch > info->max_channels) return false; return !info->channel_mask || (info->channel_mask & (1U << ch)); } static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = info->max_channels; uinfo->value.integer.min = 0; uinfo->value.integer.max = SNDRV_CHMAP_LAST; return 0; } /* get callback for channel map ctl element * stores the channel position firstly matching with the current channels */ static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); struct snd_pcm_substream *substream; const struct snd_pcm_chmap_elem *map; if (!info->chmap) return -EINVAL; substream = snd_pcm_chmap_substream(info, idx); if (!substream) return -ENODEV; memset(ucontrol->value.integer.value, 0, sizeof(long) * info->max_channels); if (!substream->runtime) return 0; /* no channels set */ for (map = info->chmap; map->channels; map++) { int i; if (map->channels == substream->runtime->channels && valid_chmap_channels(info, map->channels)) { for (i = 0; i < map->channels; i++) ucontrol->value.integer.value[i] = map->map[i]; return 0; } } return -EINVAL; } /* tlv callback for channel map ctl element * expands the pre-defined channel maps in a form of TLV */ static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int size, unsigned int __user *tlv) { struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); const struct snd_pcm_chmap_elem *map; unsigned int __user *dst; int c, count = 0; if (!info->chmap) return -EINVAL; if (size < 8) return -ENOMEM; if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv)) return -EFAULT; size -= 8; dst = tlv + 2; for (map = info->chmap; map->channels; map++) { int chs_bytes = map->channels * 4; if (!valid_chmap_channels(info, map->channels)) continue; if (size < 
8) return -ENOMEM; if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) || put_user(chs_bytes, dst + 1)) return -EFAULT; dst += 2; size -= 8; count += 8; if (size < chs_bytes) return -ENOMEM; size -= chs_bytes; count += chs_bytes; for (c = 0; c < map->channels; c++) { if (put_user(map->map[c], dst)) return -EFAULT; dst++; } } if (put_user(count, tlv + 1)) return -EFAULT; return 0; } static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol) { struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); info->pcm->streams[info->stream].chmap_kctl = NULL; kfree(info); } /** * snd_pcm_add_chmap_ctls - create channel-mapping control elements * @pcm: the assigned PCM instance * @stream: stream direction * @chmap: channel map elements (for query) * @max_channels: the max number of channels for the stream * @private_value: the value passed to each kcontrol's private_value field * @info_ret: store struct snd_pcm_chmap instance if non-NULL * * Create channel-mapping control elements assigned to the given PCM stream(s). * Return: Zero if successful, or a negative error value. */ int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream, const struct snd_pcm_chmap_elem *chmap, int max_channels, unsigned long private_value, struct snd_pcm_chmap **info_ret) { struct snd_pcm_chmap *info; struct snd_kcontrol_new knew = { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE | SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, .info = pcm_chmap_ctl_info, .get = pcm_chmap_ctl_get, .tlv.c = pcm_chmap_ctl_tlv, }; int err; if (WARN_ON(pcm->streams[stream].chmap_kctl)) return -EBUSY; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->pcm = pcm; info->stream = stream; info->chmap = chmap; info->max_channels = max_channels; if (stream == SNDRV_PCM_STREAM_PLAYBACK) knew.name = "Playback Channel Map"; else knew.name = "Capture Channel Map"; knew.device = pcm->device; knew.count = pcm->streams[stream].substream_count; knew.private_value = private_value; info->kctl = snd_ctl_new1(&knew, info); if (!info->kctl) { kfree(info); return -ENOMEM; } info->kctl->private_free = pcm_chmap_ctl_private_free; err = snd_ctl_add(pcm->card, info->kctl); if (err < 0) return err; pcm->streams[stream].chmap_kctl = info->kctl; if (info_ret) *info_ret = info; return 0; } EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
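
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * typical driver consumes the helpers above. The "foo" chip, its IRQ
 * handler and its pcm-new callback are hypothetical names; only
 * snd_pcm_period_elapsed(), snd_pcm_std_chmaps and snd_pcm_add_chmap_ctls()
 * come from this library. Kept under #if 0 so it is never built.
 */
#if 0
struct foo_chip {
	struct snd_pcm *pcm;
	struct snd_pcm_substream *running;	/* set by the .trigger op */
};

/* hardware interrupt: one period of audio data has been transferred */
static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_chip *chip = dev_id;

	/* takes the stream lock itself; usable from hard-IRQ context */
	if (chip->running)
		snd_pcm_period_elapsed(chip->running);
	return IRQ_HANDLED;
}

/* device setup: expose the standard channel maps for up to 8 channels */
static int foo_pcm_new(struct foo_chip *chip)
{
	return snd_pcm_add_chmap_ctls(chip->pcm, SNDRV_PCM_STREAM_PLAYBACK,
				      snd_pcm_std_chmaps, 8, 0, NULL);
}
#endif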
// SPDX-License-Identifier: GPL-2.0 /* * MIPI SyS-T framing protocol for STM devices. * Copyright (c) 2018, Intel Corporation. */ #include <linux/configfs.h> #include <linux/module.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/uuid.h> #include <linux/stm.h> #include "stm.h" enum sys_t_message_type { MIPI_SYST_TYPE_BUILD = 0, MIPI_SYST_TYPE_SHORT32, MIPI_SYST_TYPE_STRING, MIPI_SYST_TYPE_CATALOG, MIPI_SYST_TYPE_RAW = 6, MIPI_SYST_TYPE_SHORT64, MIPI_SYST_TYPE_CLOCK, MIPI_SYST_TYPE_SBD, }; enum sys_t_message_severity { MIPI_SYST_SEVERITY_MAX = 0, MIPI_SYST_SEVERITY_FATAL, MIPI_SYST_SEVERITY_ERROR, MIPI_SYST_SEVERITY_WARNING, MIPI_SYST_SEVERITY_INFO, MIPI_SYST_SEVERITY_USER1, MIPI_SYST_SEVERITY_USER2, MIPI_SYST_SEVERITY_DEBUG, }; enum sys_t_message_build_subtype { MIPI_SYST_BUILD_ID_COMPACT32 = 0, MIPI_SYST_BUILD_ID_COMPACT64, MIPI_SYST_BUILD_ID_LONG, }; enum sys_t_message_clock_subtype { MIPI_SYST_CLOCK_TRANSPORT_SYNC = 1, }; enum sys_t_message_string_subtype { MIPI_SYST_STRING_GENERIC = 1, MIPI_SYST_STRING_FUNCTIONENTER, MIPI_SYST_STRING_FUNCTIONEXIT, MIPI_SYST_STRING_INVALIDPARAM = 5, MIPI_SYST_STRING_ASSERT = 7, MIPI_SYST_STRING_PRINTF_32 = 11, MIPI_SYST_STRING_PRINTF_64 = 12, }; /** * enum sys_t_message_sbd_subtype - SyS-T SBD message subtypes * @MIPI_SYST_SBD_ID32: SBD message with 32-bit message ID * @MIPI_SYST_SBD_ID64: SBD message with 64-bit message ID * * Structured Binary Data messages can send information of arbitrary length, * together with ID's that describe BLOB's content and layout. */ enum sys_t_message_sbd_subtype { MIPI_SYST_SBD_ID32 = 0, MIPI_SYST_SBD_ID64 = 1, }; #define MIPI_SYST_TYPE(t) ((u32)(MIPI_SYST_TYPE_ ## t)) #define MIPI_SYST_SEVERITY(s) ((u32)(MIPI_SYST_SEVERITY_ ## s) << 4) #define MIPI_SYST_OPT_LOC BIT(8) #define MIPI_SYST_OPT_LEN BIT(9) #define MIPI_SYST_OPT_CHK BIT(10) #define MIPI_SYST_OPT_TS BIT(11) #define MIPI_SYST_UNIT(u) ((u32)(u) << 12) #define MIPI_SYST_ORIGIN(o) ((u32)(o) << 16) #define MIPI_SYST_OPT_GUID BIT(23) #define MIPI_SYST_SUBTYPE(s) ((u32)(MIPI_SYST_ ## s) << 24) #define MIPI_SYST_UNITLARGE(u) (MIPI_SYST_UNIT(u & 0xf) | \ MIPI_SYST_ORIGIN(u >> 4)) #define MIPI_SYST_TYPES(t, s) (MIPI_SYST_TYPE(t) | \ MIPI_SYST_SUBTYPE(t ## _ ## s)) #define DATA_HEADER (MIPI_SYST_TYPES(STRING, GENERIC) | \ MIPI_SYST_SEVERITY(INFO) | \ MIPI_SYST_OPT_GUID) #define CLOCK_SYNC_HEADER (MIPI_SYST_TYPES(CLOCK, TRANSPORT_SYNC) | \ MIPI_SYST_SEVERITY(MAX)) /* * SyS-T and ftrace headers are compatible to an extent that ftrace event ID * and args can be treated as SyS-T SBD message with 64-bit ID and arguments * BLOB right behind the header without modification. Bits [16:63] coming * together with message ID from ftrace aren't used by SBD and must be zeroed. 
 *
 *          0   15  16   23  24   31  32   39  40      63
 * ftrace:  <event_id>  <flags>  <preempt>  <-pid->  <---->  <args>
 * SBD:     <------- msg_id ------------------------------>  <BLOB>
 */
#define SBD_HEADER		(MIPI_SYST_TYPES(SBD, ID64)	| \
				 MIPI_SYST_SEVERITY(INFO)	| \
				 MIPI_SYST_OPT_GUID)

struct sys_t_policy_node {
	uuid_t		uuid;
	bool		do_len;
	unsigned long	ts_interval;
	unsigned long	clocksync_interval;
};

struct sys_t_output {
	struct sys_t_policy_node	node;
	unsigned long	ts_jiffies;
	unsigned long	clocksync_jiffies;
};

static void sys_t_policy_node_init(void *priv)
{
	struct sys_t_policy_node *pn = priv;

	uuid_gen(&pn->uuid);
}

static int sys_t_output_open(void *priv, struct stm_output *output)
{
	struct sys_t_policy_node *pn = priv;
	struct sys_t_output *opriv;

	opriv = kzalloc(sizeof(*opriv), GFP_ATOMIC);
	if (!opriv)
		return -ENOMEM;

	memcpy(&opriv->node, pn, sizeof(opriv->node));
	output->pdrv_private = opriv;

	return 0;
}

static void sys_t_output_close(struct stm_output *output)
{
	kfree(output->pdrv_private);
}

static ssize_t sys_t_policy_uuid_show(struct config_item *item,
				      char *page)
{
	struct sys_t_policy_node *pn = to_pdrv_policy_node(item);

	return sprintf(page, "%pU\n", &pn->uuid);
}

static ssize_t
sys_t_policy_uuid_store(struct config_item *item, const char *page,
			size_t count)
{
	struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
	struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
	int ret;

	mutex_lock(mutexp);
	ret = uuid_parse(page, &pn->uuid);
	mutex_unlock(mutexp);

	return ret < 0 ? ret : count;
}

CONFIGFS_ATTR(sys_t_policy_, uuid);

static ssize_t sys_t_policy_do_len_show(struct config_item *item,
					char *page)
{
	struct sys_t_policy_node *pn = to_pdrv_policy_node(item);

	return sprintf(page, "%d\n", pn->do_len);
}

static ssize_t
sys_t_policy_do_len_store(struct config_item *item, const char *page,
			  size_t count)
{
	struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
	struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
	int ret;

	mutex_lock(mutexp);
	ret = kstrtobool(page, &pn->do_len);
	mutex_unlock(mutexp);

	return ret ?
ret : count; } CONFIGFS_ATTR(sys_t_policy_, do_len); static ssize_t sys_t_policy_ts_interval_show(struct config_item *item, char *page) { struct sys_t_policy_node *pn = to_pdrv_policy_node(item); return sprintf(page, "%u\n", jiffies_to_msecs(pn->ts_interval)); } static ssize_t sys_t_policy_ts_interval_store(struct config_item *item, const char *page, size_t count) { struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex; struct sys_t_policy_node *pn = to_pdrv_policy_node(item); unsigned int ms; int ret; mutex_lock(mutexp); ret = kstrtouint(page, 10, &ms); mutex_unlock(mutexp); if (!ret) { pn->ts_interval = msecs_to_jiffies(ms); return count; } return ret; } CONFIGFS_ATTR(sys_t_policy_, ts_interval); static ssize_t sys_t_policy_clocksync_interval_show(struct config_item *item, char *page) { struct sys_t_policy_node *pn = to_pdrv_policy_node(item); return sprintf(page, "%u\n", jiffies_to_msecs(pn->clocksync_interval)); } static ssize_t sys_t_policy_clocksync_interval_store(struct config_item *item, const char *page, size_t count) { struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex; struct sys_t_policy_node *pn = to_pdrv_policy_node(item); unsigned int ms; int ret; mutex_lock(mutexp); ret = kstrtouint(page, 10, &ms); mutex_unlock(mutexp); if (!ret) { pn->clocksync_interval = msecs_to_jiffies(ms); return count; } return ret; } CONFIGFS_ATTR(sys_t_policy_, clocksync_interval); static struct configfs_attribute *sys_t_policy_attrs[] = { &sys_t_policy_attr_uuid, &sys_t_policy_attr_do_len, &sys_t_policy_attr_ts_interval, &sys_t_policy_attr_clocksync_interval, NULL, }; static inline bool sys_t_need_ts(struct sys_t_output *op) { if (op->node.ts_interval && time_after(jiffies, op->ts_jiffies + op->node.ts_interval)) { op->ts_jiffies = jiffies; return true; } return false; } static bool sys_t_need_clock_sync(struct sys_t_output *op) { if (op->node.clocksync_interval && time_after(jiffies, op->clocksync_jiffies + op->node.clocksync_interval)) { op->clocksync_jiffies = jiffies; return true; } return false; } static ssize_t sys_t_clock_sync(struct stm_data *data, unsigned int m, unsigned int c) { u32 header = CLOCK_SYNC_HEADER; const unsigned char nil = 0; u64 payload[2]; /* Clock value and frequency */ ssize_t sz; sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED, 4, (u8 *)&header); if (sz <= 0) return sz; payload[0] = ktime_get_real_ns(); payload[1] = NSEC_PER_SEC; sz = stm_data_write(data, m, c, false, &payload, sizeof(payload)); if (sz <= 0) return sz; data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil); return sizeof(header) + sizeof(payload); } static inline u32 sys_t_header(struct stm_source_data *source) { if (source && source->type == STM_FTRACE) return SBD_HEADER; return DATA_HEADER; } static ssize_t sys_t_write_data(struct stm_data *data, struct stm_source_data *source, unsigned int master, unsigned int channel, bool ts_first, const void *buf, size_t count) { ssize_t sz; const unsigned char nil = 0; /* * Ftrace is zero-copy compatible with SyS-T SBD, but requires * special handling of first 64 bits. Trim and send them separately * to avoid damage on original ftrace buffer. 
*/ if (source && source->type == STM_FTRACE) { u64 compat_ftrace_header; ssize_t header_sz; ssize_t buf_sz; if (count < sizeof(compat_ftrace_header)) return -EINVAL; /* SBD only makes use of low 16 bits (event ID) from ftrace event */ compat_ftrace_header = *(u64 *)buf & 0xffff; header_sz = stm_data_write(data, master, channel, false, &compat_ftrace_header, sizeof(compat_ftrace_header)); if (header_sz != sizeof(compat_ftrace_header)) return header_sz; buf_sz = stm_data_write(data, master, channel, false, buf + header_sz, count - header_sz); if (buf_sz != count - header_sz) return buf_sz; sz = header_sz + buf_sz; } else { sz = stm_data_write(data, master, channel, false, buf, count); } if (sz <= 0) return sz; data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil); return sz; } static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output, unsigned int chan, const char *buf, size_t count, struct stm_source_data *source) { struct sys_t_output *op = output->pdrv_private; unsigned int c = output->channel + chan; unsigned int m = output->master; u32 header = sys_t_header(source); u8 uuid[UUID_SIZE]; ssize_t sz; /* We require an existing policy node to proceed */ if (!op) return -EINVAL; if (sys_t_need_clock_sync(op)) { sz = sys_t_clock_sync(data, m, c); if (sz <= 0) return sz; } if (op->node.do_len) header |= MIPI_SYST_OPT_LEN; if (sys_t_need_ts(op)) header |= MIPI_SYST_OPT_TS; /* * STP framing rules for SyS-T frames: * * the first packet of the SyS-T frame is timestamped; * * the last packet is a FLAG. */ /* Message layout: HEADER / GUID / [LENGTH /][TIMESTAMP /] DATA */ /* HEADER */ sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED, 4, (u8 *)&header); if (sz <= 0) return sz; /* GUID */ export_uuid(uuid, &op->node.uuid); sz = stm_data_write(data, m, c, false, uuid, sizeof(op->node.uuid)); if (sz <= 0) return sz; /* [LENGTH] */ if (op->node.do_len) { u16 length = count; sz = data->packet(data, m, c, STP_PACKET_DATA, 0, 2, (u8 *)&length); if (sz <= 0) return sz; } /* [TIMESTAMP] */ if (header & MIPI_SYST_OPT_TS) { u64 ts = ktime_get_real_ns(); sz = stm_data_write(data, m, c, false, &ts, sizeof(ts)); if (sz <= 0) return sz; } /* DATA */ return sys_t_write_data(data, source, m, c, false, buf, count); } static const struct stm_protocol_driver sys_t_pdrv = { .owner = THIS_MODULE, .name = "p_sys-t", .priv_sz = sizeof(struct sys_t_policy_node), .write = sys_t_write, .policy_attr = sys_t_policy_attrs, .policy_node_init = sys_t_policy_node_init, .output_open = sys_t_output_open, .output_close = sys_t_output_close, }; static int sys_t_stm_init(void) { return stm_register_protocol(&sys_t_pdrv); } static void sys_t_stm_exit(void) { stm_unregister_protocol(&sys_t_pdrv); } module_init(sys_t_stm_init); module_exit(sys_t_stm_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MIPI SyS-T STM framing protocol driver"); MODULE_AUTHOR("Alexander Shishkin <[email protected]>");
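
/*
 * Editor's worked example (not part of the original driver): how the header
 * words defined above decompose into SyS-T fields via the MIPI_SYST_*
 * macros. The constants follow from the enum values in this file and could
 * be checked at build time; the asserts are illustrative only and are kept
 * under #if 0 so they are never built.
 */
#if 0
/* type 2 (STRING) | severity 4 (INFO) << 4 | GUID BIT(23) | subtype 1 (GENERIC) << 24 */
static_assert(DATA_HEADER == 0x01800042);
/* type 8 (CLOCK) | severity 0 (MAX) | subtype 1 (TRANSPORT_SYNC) << 24 */
static_assert(CLOCK_SYNC_HEADER == 0x01000008);
/* type 9 (SBD) | severity 4 (INFO) << 4 | GUID BIT(23) | subtype 1 (ID64) << 24 */
static_assert(SBD_HEADER == 0x01800049);
#endif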
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2023-2024 Oracle. All Rights Reserved. * Author: Darrick J. Wong <[email protected]> */ #ifndef __XFS_SCRUB_DIRTREE_H__ #define __XFS_SCRUB_DIRTREE_H__ /* * Each of these represents one parent pointer path step in a chain going * up towards the directory tree root. These are stored inside an xfarray. */ struct xchk_dirpath_step { /* Directory entry name associated with this parent link. */ xfblob_cookie name_cookie; unsigned int name_len; /* Handle of the parent directory. */ struct xfs_parent_rec pptr_rec; }; enum xchk_dirpath_outcome { XCHK_DIRPATH_SCANNING = 0, /* still being put together */ XCHK_DIRPATH_DELETE, /* delete this path */ XCHK_DIRPATH_CORRUPT, /* corruption detected in path */ XCHK_DIRPATH_LOOP, /* cycle detected further up */ XCHK_DIRPATH_STALE, /* path is stale */ XCHK_DIRPATH_OK, /* path reaches the root */ XREP_DIRPATH_DELETING, /* path is being deleted */ XREP_DIRPATH_DELETED, /* path has been deleted */ XREP_DIRPATH_ADOPTING, /* path is being adopted */ XREP_DIRPATH_ADOPTED, /* path has been adopted */ }; /* * Each of these represents one parent pointer path out of the directory being * scanned. These exist in-core, and hopefully there aren't more than a * handful of them. */ struct xchk_dirpath { struct list_head list; /* Index of the first step in this path. */ xfarray_idx_t first_step; /* Index of the second step in this path. */ xfarray_idx_t second_step; /* Inodes seen while walking this path. */ struct xino_bitmap seen_inodes; /* Number of steps in this path. */ unsigned int nr_steps; /* Which path is this? */ unsigned int path_nr; /* What did we conclude from following this path? */ enum xchk_dirpath_outcome outcome; }; struct xchk_dirtree_outcomes { /* Number of XCHK_DIRPATH_DELETE */ unsigned int bad; /* Number of XCHK_DIRPATH_CORRUPT or XCHK_DIRPATH_LOOP */ unsigned int suspect; /* Number of XCHK_DIRPATH_OK */ unsigned int good; /* Directory needs to be added to lost+found */ bool needs_adoption; }; struct xchk_dirtree { struct xfs_scrub *sc; /* Root inode that we're looking for. */ xfs_ino_t root_ino; /* * This is the inode that we're scanning. The live update hook can * continue to be called after xchk_teardown drops sc->ip but before * it calls buf_cleanup, so we keep a copy. */ xfs_ino_t scan_ino; /* * If we start deleting redundant paths to this subdirectory, this is * the inode number of the surviving parent and the dotdot entry will * be set to this value. If the value is NULLFSINO, then use @root_ino * as a stand-in until the orphanage can adopt the subdirectory. */ xfs_ino_t parent_ino; /* Scratch buffer for scanning pptr xattrs */ struct xfs_parent_rec pptr_rec; struct xfs_da_args pptr_args; /* Name buffer */ struct xfs_name xname; char namebuf[MAXNAMELEN]; /* Information for reparenting this directory. */ struct xrep_adoption adoption; /* * Hook into directory updates so that we can receive live updates * from other writer threads. */ struct xfs_dir_hook dhook; /* Parent pointer update arguments. */ struct xfs_parent_args ppargs; /* lock for everything below here */ struct mutex lock; /* buffer for the live update functions to use for dirent names */ struct xfs_name hook_xname; unsigned char hook_namebuf[MAXNAMELEN]; /* * All path steps observed during this scan. Each of the path * steps for a particular pathwalk are recorded in sequential * order in the xfarray. 
A pathwalk ends either with a step * pointing to the root directory (success) or pointing to NULLFSINO * (loop detected, empty dir detected, etc). */ struct xfarray *path_steps; /* All names observed during this scan. */ struct xfblob *path_names; /* All paths being tracked by this scanner. */ struct list_head path_list; /* Number of paths in path_list. */ unsigned int nr_paths; /* Number of parents found by a pptr scan. */ unsigned int parents_found; /* Have the path data been invalidated by a concurrent update? */ bool stale:1; /* Has the scan been aborted? */ bool aborted:1; }; #define xchk_dirtree_for_each_path_safe(dl, path, n) \ list_for_each_entry_safe((path), (n), &(dl)->path_list, list) #define xchk_dirtree_for_each_path(dl, path) \ list_for_each_entry((path), &(dl)->path_list, list) bool xchk_dirtree_parentless(const struct xchk_dirtree *dl); int xchk_dirtree_find_paths_to_root(struct xchk_dirtree *dl); int xchk_dirpath_append(struct xchk_dirtree *dl, struct xfs_inode *ip, struct xchk_dirpath *path, const struct xfs_name *name, const struct xfs_parent_rec *pptr); void xchk_dirtree_evaluate(struct xchk_dirtree *dl, struct xchk_dirtree_outcomes *oc); #endif /* __XFS_SCRUB_DIRTREE_H__ */
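
/*
 * Editor's illustrative sketch (not part of the original header): a
 * hypothetical consumer tallying a finished scan using the declarations
 * above. Only struct xchk_dirtree, struct xchk_dirtree_outcomes and
 * xchk_dirtree_evaluate() are real; the policy applied here is invented for
 * illustration. Kept under #if 0 so it is never built.
 */
#if 0
static bool example_dirtree_needs_repair(struct xchk_dirtree *dl)
{
	struct xchk_dirtree_outcomes oc;

	xchk_dirtree_evaluate(dl, &oc);

	/* a healthy subdirectory has exactly one good path to the root */
	return oc.bad > 0 || oc.suspect > 0 || oc.good != 1 ||
	       oc.needs_adoption;
}
#endif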
/* SPDX-License-Identifier: GPL-2.0 */ /* * SE/HMC Drive FTP Device * * Copyright IBM Corp. 2013 * Author(s): Ralf Hoppe ([email protected]) */ #ifndef __HMCDRV_DEV_H__ #define __HMCDRV_DEV_H__ int hmcdrv_dev_init(void); void hmcdrv_dev_exit(void); #endif /* __HMCDRV_DEV_H__ */
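
/*
 * Editor's illustrative sketch (not part of the original header): the
 * init/exit pair above is the whole device-node API; a module core would
 * typically pair the calls like this. The surrounding function names are
 * hypothetical. Kept under #if 0 so it is never built.
 */
#if 0
static int __init example_hmcdrv_module_init(void)
{
	/* create the HMC drive FTP device node; undone by hmcdrv_dev_exit() */
	return hmcdrv_dev_init();
}

static void __exit example_hmcdrv_module_exit(void)
{
	hmcdrv_dev_exit();
}
#endif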
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2014 Imagination Technologies * Author: Paul Burton <[email protected]> */ #include <linux/cpuhotplug.h> #include <linux/init.h> #include <linux/percpu.h> #include <linux/slab.h> #include <linux/suspend.h> #include <asm/asm-offsets.h> #include <asm/cacheflush.h> #include <asm/cacheops.h> #include <asm/idle.h> #include <asm/mips-cps.h> #include <asm/mipsmtregs.h> #include <asm/pm.h> #include <asm/pm-cps.h> #include <asm/regdef.h> #include <asm/smp-cps.h> #include <asm/uasm.h> /* * cps_nc_entry_fn - type of a generated non-coherent state entry function * @online: the count of online coupled VPEs * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count * * The code entering & exiting non-coherent states is generated at runtime * using uasm, in order to ensure that the compiler cannot insert a stray * memory access at an unfortunate time and to allow the generation of optimal * core-specific code particularly for cache routines. If coupled_coherence * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state, * returns the number of VPEs that were in the wait state at the point this * VPE left it. Returns garbage if coupled_coherence is zero or this is not * the entry function for CPS_PM_NC_WAIT. */ typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count); /* * The entry point of the generated non-coherent idle state entry/exit * functions. Actually per-core rather than per-CPU. */ static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT], nc_asm_enter); /* Bitmap indicating which states are supported by the system */ static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); /* * Indicates the number of coupled VPEs ready to operate in a non-coherent * state. Actually per-core rather than per-CPU. */ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); /* Indicates online CPUs coupled with the current CPU */ static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); /* * Used to synchronize entry to deep idle states. Actually per-core rather * than per-CPU. */ static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier); /* Saved CPU state across the CPS_PM_POWER_GATED state */ DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state); /* A somewhat arbitrary number of labels & relocs for uasm */ static struct uasm_label labels[32]; static struct uasm_reloc relocs[32]; bool cps_pm_support_state(enum cps_pm_state state) { return test_bit(state, state_support); } static void coupled_barrier(atomic_t *a, unsigned online) { /* * This function is effectively the same as * cpuidle_coupled_parallel_barrier, which can't be used here since * there's no cpuidle device. 
*/ if (!coupled_coherence) return; smp_mb__before_atomic(); atomic_inc(a); while (atomic_read(a) < online) cpu_relax(); if (atomic_inc_return(a) == online * 2) { atomic_set(a, 0); return; } while (atomic_read(a) > online) cpu_relax(); } int cps_pm_enter_state(enum cps_pm_state state) { unsigned cpu = smp_processor_id(); unsigned core = cpu_core(&current_cpu_data); unsigned online, left; cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled); u32 *core_ready_count, *nc_core_ready_count; void *nc_addr; cps_nc_entry_fn entry; struct core_boot_config *core_cfg; struct vpe_boot_config *vpe_cfg; /* Check that there is an entry function for this state */ entry = per_cpu(nc_asm_enter, core)[state]; if (!entry) return -EINVAL; /* Calculate which coupled CPUs (VPEs) are online */ #if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6) if (cpu_online(cpu)) { cpumask_and(coupled_mask, cpu_online_mask, &cpu_sibling_map[cpu]); online = cpumask_weight(coupled_mask); cpumask_clear_cpu(cpu, coupled_mask); } else #endif { cpumask_clear(coupled_mask); online = 1; } /* Setup the VPE to run mips_cps_pm_restore when started again */ if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { /* Power gating relies upon CPS SMP */ if (!mips_cps_smp_in_use()) return -EINVAL; core_cfg = &mips_cps_core_bootcfg[core]; vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)]; vpe_cfg->pc = (unsigned long)mips_cps_pm_restore; vpe_cfg->gp = (unsigned long)current_thread_info(); vpe_cfg->sp = 0; } /* Indicate that this CPU might not be coherent */ cpumask_clear_cpu(cpu, &cpu_coherent_mask); smp_mb__after_atomic(); /* Create a non-coherent mapping of the core ready_count */ core_ready_count = per_cpu(ready_count, core); nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), (unsigned long)core_ready_count); nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); nc_core_ready_count = nc_addr; /* Ensure ready_count is zero-initialised before the assembly runs */ WRITE_ONCE(*nc_core_ready_count, 0); coupled_barrier(&per_cpu(pm_barrier, core), online); /* Run the generated entry code */ left = entry(online, nc_core_ready_count); /* Remove the non-coherent mapping of ready_count */ kunmap_noncoherent(); /* Indicate that this CPU is definitely coherent */ cpumask_set_cpu(cpu, &cpu_coherent_mask); /* * If this VPE is the first to leave the non-coherent wait state then * it needs to wake up any coupled VPEs still running their wait * instruction so that they return to cpuidle, which can then complete * coordination between the coupled VPEs & provide the governor with * a chance to reflect on the length of time the VPEs were in the * idle state. 
*/ if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online)) arch_send_call_function_ipi_mask(coupled_mask); return 0; } static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl, struct uasm_reloc **pr, const struct cache_desc *cache, unsigned op, int lbl) { unsigned cache_size = cache->ways << cache->waybit; unsigned i; const unsigned unroll_lines = 32; /* If the cache isn't present this function has it easy */ if (cache->flags & MIPS_CACHE_NOT_PRESENT) return; /* Load base address */ UASM_i_LA(pp, GPR_T0, (long)CKSEG0); /* Calculate end address */ if (cache_size < 0x8000) uasm_i_addiu(pp, GPR_T1, GPR_T0, cache_size); else UASM_i_LA(pp, GPR_T1, (long)(CKSEG0 + cache_size)); /* Start of cache op loop */ uasm_build_label(pl, *pp, lbl); /* Generate the cache ops */ for (i = 0; i < unroll_lines; i++) { if (cpu_has_mips_r6) { uasm_i_cache(pp, op, 0, GPR_T0); uasm_i_addiu(pp, GPR_T0, GPR_T0, cache->linesz); } else { uasm_i_cache(pp, op, i * cache->linesz, GPR_T0); } } if (!cpu_has_mips_r6) /* Update the base address */ uasm_i_addiu(pp, GPR_T0, GPR_T0, unroll_lines * cache->linesz); /* Loop if we haven't reached the end address yet */ uasm_il_bne(pp, pr, GPR_T0, GPR_T1, lbl); uasm_i_nop(pp); } static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, struct uasm_reloc **pr, const struct cpuinfo_mips *cpu_info, int lbl) { unsigned i, fsb_size = 8; unsigned num_loads = (fsb_size * 3) / 2; unsigned line_stride = 2; unsigned line_size = cpu_info->dcache.linesz; unsigned perf_counter, perf_event; unsigned revision = cpu_info->processor_id & PRID_REV_MASK; /* * Determine whether this CPU requires an FSB flush, and if so which * performance counter/event reflect stalls due to a full FSB. */ switch (__get_cpu_type(cpu_info->cputype)) { case CPU_INTERAPTIV: perf_counter = 1; perf_event = 51; break; case CPU_PROAPTIV: /* Newer proAptiv cores don't require this workaround */ if (revision >= PRID_REV_ENCODE_332(1, 1, 0)) return 0; /* On older ones it's unavailable */ return -1; default: /* Assume that the CPU does not need this workaround */ return 0; } /* * Ensure that the fill/store buffer (FSB) is not holding the results * of a prefetch, since if it is then the CPC sequencer may become * stuck in the D3 (ClrBus) state whilst entering a low power state. */ /* Preserve perf counter setup */ uasm_i_mfc0(pp, GPR_T2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ uasm_i_mfc0(pp, GPR_T3, 25, (perf_counter * 2) + 1); /* PerfCntN */ /* Setup perf counter to count FSB full pipeline stalls */ uasm_i_addiu(pp, GPR_T0, GPR_ZERO, (perf_event << 5) | 0xf); uasm_i_mtc0(pp, GPR_T0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ uasm_i_ehb(pp); uasm_i_mtc0(pp, GPR_ZERO, 25, (perf_counter * 2) + 1); /* PerfCntN */ uasm_i_ehb(pp); /* Base address for loads */ UASM_i_LA(pp, GPR_T0, (long)CKSEG0); /* Start of clear loop */ uasm_build_label(pl, *pp, lbl); /* Perform some loads to fill the FSB */ for (i = 0; i < num_loads; i++) uasm_i_lw(pp, GPR_ZERO, i * line_size * line_stride, GPR_T0); /* * Invalidate the new D-cache entries so that the cache will need * refilling (via the FSB) if the loop is executed again. 
*/ for (i = 0; i < num_loads; i++) { uasm_i_cache(pp, Hit_Invalidate_D, i * line_size * line_stride, GPR_T0); uasm_i_cache(pp, Hit_Writeback_Inv_SD, i * line_size * line_stride, GPR_T0); } /* Barrier ensuring previous cache invalidates are complete */ uasm_i_sync(pp, __SYNC_full); uasm_i_ehb(pp); /* Check whether the pipeline stalled due to the FSB being full */ uasm_i_mfc0(pp, GPR_T1, 25, (perf_counter * 2) + 1); /* PerfCntN */ /* Loop if it didn't */ uasm_il_beqz(pp, pr, GPR_T1, lbl); uasm_i_nop(pp); /* Restore perf counter 1. The count may well now be wrong... */ uasm_i_mtc0(pp, GPR_T2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ uasm_i_ehb(pp); uasm_i_mtc0(pp, GPR_T3, 25, (perf_counter * 2) + 1); /* PerfCntN */ uasm_i_ehb(pp); return 0; } static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl, struct uasm_reloc **pr, unsigned r_addr, int lbl) { uasm_i_lui(pp, GPR_T0, uasm_rel_hi(0x80000000)); uasm_build_label(pl, *pp, lbl); uasm_i_ll(pp, GPR_T1, 0, r_addr); uasm_i_or(pp, GPR_T1, GPR_T1, GPR_T0); uasm_i_sc(pp, GPR_T1, 0, r_addr); uasm_il_beqz(pp, pr, GPR_T1, lbl); uasm_i_nop(pp); } static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) { struct uasm_label *l = labels; struct uasm_reloc *r = relocs; u32 *buf, *p; const unsigned r_online = GPR_A0; const unsigned r_nc_count = GPR_A1; const unsigned r_pcohctl = GPR_T8; const unsigned max_instrs = 256; unsigned cpc_cmd; int err; enum { lbl_incready = 1, lbl_poll_cont, lbl_secondary_hang, lbl_disable_coherence, lbl_flush_fsb, lbl_invicache, lbl_flushdcache, lbl_hang, lbl_set_cont, lbl_secondary_cont, lbl_decready, }; /* Allocate a buffer to hold the generated code */ p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL); if (!buf) return NULL; /* Clear labels & relocs ready for (re)use */ memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { /* Power gating relies upon CPS SMP */ if (!mips_cps_smp_in_use()) goto out_err; /* * Save CPU state. Note the non-standard calling convention * with the return address placed in v0 to avoid clobbering * the ra register before it is saved. */ UASM_i_LA(&p, GPR_T0, (long)mips_cps_pm_save); uasm_i_jalr(&p, GPR_V0, GPR_T0); uasm_i_nop(&p); } /* * Load addresses of required CM & CPC registers. This is done early * because they're needed in both the enable & disable coherence steps * but in the coupled case the enable step will only run on one VPE. */ UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence()); if (coupled_coherence) { /* Increment ready_count */ uasm_i_sync(&p, __SYNC_mb); uasm_build_label(&l, p, lbl_incready); uasm_i_ll(&p, GPR_T1, 0, r_nc_count); uasm_i_addiu(&p, GPR_T2, GPR_T1, 1); uasm_i_sc(&p, GPR_T2, 0, r_nc_count); uasm_il_beqz(&p, &r, GPR_T2, lbl_incready); uasm_i_addiu(&p, GPR_T1, GPR_T1, 1); /* Barrier ensuring all CPUs see the updated r_nc_count value */ uasm_i_sync(&p, __SYNC_mb); /* * If this is the last VPE to become ready for non-coherence * then it should branch below. */ uasm_il_beq(&p, &r, GPR_T1, r_online, lbl_disable_coherence); uasm_i_nop(&p); if (state < CPS_PM_POWER_GATED) { /* * Otherwise this is not the last VPE to become ready * for non-coherence. It needs to wait until coherence * has been disabled before proceeding, which it will do * by polling for the top bit of ready_count being set. 
*/ uasm_i_addiu(&p, GPR_T1, GPR_ZERO, -1); uasm_build_label(&l, p, lbl_poll_cont); uasm_i_lw(&p, GPR_T0, 0, r_nc_count); uasm_il_bltz(&p, &r, GPR_T0, lbl_secondary_cont); uasm_i_ehb(&p); if (cpu_has_mipsmt) uasm_i_yield(&p, GPR_ZERO, GPR_T1); uasm_il_b(&p, &r, lbl_poll_cont); uasm_i_nop(&p); } else { /* * The core will lose power & this VPE will not continue * so it can simply halt here. */ if (cpu_has_mipsmt) { /* Halt the VPE via C0 tchalt register */ uasm_i_addiu(&p, GPR_T0, GPR_ZERO, TCHALT_H); uasm_i_mtc0(&p, GPR_T0, 2, 4); } else if (cpu_has_vp) { /* Halt the VP via the CPC VP_STOP register */ unsigned int vpe_id; vpe_id = cpu_vpe_id(&cpu_data[cpu]); uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << vpe_id); UASM_i_LA(&p, GPR_T1, (long)addr_cpc_cl_vp_stop()); uasm_i_sw(&p, GPR_T0, 0, GPR_T1); } else { BUG(); } uasm_build_label(&l, p, lbl_secondary_hang); uasm_il_b(&p, &r, lbl_secondary_hang); uasm_i_nop(&p); } } /* * This is the point of no return - this VPE will now proceed to * disable coherence. At this point we *must* be sure that no other * VPE within the core will interfere with the L1 dcache. */ uasm_build_label(&l, p, lbl_disable_coherence); /* Invalidate the L1 icache */ cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache, Index_Invalidate_I, lbl_invicache); /* Writeback & invalidate the L1 dcache */ cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache, Index_Writeback_Inv_D, lbl_flushdcache); /* Barrier ensuring previous cache invalidates are complete */ uasm_i_sync(&p, __SYNC_full); uasm_i_ehb(&p); if (mips_cm_revision() < CM_REV_CM3) { /* * Disable all but self interventions. The load from COHCTL is * defined by the interAptiv & proAptiv SUMs as ensuring that the * operation resulting from the preceding store is complete. */ uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << cpu_core(&cpu_data[cpu])); uasm_i_sw(&p, GPR_T0, 0, r_pcohctl); uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); /* Barrier to ensure write to coherence control is complete */ uasm_i_sync(&p, __SYNC_full); uasm_i_ehb(&p); } /* Disable coherence */ uasm_i_sw(&p, GPR_ZERO, 0, r_pcohctl); uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); if (state >= CPS_PM_CLOCK_GATED) { err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu], lbl_flush_fsb); if (err) goto out_err; /* Determine the CPC command to issue */ switch (state) { case CPS_PM_CLOCK_GATED: cpc_cmd = CPC_Cx_CMD_CLOCKOFF; break; case CPS_PM_POWER_GATED: cpc_cmd = CPC_Cx_CMD_PWRDOWN; break; default: BUG(); goto out_err; } /* Issue the CPC command */ UASM_i_LA(&p, GPR_T0, (long)addr_cpc_cl_cmd()); uasm_i_addiu(&p, GPR_T1, GPR_ZERO, cpc_cmd); uasm_i_sw(&p, GPR_T1, 0, GPR_T0); if (state == CPS_PM_POWER_GATED) { /* If anything goes wrong just hang */ uasm_build_label(&l, p, lbl_hang); uasm_il_b(&p, &r, lbl_hang); uasm_i_nop(&p); /* * There's no point generating more code, the core is * powered down & if powered back up will run from the * reset vector not from here. */ goto gen_done; } /* Barrier to ensure write to CPC command is complete */ uasm_i_sync(&p, __SYNC_full); uasm_i_ehb(&p); } if (state == CPS_PM_NC_WAIT) { /* * At this point it is safe for all VPEs to proceed with * execution. This VPE will set the top bit of ready_count * to indicate to the other VPEs that they may continue. */ if (coupled_coherence) cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont); /* * VPEs which did not disable coherence will continue * executing, after coherence has been disabled, from this * point. 
*/ uasm_build_label(&l, p, lbl_secondary_cont); /* Now perform our wait */ uasm_i_wait(&p, 0); } /* * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs * will run this. The first will actually re-enable coherence & the * rest will just be performing a rather unusual nop. */ uasm_i_addiu(&p, GPR_T0, GPR_ZERO, mips_cm_revision() < CM_REV_CM3 ? CM_GCR_Cx_COHERENCE_COHDOMAINEN : CM3_GCR_Cx_COHERENCE_COHEN); uasm_i_sw(&p, GPR_T0, 0, r_pcohctl); uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); /* Barrier to ensure write to coherence control is complete */ uasm_i_sync(&p, __SYNC_full); uasm_i_ehb(&p); if (coupled_coherence && (state == CPS_PM_NC_WAIT)) { /* Decrement ready_count */ uasm_build_label(&l, p, lbl_decready); uasm_i_sync(&p, __SYNC_mb); uasm_i_ll(&p, GPR_T1, 0, r_nc_count); uasm_i_addiu(&p, GPR_T2, GPR_T1, -1); uasm_i_sc(&p, GPR_T2, 0, r_nc_count); uasm_il_beqz(&p, &r, GPR_T2, lbl_decready); uasm_i_andi(&p, GPR_V0, GPR_T1, (1 << fls(smp_num_siblings)) - 1); /* Barrier ensuring all CPUs see the updated r_nc_count value */ uasm_i_sync(&p, __SYNC_mb); } if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) { /* * At this point it is safe for all VPEs to proceed with * execution. This VPE will set the top bit of ready_count * to indicate to the other VPEs that they may continue. */ cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont); /* * This core will be reliant upon another core sending a * power-up command to the CPC in order to resume operation. * Thus an arbitrary VPE can't trigger the core leaving the * idle state and the one that disables coherence might as well * be the one to re-enable it. The rest will continue from here * after that has been done. */ uasm_build_label(&l, p, lbl_secondary_cont); /* Barrier ensuring all CPUs see the updated r_nc_count value */ uasm_i_sync(&p, __SYNC_mb); } /* The core is coherent, time to return to C code */ uasm_i_jr(&p, GPR_RA); uasm_i_nop(&p); gen_done: /* Ensure the code didn't exceed the resources allocated for it */ BUG_ON((p - buf) > max_instrs); BUG_ON((l - labels) > ARRAY_SIZE(labels)); BUG_ON((r - relocs) > ARRAY_SIZE(relocs)); /* Patch branch offsets */ uasm_resolve_relocs(relocs, labels); /* Flush the icache */ local_flush_icache_range((unsigned long)buf, (unsigned long)p); return buf; out_err: kfree(buf); return NULL; } static int cps_pm_online_cpu(unsigned int cpu) { enum cps_pm_state state; unsigned core = cpu_core(&cpu_data[cpu]); void *entry_fn, *core_rc; for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { if (per_cpu(nc_asm_enter, core)[state]) continue; if (!test_bit(state, state_support)) continue; entry_fn = cps_gen_entry_code(cpu, state); if (!entry_fn) { pr_err("Failed to generate core %u state %u entry\n", core, state); clear_bit(state, state_support); } per_cpu(nc_asm_enter, core)[state] = entry_fn; } if (!per_cpu(ready_count, core)) { core_rc = kmalloc(sizeof(u32), GFP_KERNEL); if (!core_rc) { pr_err("Failed allocate core %u ready_count\n", core); return -ENOMEM; } per_cpu(ready_count, core) = core_rc; } return 0; } static int cps_pm_power_notifier(struct notifier_block *this, unsigned long event, void *ptr) { unsigned int stat; switch (event) { case PM_SUSPEND_PREPARE: stat = read_cpc_cl_stat_conf(); /* * If we're attempting to suspend the system and power down all * of the cores, the JTAG detect bit indicates that the CPC will * instead put the cores into clock-off state. In this state * a connected debugger can cause the CPU to attempt * interactions with the powered down system. 
At best this will * fail. At worst, it can hang the NoC, requiring a hard reset. * To avoid this, just block system suspend if a JTAG probe * is detected. */ if (stat & CPC_Cx_STAT_CONF_EJTAG_PROBE) { pr_warn("JTAG probe is connected - abort suspend\n"); return NOTIFY_BAD; } return NOTIFY_DONE; default: return NOTIFY_DONE; } } static int __init cps_pm_init(void) { /* A CM is required for all non-coherent states */ if (!mips_cm_present()) { pr_warn("pm-cps: no CM, non-coherent states unavailable\n"); return 0; } /* * If interrupts were enabled whilst running a wait instruction on a * non-coherent core then the VPE may end up processing interrupts * whilst non-coherent. That would be bad. */ if (cpu_wait == r4k_wait_irqoff) set_bit(CPS_PM_NC_WAIT, state_support); else pr_warn("pm-cps: non-coherent wait unavailable\n"); /* Detect whether a CPC is present */ if (mips_cpc_present()) { /* Detect whether clock gating is implemented */ if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL) set_bit(CPS_PM_CLOCK_GATED, state_support); else pr_warn("pm-cps: CPC does not support clock gating\n"); /* Power gating is available with CPS SMP & any CPC */ if (mips_cps_smp_in_use()) set_bit(CPS_PM_POWER_GATED, state_support); else pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n"); } else { pr_warn("pm-cps: no CPC, clock & power gating unavailable\n"); } pm_notifier(cps_pm_power_notifier, 0); return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online", cps_pm_online_cpu, NULL); } arch_initcall(cps_pm_init);
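
/*
 * Editor's illustrative sketch (not part of the original file): picking the
 * deepest state the platform advertises and entering it. Only
 * cps_pm_support_state() and cps_pm_enter_state() are real; the idle hook
 * itself is hypothetical (the in-tree users are the CPS cpuidle driver and
 * the hotplug code). Kept under #if 0 so it is never built.
 */
#if 0
static void example_cps_idle(void)
{
	enum cps_pm_state state;
	int err;

	/* prefer power gating, then clock gating, then non-coherent wait */
	if (cps_pm_support_state(CPS_PM_POWER_GATED))
		state = CPS_PM_POWER_GATED;
	else if (cps_pm_support_state(CPS_PM_CLOCK_GATED))
		state = CPS_PM_CLOCK_GATED;
	else if (cps_pm_support_state(CPS_PM_NC_WAIT))
		state = CPS_PM_NC_WAIT;
	else
		return;	/* nothing supported; fall back to a plain wait */

	err = cps_pm_enter_state(state);
	if (err)
		pr_debug("cps_pm_enter_state(%d) failed: %d\n", state, err);
}
#endif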
// SPDX-License-Identifier: GPL-2.0
/*
 * Sensirion SCD4X carbon dioxide sensor i2c driver
 *
 * Copyright (C) 2021 Protonic Holland
 * Author: Roan van Dijk <[email protected]>
 *
 * I2C slave address: 0x62
 *
 * Datasheets:
 * https://www.sensirion.com/file/datasheet_scd4x
 */

#include <linux/unaligned.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/types.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#define SCD4X_CRC8_POLYNOMIAL 0x31
#define SCD4X_TIMEOUT_ERR 1000
#define SCD4X_READ_BUF_SIZE 9
#define SCD4X_COMMAND_BUF_SIZE 2
#define SCD4X_WRITE_BUF_SIZE 5
#define SCD4X_FRC_MIN_PPM 0
#define SCD4X_FRC_MAX_PPM 2000
#define SCD4X_PRESSURE_COMP_MIN_MBAR 700
#define SCD4X_PRESSURE_COMP_MAX_MBAR 1200
#define SCD4X_READY_MASK 0x01

/* SCD4X commands */
enum scd4x_cmd {
	CMD_START_MEAS		= 0x21b1,
	CMD_READ_MEAS		= 0xec05,
	CMD_STOP_MEAS		= 0x3f86,
	CMD_SET_TEMP_OFFSET	= 0x241d,
	CMD_GET_TEMP_OFFSET	= 0x2318,
	CMD_SET_AMB_PRESSURE	= 0xe000,
	CMD_GET_AMB_PRESSURE	= 0xe000,
	CMD_FRC			= 0x362f,
	CMD_SET_ASC		= 0x2416,
	CMD_GET_ASC		= 0x2313,
	CMD_GET_DATA_READY	= 0xe4b8,
};

enum scd4x_channel_idx {
	SCD4X_CO2,
	SCD4X_TEMP,
	SCD4X_HR,
};

struct scd4x_state {
	struct i2c_client *client;
	/* maintain access to device, to prevent concurrent reads/writes */
	struct mutex lock;
	struct regulator *vdd;
};

DECLARE_CRC8_TABLE(scd4x_crc8_table);

static int scd4x_i2c_xfer(struct scd4x_state *state, char *txbuf, int txsize,
			  char *rxbuf, int rxsize)
{
	struct i2c_client *client = state->client;
	int ret;

	ret = i2c_master_send(client, txbuf, txsize);
	if (ret < 0)
		return ret;
	if (ret != txsize)
		return -EIO;

	if (rxsize == 0)
		return 0;

	ret = i2c_master_recv(client, rxbuf, rxsize);
	if (ret < 0)
		return ret;
	if (ret != rxsize)
		return -EIO;

	return 0;
}

static int scd4x_send_command(struct scd4x_state *state, enum scd4x_cmd cmd)
{
	char buf[SCD4X_COMMAND_BUF_SIZE];
	int ret;

	/*
	 * The measurement needs to be stopped before sending commands,
	 * except for the stop and start commands themselves.
	 */
	if ((cmd != CMD_STOP_MEAS) && (cmd != CMD_START_MEAS)) {
		ret = scd4x_send_command(state, CMD_STOP_MEAS);
		if (ret)
			return ret;

		/* execution time for stopping measurement */
		msleep_interruptible(500);
	}

	put_unaligned_be16(cmd, buf);
	ret = scd4x_i2c_xfer(state, buf, 2, buf, 0);
	if (ret)
		return ret;

	if ((cmd != CMD_STOP_MEAS) && (cmd != CMD_START_MEAS)) {
		ret = scd4x_send_command(state, CMD_START_MEAS);
		if (ret)
			return ret;
	}

	return 0;
}

static int scd4x_read(struct scd4x_state *state, enum scd4x_cmd cmd,
		      void *response, int response_sz)
{
	struct i2c_client *client = state->client;
	char buf[SCD4X_READ_BUF_SIZE];
	char *rsp = response;
	int i, ret;
	char crc;

	/*
	 * The measurement needs to be stopped before sending commands,
	 * except for the read-measurement, data-ready and ambient-pressure
	 * commands.
*/ if ((cmd != CMD_GET_DATA_READY) && (cmd != CMD_READ_MEAS) && (cmd != CMD_GET_AMB_PRESSURE)) { ret = scd4x_send_command(state, CMD_STOP_MEAS); if (ret) return ret; /* execution time for stopping measurement */ msleep_interruptible(500); } /* CRC byte for every 2 bytes of data */ response_sz += response_sz / 2; put_unaligned_be16(cmd, buf); ret = scd4x_i2c_xfer(state, buf, 2, buf, response_sz); if (ret) return ret; for (i = 0; i < response_sz; i += 3) { crc = crc8(scd4x_crc8_table, buf + i, 2, CRC8_INIT_VALUE); if (crc != buf[i + 2]) { dev_err(&client->dev, "CRC error\n"); return -EIO; } *rsp++ = buf[i]; *rsp++ = buf[i + 1]; } /* start measurement */ if ((cmd != CMD_GET_DATA_READY) && (cmd != CMD_READ_MEAS) && (cmd != CMD_GET_AMB_PRESSURE)) { ret = scd4x_send_command(state, CMD_START_MEAS); if (ret) return ret; } return 0; } static int scd4x_write(struct scd4x_state *state, enum scd4x_cmd cmd, uint16_t arg) { char buf[SCD4X_WRITE_BUF_SIZE]; int ret; char crc; put_unaligned_be16(cmd, buf); put_unaligned_be16(arg, buf + 2); crc = crc8(scd4x_crc8_table, buf + 2, 2, CRC8_INIT_VALUE); buf[4] = crc; /* measurement needs to be stopped before sending commands */ if (cmd != CMD_SET_AMB_PRESSURE) { ret = scd4x_send_command(state, CMD_STOP_MEAS); if (ret) return ret; } /* execution time */ msleep_interruptible(500); ret = scd4x_i2c_xfer(state, buf, SCD4X_WRITE_BUF_SIZE, buf, 0); if (ret) return ret; /* start measurement, except for forced calibration command */ if ((cmd != CMD_FRC) && (cmd != CMD_SET_AMB_PRESSURE)) { ret = scd4x_send_command(state, CMD_START_MEAS); if (ret) return ret; } return 0; } static int scd4x_write_and_fetch(struct scd4x_state *state, enum scd4x_cmd cmd, uint16_t arg, void *response, int response_sz) { struct i2c_client *client = state->client; char buf[SCD4X_READ_BUF_SIZE]; char *rsp = response; int i, ret; char crc; ret = scd4x_write(state, CMD_FRC, arg); if (ret) goto err; /* execution time */ msleep_interruptible(400); /* CRC byte for every 2 bytes of data */ response_sz += response_sz / 2; ret = i2c_master_recv(client, buf, response_sz); if (ret < 0) goto err; if (ret != response_sz) { ret = -EIO; goto err; } for (i = 0; i < response_sz; i += 3) { crc = crc8(scd4x_crc8_table, buf + i, 2, CRC8_INIT_VALUE); if (crc != buf[i + 2]) { dev_err(&client->dev, "CRC error\n"); ret = -EIO; goto err; } *rsp++ = buf[i]; *rsp++ = buf[i + 1]; } return scd4x_send_command(state, CMD_START_MEAS); err: /* * on error try to start the measurement, * puts sensor back into continuous measurement */ scd4x_send_command(state, CMD_START_MEAS); return ret; } static int scd4x_read_meas(struct scd4x_state *state, uint16_t *meas) { int i, ret; __be16 buf[3]; ret = scd4x_read(state, CMD_READ_MEAS, buf, sizeof(buf)); if (ret) return ret; for (i = 0; i < ARRAY_SIZE(buf); i++) meas[i] = be16_to_cpu(buf[i]); return 0; } static int scd4x_wait_meas_poll(struct scd4x_state *state) { struct i2c_client *client = state->client; int tries = 6; int ret; do { __be16 bval; uint16_t val; ret = scd4x_read(state, CMD_GET_DATA_READY, &bval, sizeof(bval)); if (ret) return -EIO; val = be16_to_cpu(bval); /* new measurement available */ if (val & 0x7FF) return 0; msleep_interruptible(1000); } while (--tries); /* try to start sensor on timeout */ ret = scd4x_send_command(state, CMD_START_MEAS); if (ret) dev_err(&client->dev, "failed to start measurement: %d\n", ret); return -ETIMEDOUT; } static int scd4x_read_poll(struct scd4x_state *state, uint16_t *buf) { int ret; ret = scd4x_wait_meas_poll(state); if (ret) return ret; 
return scd4x_read_meas(state, buf); } static int scd4x_read_channel(struct scd4x_state *state, int chan) { int ret; uint16_t buf[3]; ret = scd4x_read_poll(state, buf); if (ret) return ret; return buf[chan]; } static int scd4x_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { struct scd4x_state *state = iio_priv(indio_dev); int ret; __be16 tmp; switch (mask) { case IIO_CHAN_INFO_RAW: if (chan->output) { mutex_lock(&state->lock); ret = scd4x_read(state, CMD_GET_AMB_PRESSURE, &tmp, sizeof(tmp)); mutex_unlock(&state->lock); if (ret) return ret; *val = be16_to_cpu(tmp); return IIO_VAL_INT; } ret = iio_device_claim_direct_mode(indio_dev); if (ret) return ret; mutex_lock(&state->lock); ret = scd4x_read_channel(state, chan->address); mutex_unlock(&state->lock); iio_device_release_direct_mode(indio_dev); if (ret < 0) return ret; *val = ret; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: if (chan->type == IIO_CONCENTRATION) { *val = 0; *val2 = 100; return IIO_VAL_INT_PLUS_MICRO; } else if (chan->type == IIO_TEMP) { *val = 175000; *val2 = 65536; return IIO_VAL_FRACTIONAL; } else if (chan->type == IIO_HUMIDITYRELATIVE) { *val = 100000; *val2 = 65536; return IIO_VAL_FRACTIONAL; } return -EINVAL; case IIO_CHAN_INFO_OFFSET: *val = -16852; *val2 = 114286; return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_CALIBBIAS: mutex_lock(&state->lock); ret = scd4x_read(state, CMD_GET_TEMP_OFFSET, &tmp, sizeof(tmp)); mutex_unlock(&state->lock); if (ret) return ret; *val = be16_to_cpu(tmp); return IIO_VAL_INT; default: return -EINVAL; } } static const int scd4x_pressure_calibbias_available[] = { SCD4X_PRESSURE_COMP_MIN_MBAR, 1, SCD4X_PRESSURE_COMP_MAX_MBAR, }; static int scd4x_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, const int **vals, int *type, int *length, long mask) { switch (mask) { case IIO_CHAN_INFO_RAW: *vals = scd4x_pressure_calibbias_available; *type = IIO_VAL_INT; return IIO_AVAIL_RANGE; } return -EINVAL; } static int scd4x_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { struct scd4x_state *state = iio_priv(indio_dev); int ret = 0; switch (mask) { case IIO_CHAN_INFO_CALIBBIAS: mutex_lock(&state->lock); ret = scd4x_write(state, CMD_SET_TEMP_OFFSET, val); mutex_unlock(&state->lock); return ret; case IIO_CHAN_INFO_RAW: switch (chan->type) { case IIO_PRESSURE: if (val < SCD4X_PRESSURE_COMP_MIN_MBAR || val > SCD4X_PRESSURE_COMP_MAX_MBAR) return -EINVAL; mutex_lock(&state->lock); ret = scd4x_write(state, CMD_SET_AMB_PRESSURE, val); mutex_unlock(&state->lock); return ret; default: return -EINVAL; } default: return -EINVAL; } } static ssize_t calibration_auto_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct scd4x_state *state = iio_priv(indio_dev); int ret; __be16 bval; u16 val; mutex_lock(&state->lock); ret = scd4x_read(state, CMD_GET_ASC, &bval, sizeof(bval)); mutex_unlock(&state->lock); if (ret) { dev_err(dev, "failed to read automatic calibration"); return ret; } val = (be16_to_cpu(bval) & SCD4X_READY_MASK) ? 
1 : 0; return sysfs_emit(buf, "%d\n", val); } static ssize_t calibration_auto_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct scd4x_state *state = iio_priv(indio_dev); bool val; int ret; uint16_t value; ret = kstrtobool(buf, &val); if (ret) return ret; value = val; mutex_lock(&state->lock); ret = scd4x_write(state, CMD_SET_ASC, value); mutex_unlock(&state->lock); if (ret) dev_err(dev, "failed to set automatic calibration"); return ret ?: len; } static ssize_t calibration_forced_value_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct scd4x_state *state = iio_priv(indio_dev); uint16_t val, arg; int ret; ret = kstrtou16(buf, 0, &arg); if (ret) return ret; if (arg < SCD4X_FRC_MIN_PPM || arg > SCD4X_FRC_MAX_PPM) return -EINVAL; mutex_lock(&state->lock); ret = scd4x_write_and_fetch(state, CMD_FRC, arg, &val, sizeof(val)); mutex_unlock(&state->lock); if (ret) return ret; if (val == 0xff) { dev_err(dev, "forced calibration has failed"); return -EINVAL; } return len; } static IIO_DEVICE_ATTR_RW(calibration_auto_enable, 0); static IIO_DEVICE_ATTR_WO(calibration_forced_value, 0); static IIO_CONST_ATTR(calibration_forced_value_available, __stringify([SCD4X_FRC_MIN_PPM 1 SCD4X_FRC_MAX_PPM])); static struct attribute *scd4x_attrs[] = { &iio_dev_attr_calibration_auto_enable.dev_attr.attr, &iio_dev_attr_calibration_forced_value.dev_attr.attr, &iio_const_attr_calibration_forced_value_available.dev_attr.attr, NULL }; static const struct attribute_group scd4x_attr_group = { .attrs = scd4x_attrs, }; static const struct iio_info scd4x_info = { .attrs = &scd4x_attr_group, .read_raw = scd4x_read_raw, .write_raw = scd4x_write_raw, .read_avail = scd4x_read_avail, }; static const struct iio_chan_spec scd4x_channels[] = { { /* * this channel is special in a sense we are pretending that * sensor is able to change measurement chamber pressure but in * fact we're just setting pressure compensation value */ .type = IIO_PRESSURE, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW), .output = 1, .scan_index = -1, }, { .type = IIO_CONCENTRATION, .channel2 = IIO_MOD_CO2, .modified = 1, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), .address = SCD4X_CO2, .scan_index = SCD4X_CO2, .scan_type = { .sign = 'u', .realbits = 16, .storagebits = 16, .endianness = IIO_BE, }, }, { .type = IIO_TEMP, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_CALIBBIAS), .address = SCD4X_TEMP, .scan_index = SCD4X_TEMP, .scan_type = { .sign = 'u', .realbits = 16, .storagebits = 16, .endianness = IIO_BE, }, }, { .type = IIO_HUMIDITYRELATIVE, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), .address = SCD4X_HR, .scan_index = SCD4X_HR, .scan_type = { .sign = 'u', .realbits = 16, .storagebits = 16, .endianness = IIO_BE, }, }, }; static int scd4x_suspend(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct scd4x_state *state = iio_priv(indio_dev); int ret; ret = scd4x_send_command(state, CMD_STOP_MEAS); if (ret) return ret; return regulator_disable(state->vdd); } static int scd4x_resume(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct scd4x_state *state = iio_priv(indio_dev); int ret; ret = regulator_enable(state->vdd); if (ret) return 
ret; return scd4x_send_command(state, CMD_START_MEAS); } static DEFINE_SIMPLE_DEV_PM_OPS(scd4x_pm_ops, scd4x_suspend, scd4x_resume); static void scd4x_stop_meas(void *state) { scd4x_send_command(state, CMD_STOP_MEAS); } static void scd4x_disable_regulator(void *data) { struct scd4x_state *state = data; regulator_disable(state->vdd); } static irqreturn_t scd4x_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct scd4x_state *state = iio_priv(indio_dev); struct { uint16_t data[3]; int64_t ts __aligned(8); } scan; int ret; memset(&scan, 0, sizeof(scan)); mutex_lock(&state->lock); ret = scd4x_read_poll(state, scan.data); mutex_unlock(&state->lock); if (ret) goto out; iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); out: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; } static int scd4x_probe(struct i2c_client *client) { static const unsigned long scd4x_scan_masks[] = { 0x07, 0x00 }; struct device *dev = &client->dev; struct iio_dev *indio_dev; struct scd4x_state *state; int ret; indio_dev = devm_iio_device_alloc(dev, sizeof(*state)); if (!indio_dev) return -ENOMEM; state = iio_priv(indio_dev); mutex_init(&state->lock); state->client = client; crc8_populate_msb(scd4x_crc8_table, SCD4X_CRC8_POLYNOMIAL); indio_dev->info = &scd4x_info; indio_dev->name = client->name; indio_dev->channels = scd4x_channels; indio_dev->num_channels = ARRAY_SIZE(scd4x_channels); indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->available_scan_masks = scd4x_scan_masks; state->vdd = devm_regulator_get(dev, "vdd"); if (IS_ERR(state->vdd)) return dev_err_probe(dev, PTR_ERR(state->vdd), "failed to get regulator\n"); ret = regulator_enable(state->vdd); if (ret) return ret; ret = devm_add_action_or_reset(dev, scd4x_disable_regulator, state); if (ret) return ret; ret = scd4x_send_command(state, CMD_STOP_MEAS); if (ret) { dev_err(dev, "failed to stop measurement: %d\n", ret); return ret; } /* execution time */ msleep_interruptible(500); ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, scd4x_trigger_handler, NULL); if (ret) return ret; ret = scd4x_send_command(state, CMD_START_MEAS); if (ret) { dev_err(dev, "failed to start measurement: %d\n", ret); return ret; } ret = devm_add_action_or_reset(dev, scd4x_stop_meas, state); if (ret) return ret; return devm_iio_device_register(dev, indio_dev); } static const struct of_device_id scd4x_dt_ids[] = { { .compatible = "sensirion,scd40" }, { .compatible = "sensirion,scd41" }, { } }; MODULE_DEVICE_TABLE(of, scd4x_dt_ids); static struct i2c_driver scd4x_i2c_driver = { .driver = { .name = KBUILD_MODNAME, .of_match_table = scd4x_dt_ids, .pm = pm_sleep_ptr(&scd4x_pm_ops), }, .probe = scd4x_probe, }; module_i2c_driver(scd4x_i2c_driver); MODULE_AUTHOR("Roan van Dijk <[email protected]>"); MODULE_DESCRIPTION("Sensirion SCD4X carbon dioxide sensor core driver"); MODULE_LICENSE("GPL v2");
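/*
 * Illustration (not part of the driver): every 16-bit word the SCD4X sends
 * or receives is followed by a CRC-8 over those two bytes, which the driver
 * computes above through scd4x_crc8_table (polynomial 0x31, MSB first, init
 * value 0xff from CRC8_INIT_VALUE). Below is a minimal freestanding,
 * userspace-style sketch of the same checksum with a hypothetical helper
 * name; Sensirion's datasheet quotes 0x92 as the check value for the word
 * 0xbe 0xef.
 */
#include <stdint.h>

static uint8_t scd4x_crc8_sketch(const uint8_t *data, int len)
{
	uint8_t crc = 0xff;	/* CRC8_INIT_VALUE */
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++) {
			/* MSB-first update, polynomial x^8+x^5+x^4+1 (0x31) */
			if (crc & 0x80)
				crc = (crc << 1) ^ 0x31;
			else
				crc <<= 1;
		}
	}

	return crc;	/* e.g. {0xbe, 0xef} -> 0x92 */
}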
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ #ifndef __DT_BINDINGS_RESET_SAMA7G5_H #define __DT_BINDINGS_RESET_SAMA7G5_H #define SAMA7G5_RESET_USB_PHY1 4 #define SAMA7G5_RESET_USB_PHY2 5 #define SAMA7G5_RESET_USB_PHY3 6 #endif /* __DT_BINDINGS_RESET_SAMA7G5_H */
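/*
 * Usage illustration (hypothetical devicetree fragment, not part of this
 * header): a consumer node references these indices through the SAMA7G5
 * reset provider, e.g.
 *
 *	usb_phy1: phy@... {
 *		resets = <&reset_controller SAMA7G5_RESET_USB_PHY1>;
 *	};
 *
 * where the "reset_controller" label stands in for the board's actual
 * reset-provider node.
 */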
/* * linux/drivers/video/68328fb.c -- Low level implementation of the * mc68x328 LCD frame buffer device * * Copyright (C) 2003 Georges Menie * * This driver assumes an already configured controller (e.g. from config.c) * Keep the code clean of board specific initialization. * * This code has not been tested with colors, colormap management functions * are minimal (no colormap data written to the 68328 registers...) * * initial version of this driver: * Copyright (C) 1998,1999 Kenneth Albanowski <[email protected]>, * The Silver Hammer Group, Ltd. * * this version is based on : * * linux/drivers/video/vfb.c -- Virtual frame buffer device * * Copyright (C) 2002 James Simmons * * Copyright (C) 1997 Geert Uytterhoeven * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/uaccess.h> #include <linux/fb.h> #include <linux/init.h> #if defined(CONFIG_M68VZ328) #include <asm/MC68VZ328.h> #elif defined(CONFIG_M68EZ328) #include <asm/MC68EZ328.h> #elif defined(CONFIG_M68328) #include <asm/MC68328.h> #else #error wrong architecture for the MC68x328 frame buffer device #endif static u_long videomemory; static u_long videomemorysize; static struct fb_info fb_info; static u32 mc68x328fb_pseudo_palette[16]; static struct fb_var_screeninfo mc68x328fb_default __initdata = { .red = { 0, 8, 0 }, .green = { 0, 8, 0 }, .blue = { 0, 8, 0 }, .activate = FB_ACTIVATE_TEST, .height = -1, .width = -1, .pixclock = 20000, .left_margin = 64, .right_margin = 64, .upper_margin = 32, .lower_margin = 32, .hsync_len = 64, .vsync_len = 2, .vmode = FB_VMODE_NONINTERLACED, }; static const struct fb_fix_screeninfo mc68x328fb_fix __initconst = { .id = "68328fb", .type = FB_TYPE_PACKED_PIXELS, .xpanstep = 1, .ypanstep = 1, .ywrapstep = 1, .accel = FB_ACCEL_NONE, }; /* * Interface used by the world */ static int mc68x328fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info); static int mc68x328fb_set_par(struct fb_info *info); static int mc68x328fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info); static int mc68x328fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info); static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma); static const struct fb_ops mc68x328fb_ops = { .owner = THIS_MODULE, __FB_DEFAULT_IOMEM_OPS_RDWR, .fb_check_var = mc68x328fb_check_var, .fb_set_par = mc68x328fb_set_par, .fb_setcolreg = mc68x328fb_setcolreg, .fb_pan_display = mc68x328fb_pan_display, __FB_DEFAULT_IOMEM_OPS_DRAW, .fb_mmap = mc68x328fb_mmap, }; /* * Internal routines */ static u_long get_line_length(int xres_virtual, int bpp) { u_long length; length = xres_virtual * bpp; length = (length + 31) & ~31; length >>= 3; return (length); } /* * Setting the video mode has been split into two parts. * First part, xxxfb_check_var, must not write anything * to hardware, it should only verify and adjust var. * This means it doesn't alter par but it does use hardware * data from it to check this var. */ static int mc68x328fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { u_long line_length; /* * FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal! 
	 *  as FB_VMODE_SMOOTH_XPAN is only used internally
	 */

	if (var->vmode & FB_VMODE_CONUPDATE) {
		var->vmode |= FB_VMODE_YWRAP;
		var->xoffset = info->var.xoffset;
		var->yoffset = info->var.yoffset;
	}

	/*
	 *  Some very basic checks
	 */
	if (!var->xres)
		var->xres = 1;
	if (!var->yres)
		var->yres = 1;
	if (var->xres > var->xres_virtual)
		var->xres_virtual = var->xres;
	if (var->yres > var->yres_virtual)
		var->yres_virtual = var->yres;
	if (var->bits_per_pixel <= 1)
		var->bits_per_pixel = 1;
	else if (var->bits_per_pixel <= 8)
		var->bits_per_pixel = 8;
	else if (var->bits_per_pixel <= 16)
		var->bits_per_pixel = 16;
	else if (var->bits_per_pixel <= 24)
		var->bits_per_pixel = 24;
	else if (var->bits_per_pixel <= 32)
		var->bits_per_pixel = 32;
	else
		return -EINVAL;

	if (var->xres_virtual < var->xoffset + var->xres)
		var->xres_virtual = var->xoffset + var->xres;
	if (var->yres_virtual < var->yoffset + var->yres)
		var->yres_virtual = var->yoffset + var->yres;

	/*
	 *  Memory limit
	 */
	line_length = get_line_length(var->xres_virtual, var->bits_per_pixel);
	if (line_length * var->yres_virtual > videomemorysize)
		return -ENOMEM;

	/*
	 * Now that we checked it we alter var. The reason is that the video
	 * mode passed in might not work but slight changes to it might make
	 * it work. This way we let the user know what is acceptable.
	 */
	switch (var->bits_per_pixel) {
	case 1:
		var->red.offset = 0;
		var->red.length = 1;
		var->green.offset = 0;
		var->green.length = 1;
		var->blue.offset = 0;
		var->blue.length = 1;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case 8:
		var->red.offset = 0;
		var->red.length = 8;
		var->green.offset = 0;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case 16:	/* RGBA 5551 */
		if (var->transp.length) {
			var->red.offset = 0;
			var->red.length = 5;
			var->green.offset = 5;
			var->green.length = 5;
			var->blue.offset = 10;
			var->blue.length = 5;
			var->transp.offset = 15;
			var->transp.length = 1;
		} else {	/* RGB 565 */
			var->red.offset = 0;
			var->red.length = 5;
			var->green.offset = 5;
			var->green.length = 6;
			var->blue.offset = 11;
			var->blue.length = 5;
			var->transp.offset = 0;
			var->transp.length = 0;
		}
		break;
	case 24:	/* RGB 888 */
		var->red.offset = 0;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 16;
		var->blue.length = 8;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case 32:	/* RGBA 8888 */
		var->red.offset = 0;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 16;
		var->blue.length = 8;
		var->transp.offset = 24;
		var->transp.length = 8;
		break;
	}
	var->red.msb_right = 0;
	var->green.msb_right = 0;
	var->blue.msb_right = 0;
	var->transp.msb_right = 0;

	return 0;
}

/* This routine actually sets the video mode. It's in here where we set the
 * hardware state, info->par and fix, which can be affected by the change
 * in par. For this driver it doesn't do much.
 */
static int mc68x328fb_set_par(struct fb_info *info)
{
	info->fix.line_length =
	    get_line_length(info->var.xres_virtual, info->var.bits_per_pixel);
	return 0;
}

/*
 *  Set a single color register. The values supplied are already
 *  rounded down to the hardware's capabilities (according to the
 *  entries in the var structure). Return != 0 for invalid regno.
 */
static int mc68x328fb_setcolreg(u_int regno, u_int red, u_int green,
				u_int blue, u_int transp, struct fb_info *info)
{
	if (regno >= 256)	/* no. of hw registers */
		return 1;
	/*
	 * Program hardware... 
do anything you want with transp */ /* grayscale works only partially under directcolor */ if (info->var.grayscale) { /* grayscale = 0.30*R + 0.59*G + 0.11*B */ red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8; } /* Directcolor: * var->{color}.offset contains start of bitfield * var->{color}.length contains length of bitfield * {hardwarespecific} contains width of RAMDAC * cmap[X] is programmed to (X << red.offset) | (X << green.offset) | (X << blue.offset) * RAMDAC[X] is programmed to (red, green, blue) * * Pseudocolor: * uses offset = 0 && length = RAMDAC register width. * var->{color}.offset is 0 * var->{color}.length contains width of DAC * cmap is not used * RAMDAC[X] is programmed to (red, green, blue) * Truecolor: * does not use DAC. Usually 3 are present. * var->{color}.offset contains start of bitfield * var->{color}.length contains length of bitfield * cmap is programmed to (red << red.offset) | (green << green.offset) | * (blue << blue.offset) | (transp << transp.offset) * RAMDAC does not exist */ #define CNVT_TOHW(val,width) ((((val)<<(width))+0x7FFF-(val))>>16) switch (info->fix.visual) { case FB_VISUAL_TRUECOLOR: case FB_VISUAL_PSEUDOCOLOR: red = CNVT_TOHW(red, info->var.red.length); green = CNVT_TOHW(green, info->var.green.length); blue = CNVT_TOHW(blue, info->var.blue.length); transp = CNVT_TOHW(transp, info->var.transp.length); break; case FB_VISUAL_DIRECTCOLOR: red = CNVT_TOHW(red, 8); /* expect 8 bit DAC */ green = CNVT_TOHW(green, 8); blue = CNVT_TOHW(blue, 8); /* hey, there is bug in transp handling... */ transp = CNVT_TOHW(transp, 8); break; } #undef CNVT_TOHW /* Truecolor has hardware independent palette */ if (info->fix.visual == FB_VISUAL_TRUECOLOR) { u32 v; if (regno >= 16) return 1; v = (red << info->var.red.offset) | (green << info->var.green.offset) | (blue << info->var.blue.offset) | (transp << info->var.transp.offset); switch (info->var.bits_per_pixel) { case 8: break; case 16: ((u32 *) (info->pseudo_palette))[regno] = v; break; case 24: case 32: ((u32 *) (info->pseudo_palette))[regno] = v; break; } return 0; } return 0; } /* * Pan or Wrap the Display * * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag */ static int mc68x328fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { if (var->vmode & FB_VMODE_YWRAP) { if (var->yoffset < 0 || var->yoffset >= info->var.yres_virtual || var->xoffset) return -EINVAL; } else { if (var->xoffset + info->var.xres > info->var.xres_virtual || var->yoffset + info->var.yres > info->var.yres_virtual) return -EINVAL; } info->var.xoffset = var->xoffset; info->var.yoffset = var->yoffset; if (var->vmode & FB_VMODE_YWRAP) info->var.vmode |= FB_VMODE_YWRAP; else info->var.vmode &= ~FB_VMODE_YWRAP; return 0; } /* * Most drivers don't need their own mmap function */ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { #ifndef MMU /* this is uClinux (no MMU) specific code */ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); vma->vm_start = videomemory; return 0; #else return -EINVAL; #endif } static int __init mc68x328fb_setup(char *options) { if (!options || !*options) return 1; return 1; } /* * Initialisation */ static int __init mc68x328fb_init(void) { #ifndef MODULE char *option = NULL; if (fb_get_options("68328fb", &option)) return -ENODEV; mc68x328fb_setup(option); #endif /* * initialize the default mode from the LCD controller registers */ mc68x328fb_default.xres = LXMAX; mc68x328fb_default.yres = LYMAX+1; mc68x328fb_default.xres_virtual = 
mc68x328fb_default.xres; mc68x328fb_default.yres_virtual = mc68x328fb_default.yres; mc68x328fb_default.bits_per_pixel = 1 + (LPICF & 0x01); videomemory = LSSA; videomemorysize = (mc68x328fb_default.xres_virtual+7) / 8 * mc68x328fb_default.yres_virtual * mc68x328fb_default.bits_per_pixel; fb_info.screen_base = (void *)videomemory; fb_info.fbops = &mc68x328fb_ops; fb_info.var = mc68x328fb_default; fb_info.fix = mc68x328fb_fix; fb_info.fix.smem_start = videomemory; fb_info.fix.smem_len = videomemorysize; fb_info.fix.line_length = get_line_length(mc68x328fb_default.xres_virtual, mc68x328fb_default.bits_per_pixel); fb_info.fix.visual = (mc68x328fb_default.bits_per_pixel) == 1 ? FB_VISUAL_MONO10 : FB_VISUAL_PSEUDOCOLOR; if (fb_info.var.bits_per_pixel == 1) { fb_info.var.red.length = fb_info.var.green.length = fb_info.var.blue.length = 1; fb_info.var.red.offset = fb_info.var.green.offset = fb_info.var.blue.offset = 0; } fb_info.pseudo_palette = &mc68x328fb_pseudo_palette; fb_info.flags = FBINFO_HWACCEL_YPAN; if (fb_alloc_cmap(&fb_info.cmap, 256, 0)) return -ENOMEM; if (register_framebuffer(&fb_info) < 0) { fb_dealloc_cmap(&fb_info.cmap); return -EINVAL; } fb_info(&fb_info, "%s frame buffer device\n", fb_info.fix.id); fb_info(&fb_info, "%dx%dx%d at 0x%08lx\n", mc68x328fb_default.xres_virtual, mc68x328fb_default.yres_virtual, 1 << mc68x328fb_default.bits_per_pixel, videomemory); return 0; } module_init(mc68x328fb_init); #ifdef MODULE static void __exit mc68x328fb_cleanup(void) { unregister_framebuffer(&fb_info); fb_dealloc_cmap(&fb_info.cmap); } module_exit(mc68x328fb_cleanup); MODULE_LICENSE("GPL"); #endif /* MODULE */
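/*
 * Worked example (illustrative only, with hypothetical numbers):
 * get_line_length() pads each scanline to a 32-bit boundary before
 * converting bits to bytes. For a 240-pixel-wide virtual resolution
 * at 1 bpp:
 *
 *	240 * 1 = 240 bits -> rounded up to 256 bits -> 32 bytes per line
 *
 * so mc68x328fb_check_var() would accept a 240x160 mode only if
 * 32 * 160 = 5120 bytes fit into the videomemorysize derived from the
 * LCD controller registers at init time.
 */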
/* * Copyright 2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "include/logger_interface.h" #include "../dce110/irq_service_dce110.h" #include "dcn/dcn_3_1_5_offset.h" #include "dcn/dcn_3_1_5_sh_mask.h" #include "irq_service_dcn315.h" #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" #define DCN_BASE__INST0_SEG0 0x00000012 #define DCN_BASE__INST0_SEG1 0x000000C0 #define DCN_BASE__INST0_SEG2 0x000034C0 #define DCN_BASE__INST0_SEG3 0x00009000 #define DCN_BASE__INST0_SEG4 0x02403C00 #define DCN_BASE__INST0_SEG5 0 static enum dc_irq_source to_dal_irq_source_dcn315( struct irq_service *irq_service, uint32_t src_id, uint32_t ext_id) { switch (src_id) { case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: return DC_IRQ_SOURCE_VBLANK1; case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP: return DC_IRQ_SOURCE_VBLANK2; case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP: return DC_IRQ_SOURCE_VBLANK3; case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP: return DC_IRQ_SOURCE_VBLANK4; case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP: return DC_IRQ_SOURCE_VBLANK5; case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP: return DC_IRQ_SOURCE_VBLANK6; case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL: return DC_IRQ_SOURCE_DC1_VLINE0; case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL: return DC_IRQ_SOURCE_DC2_VLINE0; case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL: return DC_IRQ_SOURCE_DC3_VLINE0; case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL: return DC_IRQ_SOURCE_DC4_VLINE0; case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL: return DC_IRQ_SOURCE_DC5_VLINE0; case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL: return DC_IRQ_SOURCE_DC6_VLINE0; case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT: return DC_IRQ_SOURCE_PFLIP1; case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT: return DC_IRQ_SOURCE_PFLIP2; case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT: return DC_IRQ_SOURCE_PFLIP3; case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT: return DC_IRQ_SOURCE_PFLIP4; case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT: return DC_IRQ_SOURCE_PFLIP5; case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT: return DC_IRQ_SOURCE_PFLIP6; case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT: return DC_IRQ_SOURCE_VUPDATE1; case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT: return DC_IRQ_SOURCE_VUPDATE2; case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT: return DC_IRQ_SOURCE_VUPDATE3; case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT: return DC_IRQ_SOURCE_VUPDATE4; case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT: 
return DC_IRQ_SOURCE_VUPDATE5; case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT: return DC_IRQ_SOURCE_VUPDATE6; case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT: return DC_IRQ_SOURCE_DMCUB_OUTBOX; case DCN_1_0__SRCID__DC_HPD1_INT: /* generic src_id for all HPD and HPDRX interrupts */ switch (ext_id) { case DCN_1_0__CTXID__DC_HPD1_INT: return DC_IRQ_SOURCE_HPD1; case DCN_1_0__CTXID__DC_HPD2_INT: return DC_IRQ_SOURCE_HPD2; case DCN_1_0__CTXID__DC_HPD3_INT: return DC_IRQ_SOURCE_HPD3; case DCN_1_0__CTXID__DC_HPD4_INT: return DC_IRQ_SOURCE_HPD4; case DCN_1_0__CTXID__DC_HPD5_INT: return DC_IRQ_SOURCE_HPD5; case DCN_1_0__CTXID__DC_HPD6_INT: return DC_IRQ_SOURCE_HPD6; case DCN_1_0__CTXID__DC_HPD1_RX_INT: return DC_IRQ_SOURCE_HPD1RX; case DCN_1_0__CTXID__DC_HPD2_RX_INT: return DC_IRQ_SOURCE_HPD2RX; case DCN_1_0__CTXID__DC_HPD3_RX_INT: return DC_IRQ_SOURCE_HPD3RX; case DCN_1_0__CTXID__DC_HPD4_RX_INT: return DC_IRQ_SOURCE_HPD4RX; case DCN_1_0__CTXID__DC_HPD5_RX_INT: return DC_IRQ_SOURCE_HPD5RX; case DCN_1_0__CTXID__DC_HPD6_RX_INT: return DC_IRQ_SOURCE_HPD6RX; default: return DC_IRQ_SOURCE_INVALID; } break; default: return DC_IRQ_SOURCE_INVALID; } } static bool hpd_ack( struct irq_service *irq_service, const struct irq_source_info *info) { uint32_t addr = info->status_reg; uint32_t value = dm_read_reg(irq_service->ctx, addr); uint32_t current_status = get_reg_field_value( value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED); dal_irq_service_ack_generic(irq_service, info); value = dm_read_reg(irq_service->ctx, info->enable_reg); set_reg_field_value( value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY); dm_write_reg(irq_service->ctx, info->enable_reg, value); return true; } static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, .ack = hpd_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { .set = NULL, .ack = NULL }; static struct irq_source_info_funcs pflip_irq_info_funcs = { .set = NULL, .ack = NULL }; static struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { .set = NULL, .ack = NULL }; static struct irq_source_info_funcs vblank_irq_info_funcs = { .set = NULL, .ack = NULL }; static struct irq_source_info_funcs outbox_irq_info_funcs = { .set = NULL, .ack = NULL }; static struct irq_source_info_funcs vline0_irq_info_funcs = { .set = NULL, .ack = NULL }; #undef BASE_INNER #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg /* compile time expand base address. 
*/ #define BASE(seg) \ BASE_INNER(seg) #define SRI(reg_name, block, id)\ BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRI_DMUB(reg_name)\ BASE(reg ## reg_name ## _BASE_IDX) + \ reg ## reg_name #define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\ .enable_reg = SRI(reg1, block, reg_num),\ .enable_mask = \ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ .enable_value = {\ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ },\ .ack_reg = SRI(reg2, block, reg_num),\ .ack_mask = \ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\ .ack_value = \ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \ #define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\ .enable_reg = SRI_DMUB(reg1),\ .enable_mask = \ reg1 ## __ ## mask1 ## _MASK,\ .enable_value = {\ reg1 ## __ ## mask1 ## _MASK,\ ~reg1 ## __ ## mask1 ## _MASK \ },\ .ack_reg = SRI_DMUB(reg2),\ .ack_mask = \ reg2 ## __ ## mask2 ## _MASK,\ .ack_value = \ reg2 ## __ ## mask2 ## _MASK \ #define hpd_int_entry(reg_num)\ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\ IRQ_REG_ENTRY(HPD, reg_num,\ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\ .funcs = &hpd_irq_info_funcs\ } #define hpd_rx_int_entry(reg_num)\ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\ IRQ_REG_ENTRY(HPD, reg_num,\ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\ .funcs = &hpd_rx_irq_info_funcs\ } #define pflip_int_entry(reg_num)\ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\ IRQ_REG_ENTRY(HUBPREQ, reg_num,\ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\ .funcs = &pflip_irq_info_funcs\ } /* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic * of DCE's DC_IRQ_SOURCE_VUPDATEx. 
*/ #define vupdate_no_lock_int_entry(reg_num)\ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ IRQ_REG_ENTRY(OTG, reg_num,\ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\ .funcs = &vupdate_no_lock_irq_info_funcs\ } #define vblank_int_entry(reg_num)\ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\ IRQ_REG_ENTRY(OTG, reg_num,\ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\ .funcs = &vblank_irq_info_funcs\ } #define vline0_int_entry(reg_num)\ [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\ IRQ_REG_ENTRY(OTG, reg_num,\ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\ .funcs = &vline0_irq_info_funcs\ } #define dmub_outbox_int_entry()\ [DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\ IRQ_REG_ENTRY_DMUB(\ DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\ DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\ .funcs = &outbox_irq_info_funcs\ } #define dummy_irq_entry() \ {\ .funcs = &dummy_irq_info_funcs\ } #define i2c_int_entry(reg_num) \ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry() #define dp_sink_int_entry(reg_num) \ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry() #define gpio_pad_int_entry(reg_num) \ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry() #define dc_underflow_int_entry(reg_num) \ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry() static struct irq_source_info_funcs dummy_irq_info_funcs = { .set = dal_irq_service_dummy_set, .ack = dal_irq_service_dummy_ack }; static const struct irq_source_info irq_source_info_dcn315[DAL_IRQ_SOURCES_NUMBER] = { [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(), hpd_int_entry(0), hpd_int_entry(1), hpd_int_entry(2), hpd_int_entry(3), hpd_int_entry(4), hpd_rx_int_entry(0), hpd_rx_int_entry(1), hpd_rx_int_entry(2), hpd_rx_int_entry(3), hpd_rx_int_entry(4), i2c_int_entry(1), i2c_int_entry(2), i2c_int_entry(3), i2c_int_entry(4), i2c_int_entry(5), i2c_int_entry(6), dp_sink_int_entry(1), dp_sink_int_entry(2), dp_sink_int_entry(3), dp_sink_int_entry(4), dp_sink_int_entry(5), dp_sink_int_entry(6), [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(), pflip_int_entry(0), pflip_int_entry(1), pflip_int_entry(2), pflip_int_entry(3), [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(), [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(), [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(), gpio_pad_int_entry(0), gpio_pad_int_entry(1), gpio_pad_int_entry(2), gpio_pad_int_entry(3), gpio_pad_int_entry(4), gpio_pad_int_entry(5), gpio_pad_int_entry(6), gpio_pad_int_entry(7), gpio_pad_int_entry(8), gpio_pad_int_entry(9), gpio_pad_int_entry(10), gpio_pad_int_entry(11), gpio_pad_int_entry(12), gpio_pad_int_entry(13), gpio_pad_int_entry(14), gpio_pad_int_entry(15), gpio_pad_int_entry(16), gpio_pad_int_entry(17), gpio_pad_int_entry(18), gpio_pad_int_entry(19), gpio_pad_int_entry(20), gpio_pad_int_entry(21), gpio_pad_int_entry(22), gpio_pad_int_entry(23), gpio_pad_int_entry(24), gpio_pad_int_entry(25), gpio_pad_int_entry(26), gpio_pad_int_entry(27), gpio_pad_int_entry(28), gpio_pad_int_entry(29), gpio_pad_int_entry(30), dc_underflow_int_entry(1), dc_underflow_int_entry(2), dc_underflow_int_entry(3), dc_underflow_int_entry(4), dc_underflow_int_entry(5), dc_underflow_int_entry(6), [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), vupdate_no_lock_int_entry(0), vupdate_no_lock_int_entry(1), vupdate_no_lock_int_entry(2), vupdate_no_lock_int_entry(3), vblank_int_entry(0), vblank_int_entry(1), 
vblank_int_entry(2), vblank_int_entry(3), vline0_int_entry(0), vline0_int_entry(1), vline0_int_entry(2), vline0_int_entry(3), [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(), [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(), dmub_outbox_int_entry(), }; static const struct irq_service_funcs irq_service_funcs_dcn315 = { .to_dal_irq_source = to_dal_irq_source_dcn315 }; static void dcn315_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { dal_irq_service_construct(irq_service, init_data); irq_service->info = irq_source_info_dcn315; irq_service->funcs = &irq_service_funcs_dcn315; } struct irq_service *dal_irq_service_dcn315_create( struct irq_service_init_data *init_data) { struct irq_service *irq_service = kzalloc(sizeof(*irq_service), GFP_KERNEL); if (!irq_service) return NULL; dcn315_irq_construct(irq_service, init_data); return irq_service; }
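/*
 * Expansion illustration (derived mechanically from the macros above, not
 * extra code): hpd_int_entry(0) initializes
 * irq_source_info_dcn315[DC_IRQ_SOURCE_HPD1] roughly as
 *
 *	.enable_reg  = BASE(regHPD0_DC_HPD_INT_CONTROL_BASE_IDX) +
 *		       regHPD0_DC_HPD_INT_CONTROL,
 *	.enable_mask = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,
 *	.ack_reg     = the same HPD0_DC_HPD_INT_CONTROL register,
 *	.ack_mask    = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,
 *	.status_reg  = SRI(DC_HPD_INT_STATUS, HPD, 0),
 *	.funcs       = &hpd_irq_info_funcs,
 *
 * with every register address resolved at compile time from the
 * DCN_BASE__INST0_SEG* offsets, so no address lookup happens at runtime.
 */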
// SPDX-License-Identifier: GPL-2.0-or-later /* * IPVS An implementation of the IP virtual server support for the * LINUX operating system. IPVS is now implemented as a module * over the NetFilter framework. IPVS can be used to build a * high-performance and highly available server based on a * cluster of servers. * * Authors: Wensong Zhang <[email protected]> * Peter Kese <[email protected]> * Julian Anastasov <[email protected]> * * Changes: */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/capability.h> #include <linux/fs.h> #include <linux/sysctl.h> #include <linux/proc_fs.h> #include <linux/workqueue.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/mutex.h> #include <net/net_namespace.h> #include <linux/nsproxy.h> #include <net/ip.h> #ifdef CONFIG_IP_VS_IPV6 #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/netfilter/ipv6/nf_defrag_ipv6.h> #endif #include <net/route.h> #include <net/sock.h> #include <net/genetlink.h> #include <linux/uaccess.h> #include <net/ip_vs.h> MODULE_ALIAS_GENL_FAMILY(IPVS_GENL_NAME); DEFINE_MUTEX(__ip_vs_mutex); /* Serialize configuration with sockopt/netlink */ /* sysctl variables */ #ifdef CONFIG_IP_VS_DEBUG static int sysctl_ip_vs_debug_level = 0; int ip_vs_get_debug_level(void) { return sysctl_ip_vs_debug_level; } #endif /* Protos */ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup); #ifdef CONFIG_IP_VS_IPV6 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */ static bool __ip_vs_addr_is_local_v6(struct net *net, const struct in6_addr *addr) { struct flowi6 fl6 = { .daddr = *addr, }; struct dst_entry *dst = ip6_route_output(net, NULL, &fl6); bool is_local; is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK); dst_release(dst); return is_local; } #endif #ifdef CONFIG_SYSCTL /* * update_defense_level is called from keventd and from sysctl, * so it needs to protect itself from softirqs */ static void update_defense_level(struct netns_ipvs *ipvs) { struct sysinfo i; int availmem; int amemthresh; int nomem; int to_change = -1; /* we only count free and buffered memory (in pages) */ si_meminfo(&i); availmem = i.freeram + i.bufferram; /* however in linux 2.5 the i.bufferram is total page cache size, we need adjust it */ /* si_swapinfo(&i); */ /* availmem = availmem - (i.totalswap - i.freeswap); */ amemthresh = max(READ_ONCE(ipvs->sysctl_amemthresh), 0); nomem = (availmem < amemthresh); local_bh_disable(); /* drop_entry */ spin_lock(&ipvs->dropentry_lock); switch (ipvs->sysctl_drop_entry) { case 0: atomic_set(&ipvs->dropentry, 0); break; case 1: if (nomem) { atomic_set(&ipvs->dropentry, 1); ipvs->sysctl_drop_entry = 2; } else { atomic_set(&ipvs->dropentry, 0); } break; case 2: if (nomem) { atomic_set(&ipvs->dropentry, 1); } else { atomic_set(&ipvs->dropentry, 0); ipvs->sysctl_drop_entry = 1; } break; case 3: atomic_set(&ipvs->dropentry, 1); break; } spin_unlock(&ipvs->dropentry_lock); /* drop_packet */ spin_lock(&ipvs->droppacket_lock); switch (ipvs->sysctl_drop_packet) { case 0: ipvs->drop_rate = 0; break; case 1: if (nomem) { ipvs->drop_counter = amemthresh / (amemthresh - availmem); ipvs->drop_rate = ipvs->drop_counter; ipvs->sysctl_drop_packet = 2; } else { ipvs->drop_rate = 0; } break; case 2: if (nomem) { ipvs->drop_counter = amemthresh / (amemthresh - availmem); ipvs->drop_rate = 
ipvs->drop_counter; } else { ipvs->drop_rate = 0; ipvs->sysctl_drop_packet = 1; } break; case 3: ipvs->drop_rate = ipvs->sysctl_am_droprate; break; } spin_unlock(&ipvs->droppacket_lock); /* secure_tcp */ spin_lock(&ipvs->securetcp_lock); switch (ipvs->sysctl_secure_tcp) { case 0: if (ipvs->old_secure_tcp >= 2) to_change = 0; break; case 1: if (nomem) { if (ipvs->old_secure_tcp < 2) to_change = 1; ipvs->sysctl_secure_tcp = 2; } else { if (ipvs->old_secure_tcp >= 2) to_change = 0; } break; case 2: if (nomem) { if (ipvs->old_secure_tcp < 2) to_change = 1; } else { if (ipvs->old_secure_tcp >= 2) to_change = 0; ipvs->sysctl_secure_tcp = 1; } break; case 3: if (ipvs->old_secure_tcp < 2) to_change = 1; break; } ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp; if (to_change >= 0) ip_vs_protocol_timeout_change(ipvs, ipvs->sysctl_secure_tcp > 1); spin_unlock(&ipvs->securetcp_lock); local_bh_enable(); } /* Handler for delayed work for expiring no * destination connections */ static void expire_nodest_conn_handler(struct work_struct *work) { struct netns_ipvs *ipvs; ipvs = container_of(work, struct netns_ipvs, expire_nodest_conn_work.work); ip_vs_expire_nodest_conn_flush(ipvs); } /* * Timer for checking the defense */ #define DEFENSE_TIMER_PERIOD 1*HZ static void defense_work_handler(struct work_struct *work) { struct netns_ipvs *ipvs = container_of(work, struct netns_ipvs, defense_work.work); update_defense_level(ipvs); if (atomic_read(&ipvs->dropentry)) ip_vs_random_dropentry(ipvs); queue_delayed_work(system_long_wq, &ipvs->defense_work, DEFENSE_TIMER_PERIOD); } #endif static void est_reload_work_handler(struct work_struct *work) { struct netns_ipvs *ipvs = container_of(work, struct netns_ipvs, est_reload_work.work); int genid_done = atomic_read(&ipvs->est_genid_done); unsigned long delay = HZ / 10; /* repeat startups after failure */ bool repeat = false; int genid; int id; mutex_lock(&ipvs->est_mutex); genid = atomic_read(&ipvs->est_genid); for (id = 0; id < ipvs->est_kt_count; id++) { struct ip_vs_est_kt_data *kd = ipvs->est_kt_arr[id]; /* netns clean up started, abort delayed work */ if (!ipvs->enable) goto unlock; if (!kd) continue; /* New config ? 
Stop kthread tasks */ if (genid != genid_done) ip_vs_est_kthread_stop(kd); if (!kd->task && !ip_vs_est_stopped(ipvs)) { /* Do not start kthreads above 0 in calc phase */ if ((!id || !ipvs->est_calc_phase) && ip_vs_est_kthread_start(ipvs, kd) < 0) repeat = true; } } atomic_set(&ipvs->est_genid_done, genid); if (repeat) queue_delayed_work(system_long_wq, &ipvs->est_reload_work, delay); unlock: mutex_unlock(&ipvs->est_mutex); } int ip_vs_use_count_inc(void) { return try_module_get(THIS_MODULE); } void ip_vs_use_count_dec(void) { module_put(THIS_MODULE); } /* * Hash table: for virtual service lookups */ #define IP_VS_SVC_TAB_BITS 8 #define IP_VS_SVC_TAB_SIZE (1 << IP_VS_SVC_TAB_BITS) #define IP_VS_SVC_TAB_MASK (IP_VS_SVC_TAB_SIZE - 1) /* the service table hashed by <protocol, addr, port> */ static struct hlist_head ip_vs_svc_table[IP_VS_SVC_TAB_SIZE]; /* the service table hashed by fwmark */ static struct hlist_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE]; /* * Returns hash value for virtual service */ static inline unsigned int ip_vs_svc_hashkey(struct netns_ipvs *ipvs, int af, unsigned int proto, const union nf_inet_addr *addr, __be16 port) { unsigned int porth = ntohs(port); __be32 addr_fold = addr->ip; __u32 ahash; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) addr_fold = addr->ip6[0]^addr->ip6[1]^ addr->ip6[2]^addr->ip6[3]; #endif ahash = ntohl(addr_fold); ahash ^= ((size_t) ipvs >> 8); return (proto ^ ahash ^ (porth >> IP_VS_SVC_TAB_BITS) ^ porth) & IP_VS_SVC_TAB_MASK; } /* * Returns hash value of fwmark for virtual service lookup */ static inline unsigned int ip_vs_svc_fwm_hashkey(struct netns_ipvs *ipvs, __u32 fwmark) { return (((size_t)ipvs>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK; } /* * Hashes a service in the ip_vs_svc_table by <netns,proto,addr,port> * or in the ip_vs_svc_fwm_table by fwmark. * Should be called with locked tables. */ static int ip_vs_svc_hash(struct ip_vs_service *svc) { unsigned int hash; if (svc->flags & IP_VS_SVC_F_HASHED) { pr_err("%s(): request for already hashed, called from %pS\n", __func__, __builtin_return_address(0)); return 0; } if (svc->fwmark == 0) { /* * Hash it by <netns,protocol,addr,port> in ip_vs_svc_table */ hash = ip_vs_svc_hashkey(svc->ipvs, svc->af, svc->protocol, &svc->addr, svc->port); hlist_add_head_rcu(&svc->s_list, &ip_vs_svc_table[hash]); } else { /* * Hash it by fwmark in svc_fwm_table */ hash = ip_vs_svc_fwm_hashkey(svc->ipvs, svc->fwmark); hlist_add_head_rcu(&svc->f_list, &ip_vs_svc_fwm_table[hash]); } svc->flags |= IP_VS_SVC_F_HASHED; /* increase its refcnt because it is referenced by the svc table */ atomic_inc(&svc->refcnt); return 1; } /* * Unhashes a service from svc_table / svc_fwm_table. * Should be called with locked tables. */ static int ip_vs_svc_unhash(struct ip_vs_service *svc) { if (!(svc->flags & IP_VS_SVC_F_HASHED)) { pr_err("%s(): request for unhash flagged, called from %pS\n", __func__, __builtin_return_address(0)); return 0; } if (svc->fwmark == 0) { /* Remove it from the svc_table table */ hlist_del_rcu(&svc->s_list); } else { /* Remove it from the svc_fwm_table table */ hlist_del_rcu(&svc->f_list); } svc->flags &= ~IP_VS_SVC_F_HASHED; atomic_dec(&svc->refcnt); return 1; } /* * Get service by {netns, proto,addr,port} in the service table. 
*/ static inline struct ip_vs_service * __ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u16 protocol, const union nf_inet_addr *vaddr, __be16 vport) { unsigned int hash; struct ip_vs_service *svc; /* Check for "full" addressed entries */ hash = ip_vs_svc_hashkey(ipvs, af, protocol, vaddr, vport); hlist_for_each_entry_rcu(svc, &ip_vs_svc_table[hash], s_list) { if ((svc->af == af) && ip_vs_addr_equal(af, &svc->addr, vaddr) && (svc->port == vport) && (svc->protocol == protocol) && (svc->ipvs == ipvs)) { /* HIT */ return svc; } } return NULL; } /* * Get service by {fwmark} in the service table. */ static inline struct ip_vs_service * __ip_vs_svc_fwm_find(struct netns_ipvs *ipvs, int af, __u32 fwmark) { unsigned int hash; struct ip_vs_service *svc; /* Check for fwmark addressed entries */ hash = ip_vs_svc_fwm_hashkey(ipvs, fwmark); hlist_for_each_entry_rcu(svc, &ip_vs_svc_fwm_table[hash], f_list) { if (svc->fwmark == fwmark && svc->af == af && (svc->ipvs == ipvs)) { /* HIT */ return svc; } } return NULL; } /* Find service, called under RCU lock */ struct ip_vs_service * ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol, const union nf_inet_addr *vaddr, __be16 vport) { struct ip_vs_service *svc; /* * Check the table hashed by fwmark first */ if (fwmark) { svc = __ip_vs_svc_fwm_find(ipvs, af, fwmark); if (svc) goto out; } /* * Check the table hashed by <protocol,addr,port> * for "full" addressed entries */ svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, vport); if (!svc && protocol == IPPROTO_TCP && atomic_read(&ipvs->ftpsvc_counter) && (vport == FTPDATA || !inet_port_requires_bind_service(ipvs->net, ntohs(vport)))) { /* * Check if ftp service entry exists, the packet * might belong to FTP data connections. */ svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, FTPPORT); } if (svc == NULL && atomic_read(&ipvs->nullsvc_counter)) { /* * Check if the catch-all port (port zero) exists */ svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, 0); } out: IP_VS_DBG_BUF(9, "lookup service: fwm %u %s %s:%u %s\n", fwmark, ip_vs_proto_name(protocol), IP_VS_DBG_ADDR(af, vaddr), ntohs(vport), svc ? "hit" : "not hit"); return svc; } static inline void __ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc) { atomic_inc(&svc->refcnt); rcu_assign_pointer(dest->svc, svc); } static void ip_vs_service_free(struct ip_vs_service *svc) { ip_vs_stats_release(&svc->stats); kfree(svc); } static void ip_vs_service_rcu_free(struct rcu_head *head) { struct ip_vs_service *svc; svc = container_of(head, struct ip_vs_service, rcu_head); ip_vs_service_free(svc); } static void __ip_vs_svc_put(struct ip_vs_service *svc) { if (atomic_dec_and_test(&svc->refcnt)) { IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n", svc->fwmark, IP_VS_DBG_ADDR(svc->af, &svc->addr), ntohs(svc->port)); call_rcu(&svc->rcu_head, ip_vs_service_rcu_free); } } /* * Returns hash value for real service */ static inline unsigned int ip_vs_rs_hashkey(int af, const union nf_inet_addr *addr, __be16 port) { unsigned int porth = ntohs(port); __be32 addr_fold = addr->ip; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) addr_fold = addr->ip6[0]^addr->ip6[1]^ addr->ip6[2]^addr->ip6[3]; #endif return (ntohl(addr_fold)^(porth>>IP_VS_RTAB_BITS)^porth) & IP_VS_RTAB_MASK; } /* Hash ip_vs_dest in rs_table by <proto,addr,port>. 
*/ static void ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest) { unsigned int hash; __be16 port; if (dest->in_rs_table) return; switch (IP_VS_DFWD_METHOD(dest)) { case IP_VS_CONN_F_MASQ: port = dest->port; break; case IP_VS_CONN_F_TUNNEL: switch (dest->tun_type) { case IP_VS_CONN_F_TUNNEL_TYPE_GUE: port = dest->tun_port; break; case IP_VS_CONN_F_TUNNEL_TYPE_IPIP: case IP_VS_CONN_F_TUNNEL_TYPE_GRE: port = 0; break; default: return; } break; default: return; } /* * Hash by proto,addr,port, * which are the parameters of the real service. */ hash = ip_vs_rs_hashkey(dest->af, &dest->addr, port); hlist_add_head_rcu(&dest->d_list, &ipvs->rs_table[hash]); dest->in_rs_table = 1; } /* Unhash ip_vs_dest from rs_table. */ static void ip_vs_rs_unhash(struct ip_vs_dest *dest) { /* * Remove it from the rs_table table. */ if (dest->in_rs_table) { hlist_del_rcu(&dest->d_list); dest->in_rs_table = 0; } } /* Check if real service by <proto,addr,port> is present */ bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol, const union nf_inet_addr *daddr, __be16 dport) { unsigned int hash; struct ip_vs_dest *dest; /* Check for "full" addressed entries */ hash = ip_vs_rs_hashkey(af, daddr, dport); hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) { if (dest->port == dport && dest->af == af && ip_vs_addr_equal(af, &dest->addr, daddr) && (dest->protocol == protocol || dest->vfwmark) && IP_VS_DFWD_METHOD(dest) == IP_VS_CONN_F_MASQ) { /* HIT */ return true; } } return false; } /* Find real service record by <proto,addr,port>. * In case of multiple records with the same <proto,addr,port>, only * the first found record is returned. * * To be called under RCU lock. */ struct ip_vs_dest *ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol, const union nf_inet_addr *daddr, __be16 dport) { unsigned int hash; struct ip_vs_dest *dest; /* Check for "full" addressed entries */ hash = ip_vs_rs_hashkey(af, daddr, dport); hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) { if (dest->port == dport && dest->af == af && ip_vs_addr_equal(af, &dest->addr, daddr) && (dest->protocol == protocol || dest->vfwmark) && IP_VS_DFWD_METHOD(dest) == IP_VS_CONN_F_MASQ) { /* HIT */ return dest; } } return NULL; } /* Find real service record by <af,addr,tun_port>. * In case of multiple records with the same <af,addr,tun_port>, only * the first found record is returned. * * To be called under RCU lock. */ struct ip_vs_dest *ip_vs_find_tunnel(struct netns_ipvs *ipvs, int af, const union nf_inet_addr *daddr, __be16 tun_port) { struct ip_vs_dest *dest; unsigned int hash; /* Check for "full" addressed entries */ hash = ip_vs_rs_hashkey(af, daddr, tun_port); hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) { if (dest->tun_port == tun_port && dest->af == af && ip_vs_addr_equal(af, &dest->addr, daddr) && IP_VS_DFWD_METHOD(dest) == IP_VS_CONN_F_TUNNEL) { /* HIT */ return dest; } } return NULL; } /* Lookup destination by {addr,port} in the given service * Called under RCU lock. 
*/ static struct ip_vs_dest * ip_vs_lookup_dest(struct ip_vs_service *svc, int dest_af, const union nf_inet_addr *daddr, __be16 dport) { struct ip_vs_dest *dest; /* * Find the destination for the given service */ list_for_each_entry_rcu(dest, &svc->destinations, n_list) { if ((dest->af == dest_af) && ip_vs_addr_equal(dest_af, &dest->addr, daddr) && (dest->port == dport)) { /* HIT */ return dest; } } return NULL; } /* * Find destination by {daddr,dport,vaddr,protocol} * Created to be used in ip_vs_process_message() in * the backup synchronization daemon. It finds the * destination to be bound to the received connection * on the backup. * Called under RCU lock, no refcnt is returned. */ struct ip_vs_dest *ip_vs_find_dest(struct netns_ipvs *ipvs, int svc_af, int dest_af, const union nf_inet_addr *daddr, __be16 dport, const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol, __u32 fwmark, __u32 flags) { struct ip_vs_dest *dest; struct ip_vs_service *svc; __be16 port = dport; svc = ip_vs_service_find(ipvs, svc_af, fwmark, protocol, vaddr, vport); if (!svc) return NULL; if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) port = 0; dest = ip_vs_lookup_dest(svc, dest_af, daddr, port); if (!dest) dest = ip_vs_lookup_dest(svc, dest_af, daddr, port ^ dport); return dest; } void ip_vs_dest_dst_rcu_free(struct rcu_head *head) { struct ip_vs_dest_dst *dest_dst = container_of(head, struct ip_vs_dest_dst, rcu_head); dst_release(dest_dst->dst_cache); kfree(dest_dst); } /* Release dest_dst and dst_cache for dest in user context */ static void __ip_vs_dst_cache_reset(struct ip_vs_dest *dest) { struct ip_vs_dest_dst *old; old = rcu_dereference_protected(dest->dest_dst, 1); if (old) { RCU_INIT_POINTER(dest->dest_dst, NULL); call_rcu(&old->rcu_head, ip_vs_dest_dst_rcu_free); } } /* * Lookup dest by {svc,addr,port} in the destination trash. * The destination trash is used to hold the destinations that are removed * from the service table but are still referenced by some conn entries. * The reason to add the destination trash is when the dest is temporary * down (either by administrator or by monitor program), the dest can be * picked back from the trash, the remaining connections to the dest can * continue, and the counting information of the dest is also useful for * scheduling. 
*/ static struct ip_vs_dest * ip_vs_trash_get_dest(struct ip_vs_service *svc, int dest_af, const union nf_inet_addr *daddr, __be16 dport) { struct ip_vs_dest *dest; struct netns_ipvs *ipvs = svc->ipvs; /* * Find the destination in trash */ spin_lock_bh(&ipvs->dest_trash_lock); list_for_each_entry(dest, &ipvs->dest_trash, t_list) { IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, " "dest->refcnt=%d\n", dest->vfwmark, IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), refcount_read(&dest->refcnt)); if (dest->af == dest_af && ip_vs_addr_equal(dest_af, &dest->addr, daddr) && dest->port == dport && dest->vfwmark == svc->fwmark && dest->protocol == svc->protocol && (svc->fwmark || (ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) && dest->vport == svc->port))) { /* HIT */ list_del(&dest->t_list); goto out; } } dest = NULL; out: spin_unlock_bh(&ipvs->dest_trash_lock); return dest; } static void ip_vs_dest_rcu_free(struct rcu_head *head) { struct ip_vs_dest *dest; dest = container_of(head, struct ip_vs_dest, rcu_head); ip_vs_stats_release(&dest->stats); ip_vs_dest_put_and_free(dest); } static void ip_vs_dest_free(struct ip_vs_dest *dest) { struct ip_vs_service *svc = rcu_dereference_protected(dest->svc, 1); __ip_vs_dst_cache_reset(dest); __ip_vs_svc_put(svc); call_rcu(&dest->rcu_head, ip_vs_dest_rcu_free); } /* * Clean up all the destinations in the trash * Called by the ip_vs_control_cleanup() * * When the ip_vs_control_clearup is activated by ipvs module exit, * the service tables must have been flushed and all the connections * are expired, and the refcnt of each destination in the trash must * be 1, so we simply release them here. */ static void ip_vs_trash_cleanup(struct netns_ipvs *ipvs) { struct ip_vs_dest *dest, *nxt; del_timer_sync(&ipvs->dest_trash_timer); /* No need to use dest_trash_lock */ list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, t_list) { list_del(&dest->t_list); ip_vs_dest_free(dest); } } static void ip_vs_stats_rcu_free(struct rcu_head *head) { struct ip_vs_stats_rcu *rs = container_of(head, struct ip_vs_stats_rcu, rcu_head); ip_vs_stats_release(&rs->s); kfree(rs); } static void ip_vs_copy_stats(struct ip_vs_kstats *dst, struct ip_vs_stats *src) { #define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->kstats.c - src->kstats0.c spin_lock(&src->lock); IP_VS_SHOW_STATS_COUNTER(conns); IP_VS_SHOW_STATS_COUNTER(inpkts); IP_VS_SHOW_STATS_COUNTER(outpkts); IP_VS_SHOW_STATS_COUNTER(inbytes); IP_VS_SHOW_STATS_COUNTER(outbytes); ip_vs_read_estimator(dst, src); spin_unlock(&src->lock); } static void ip_vs_export_stats_user(struct ip_vs_stats_user *dst, struct ip_vs_kstats *src) { dst->conns = (u32)src->conns; dst->inpkts = (u32)src->inpkts; dst->outpkts = (u32)src->outpkts; dst->inbytes = src->inbytes; dst->outbytes = src->outbytes; dst->cps = (u32)src->cps; dst->inpps = (u32)src->inpps; dst->outpps = (u32)src->outpps; dst->inbps = (u32)src->inbps; dst->outbps = (u32)src->outbps; } static void ip_vs_zero_stats(struct ip_vs_stats *stats) { spin_lock(&stats->lock); /* get current counters as zero point, rates are zeroed */ #define IP_VS_ZERO_STATS_COUNTER(c) stats->kstats0.c = stats->kstats.c IP_VS_ZERO_STATS_COUNTER(conns); IP_VS_ZERO_STATS_COUNTER(inpkts); IP_VS_ZERO_STATS_COUNTER(outpkts); IP_VS_ZERO_STATS_COUNTER(inbytes); IP_VS_ZERO_STATS_COUNTER(outbytes); ip_vs_zero_estimator(stats); spin_unlock(&stats->lock); } /* Allocate fields after kzalloc */ int ip_vs_stats_init_alloc(struct ip_vs_stats *s) { int i; spin_lock_init(&s->lock); s->cpustats = 
alloc_percpu(struct ip_vs_cpu_stats); if (!s->cpustats) return -ENOMEM; for_each_possible_cpu(i) { struct ip_vs_cpu_stats *cs = per_cpu_ptr(s->cpustats, i); u64_stats_init(&cs->syncp); } return 0; } struct ip_vs_stats *ip_vs_stats_alloc(void) { struct ip_vs_stats *s = kzalloc(sizeof(*s), GFP_KERNEL); if (s && ip_vs_stats_init_alloc(s) >= 0) return s; kfree(s); return NULL; } void ip_vs_stats_release(struct ip_vs_stats *stats) { free_percpu(stats->cpustats); } void ip_vs_stats_free(struct ip_vs_stats *stats) { if (stats) { ip_vs_stats_release(stats); kfree(stats); } } /* * Update a destination in the given service */ static void __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, struct ip_vs_dest_user_kern *udest, int add) { struct netns_ipvs *ipvs = svc->ipvs; struct ip_vs_service *old_svc; struct ip_vs_scheduler *sched; int conn_flags; /* We cannot modify an address and change the address family */ BUG_ON(!add && udest->af != dest->af); if (add && udest->af != svc->af) ipvs->mixed_address_family_dests++; /* keep the last_weight with latest non-0 weight */ if (add || udest->weight != 0) atomic_set(&dest->last_weight, udest->weight); /* set the weight and the flags */ atomic_set(&dest->weight, udest->weight); conn_flags = udest->conn_flags & IP_VS_CONN_F_DEST_MASK; conn_flags |= IP_VS_CONN_F_INACTIVE; /* Need to rehash? */ if ((udest->conn_flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_DFWD_METHOD(dest) || udest->tun_type != dest->tun_type || udest->tun_port != dest->tun_port) ip_vs_rs_unhash(dest); /* set the tunnel info */ dest->tun_type = udest->tun_type; dest->tun_port = udest->tun_port; dest->tun_flags = udest->tun_flags; /* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */ if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) { conn_flags |= IP_VS_CONN_F_NOOUTPUT; } else { /* FTP-NAT requires conntrack for mangling */ if (svc->port == FTPPORT) ip_vs_register_conntrack(svc); } atomic_set(&dest->conn_flags, conn_flags); /* Put the real service in rs_table if not present. 
*/ ip_vs_rs_hash(ipvs, dest); /* bind the service */ old_svc = rcu_dereference_protected(dest->svc, 1); if (!old_svc) { __ip_vs_bind_svc(dest, svc); } else { if (old_svc != svc) { ip_vs_zero_stats(&dest->stats); __ip_vs_bind_svc(dest, svc); __ip_vs_svc_put(old_svc); } } /* set the dest status flags */ dest->flags |= IP_VS_DEST_F_AVAILABLE; if (udest->u_threshold == 0 || udest->u_threshold > dest->u_threshold) dest->flags &= ~IP_VS_DEST_F_OVERLOAD; dest->u_threshold = udest->u_threshold; dest->l_threshold = udest->l_threshold; dest->af = udest->af; spin_lock_bh(&dest->dst_lock); __ip_vs_dst_cache_reset(dest); spin_unlock_bh(&dest->dst_lock); if (add) { list_add_rcu(&dest->n_list, &svc->destinations); svc->num_dests++; sched = rcu_dereference_protected(svc->scheduler, 1); if (sched && sched->add_dest) sched->add_dest(svc, dest); } else { sched = rcu_dereference_protected(svc->scheduler, 1); if (sched && sched->upd_dest) sched->upd_dest(svc, dest); } } /* * Create a destination for the given service */ static int ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) { struct ip_vs_dest *dest; unsigned int atype; int ret; #ifdef CONFIG_IP_VS_IPV6 if (udest->af == AF_INET6) { atype = ipv6_addr_type(&udest->addr.in6); if ((!(atype & IPV6_ADDR_UNICAST) || atype & IPV6_ADDR_LINKLOCAL) && !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6)) return -EINVAL; ret = nf_defrag_ipv6_enable(svc->ipvs->net); if (ret) return ret; } else #endif { atype = inet_addr_type(svc->ipvs->net, udest->addr.ip); if (atype != RTN_LOCAL && atype != RTN_UNICAST) return -EINVAL; } dest = kzalloc(sizeof(struct ip_vs_dest), GFP_KERNEL); if (dest == NULL) return -ENOMEM; ret = ip_vs_stats_init_alloc(&dest->stats); if (ret < 0) goto err_alloc; ret = ip_vs_start_estimator(svc->ipvs, &dest->stats); if (ret < 0) goto err_stats; dest->af = udest->af; dest->protocol = svc->protocol; dest->vaddr = svc->addr; dest->vport = svc->port; dest->vfwmark = svc->fwmark; ip_vs_addr_copy(udest->af, &dest->addr, &udest->addr); dest->port = udest->port; atomic_set(&dest->activeconns, 0); atomic_set(&dest->inactconns, 0); atomic_set(&dest->persistconns, 0); refcount_set(&dest->refcnt, 1); INIT_HLIST_NODE(&dest->d_list); spin_lock_init(&dest->dst_lock); __ip_vs_update_dest(svc, dest, udest, 1); return 0; err_stats: ip_vs_stats_release(&dest->stats); err_alloc: kfree(dest); return ret; } /* * Add a destination into an existing service */ static int ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) { struct ip_vs_dest *dest; union nf_inet_addr daddr; __be16 dport = udest->port; int ret; if (udest->weight < 0) { pr_err("%s(): server weight less than zero\n", __func__); return -ERANGE; } if (udest->l_threshold > udest->u_threshold) { pr_err("%s(): lower threshold is higher than upper threshold\n", __func__); return -ERANGE; } if (udest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) { if (udest->tun_port == 0) { pr_err("%s(): tunnel port is zero\n", __func__); return -EINVAL; } } ip_vs_addr_copy(udest->af, &daddr, &udest->addr); /* We use function that requires RCU lock */ rcu_read_lock(); dest = ip_vs_lookup_dest(svc, udest->af, &daddr, dport); rcu_read_unlock(); if (dest != NULL) { IP_VS_DBG(1, "%s(): dest already exists\n", __func__); return -EEXIST; } /* * Check if the dest already exists in the trash and * is from the same service */ dest = ip_vs_trash_get_dest(svc, udest->af, &daddr, dport); if (dest != NULL) { IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, " "dest->refcnt=%d, service 
%u/%s:%u\n", IP_VS_DBG_ADDR(udest->af, &daddr), ntohs(dport), refcount_read(&dest->refcnt), dest->vfwmark, IP_VS_DBG_ADDR(svc->af, &dest->vaddr), ntohs(dest->vport)); ret = ip_vs_start_estimator(svc->ipvs, &dest->stats); if (ret < 0) return ret; __ip_vs_update_dest(svc, dest, udest, 1); } else { /* * Allocate and initialize the dest structure */ ret = ip_vs_new_dest(svc, udest); } return ret; } /* * Edit a destination in the given service */ static int ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) { struct ip_vs_dest *dest; union nf_inet_addr daddr; __be16 dport = udest->port; if (udest->weight < 0) { pr_err("%s(): server weight less than zero\n", __func__); return -ERANGE; } if (udest->l_threshold > udest->u_threshold) { pr_err("%s(): lower threshold is higher than upper threshold\n", __func__); return -ERANGE; } if (udest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) { if (udest->tun_port == 0) { pr_err("%s(): tunnel port is zero\n", __func__); return -EINVAL; } } ip_vs_addr_copy(udest->af, &daddr, &udest->addr); /* We use function that requires RCU lock */ rcu_read_lock(); dest = ip_vs_lookup_dest(svc, udest->af, &daddr, dport); rcu_read_unlock(); if (dest == NULL) { IP_VS_DBG(1, "%s(): dest doesn't exist\n", __func__); return -ENOENT; } __ip_vs_update_dest(svc, dest, udest, 0); return 0; } /* * Delete a destination (must be already unlinked from the service) */ static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest, bool cleanup) { ip_vs_stop_estimator(ipvs, &dest->stats); /* * Remove it from the d-linked list with the real services. */ ip_vs_rs_unhash(dest); spin_lock_bh(&ipvs->dest_trash_lock); IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n", IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), refcount_read(&dest->refcnt)); if (list_empty(&ipvs->dest_trash) && !cleanup) mod_timer(&ipvs->dest_trash_timer, jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1)); /* dest lives in trash with reference */ list_add(&dest->t_list, &ipvs->dest_trash); dest->idle_start = 0; spin_unlock_bh(&ipvs->dest_trash_lock); /* Queue up delayed work to expire all no destination connections. * No-op when CONFIG_SYSCTL is disabled. */ if (!cleanup) ip_vs_enqueue_expire_nodest_conns(ipvs); } /* * Unlink a destination from the given service */ static void __ip_vs_unlink_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, int svcupd) { dest->flags &= ~IP_VS_DEST_F_AVAILABLE; /* * Remove it from the d-linked destination list. 
*/ list_del_rcu(&dest->n_list); svc->num_dests--; if (dest->af != svc->af) svc->ipvs->mixed_address_family_dests--; if (svcupd) { struct ip_vs_scheduler *sched; sched = rcu_dereference_protected(svc->scheduler, 1); if (sched && sched->del_dest) sched->del_dest(svc, dest); } } /* * Delete a destination server in the given service */ static int ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) { struct ip_vs_dest *dest; __be16 dport = udest->port; /* We use function that requires RCU lock */ rcu_read_lock(); dest = ip_vs_lookup_dest(svc, udest->af, &udest->addr, dport); rcu_read_unlock(); if (dest == NULL) { IP_VS_DBG(1, "%s(): destination not found!\n", __func__); return -ENOENT; } /* * Unlink dest from the service */ __ip_vs_unlink_dest(svc, dest, 1); /* * Delete the destination */ __ip_vs_del_dest(svc->ipvs, dest, false); return 0; } static void ip_vs_dest_trash_expire(struct timer_list *t) { struct netns_ipvs *ipvs = from_timer(ipvs, t, dest_trash_timer); struct ip_vs_dest *dest, *next; unsigned long now = jiffies; spin_lock(&ipvs->dest_trash_lock); list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) { if (refcount_read(&dest->refcnt) > 1) continue; if (dest->idle_start) { if (time_before(now, dest->idle_start + IP_VS_DEST_TRASH_PERIOD)) continue; } else { dest->idle_start = max(1UL, now); continue; } IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n", dest->vfwmark, IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port)); list_del(&dest->t_list); ip_vs_dest_free(dest); } if (!list_empty(&ipvs->dest_trash)) mod_timer(&ipvs->dest_trash_timer, jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1)); spin_unlock(&ipvs->dest_trash_lock); } /* * Add a service into the service hash table */ static int ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, struct ip_vs_service **svc_p) { int ret = 0; struct ip_vs_scheduler *sched = NULL; struct ip_vs_pe *pe = NULL; struct ip_vs_service *svc = NULL; int ret_hooks = -1; /* increase the module use count */ if (!ip_vs_use_count_inc()) return -ENOPROTOOPT; /* Lookup the scheduler by 'u->sched_name' */ if (strcmp(u->sched_name, "none")) { sched = ip_vs_scheduler_get(u->sched_name); if (!sched) { pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); ret = -ENOENT; goto out_err; } } if (u->pe_name && *u->pe_name) { pe = ip_vs_pe_getbyname(u->pe_name); if (pe == NULL) { pr_info("persistence engine module ip_vs_pe_%s " "not found\n", u->pe_name); ret = -ENOENT; goto out_err; } } #ifdef CONFIG_IP_VS_IPV6 if (u->af == AF_INET6) { __u32 plen = (__force __u32) u->netmask; if (plen < 1 || plen > 128) { ret = -EINVAL; goto out_err; } ret = nf_defrag_ipv6_enable(ipvs->net); if (ret) goto out_err; } #endif if ((u->af == AF_INET && !ipvs->num_services) || (u->af == AF_INET6 && !ipvs->num_services6)) { ret = ip_vs_register_hooks(ipvs, u->af); if (ret < 0) goto out_err; ret_hooks = ret; } svc = kzalloc(sizeof(struct ip_vs_service), GFP_KERNEL); if (svc == NULL) { IP_VS_DBG(1, "%s(): no memory\n", __func__); ret = -ENOMEM; goto out_err; } ret = ip_vs_stats_init_alloc(&svc->stats); if (ret < 0) goto out_err; /* I'm the first user of the service */ atomic_set(&svc->refcnt, 0); svc->af = u->af; svc->protocol = u->protocol; ip_vs_addr_copy(svc->af, &svc->addr, &u->addr); svc->port = u->port; svc->fwmark = u->fwmark; svc->flags = u->flags & ~IP_VS_SVC_F_HASHED; svc->timeout = u->timeout * HZ; svc->netmask = u->netmask; svc->ipvs = ipvs; INIT_LIST_HEAD(&svc->destinations); 
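	/* sched_lock protects scheduler-private state attached to this
	 * service (e.g. the round-robin cursor) from concurrent updates
	 * by the packet path.
	 */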
spin_lock_init(&svc->sched_lock); /* Bind the scheduler */ if (sched) { ret = ip_vs_bind_scheduler(svc, sched); if (ret) goto out_err; sched = NULL; } ret = ip_vs_start_estimator(ipvs, &svc->stats); if (ret < 0) goto out_err; /* Update the virtual service counters */ if (svc->port == FTPPORT) atomic_inc(&ipvs->ftpsvc_counter); else if (svc->port == 0) atomic_inc(&ipvs->nullsvc_counter); if (pe && pe->conn_out) atomic_inc(&ipvs->conn_out_counter); /* Bind the ct retriever */ RCU_INIT_POINTER(svc->pe, pe); pe = NULL; /* Count only IPv4 services for old get/setsockopt interface */ if (svc->af == AF_INET) ipvs->num_services++; else if (svc->af == AF_INET6) ipvs->num_services6++; /* Hash the service into the service table */ ip_vs_svc_hash(svc); *svc_p = svc; if (!ipvs->enable) { /* Now there is a service - full throttle */ ipvs->enable = 1; /* Start estimation for first time */ ip_vs_est_reload_start(ipvs); } return 0; out_err: if (ret_hooks >= 0) ip_vs_unregister_hooks(ipvs, u->af); if (svc != NULL) { ip_vs_unbind_scheduler(svc, sched); ip_vs_service_free(svc); } ip_vs_scheduler_put(sched); ip_vs_pe_put(pe); /* decrease the module use count */ ip_vs_use_count_dec(); return ret; } /* * Edit a service and bind it with a new scheduler */ static int ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) { struct ip_vs_scheduler *sched = NULL, *old_sched; struct ip_vs_pe *pe = NULL, *old_pe = NULL; int ret = 0; bool new_pe_conn_out, old_pe_conn_out; /* * Lookup the scheduler, by 'u->sched_name' */ if (strcmp(u->sched_name, "none")) { sched = ip_vs_scheduler_get(u->sched_name); if (!sched) { pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); return -ENOENT; } } old_sched = sched; if (u->pe_name && *u->pe_name) { pe = ip_vs_pe_getbyname(u->pe_name); if (pe == NULL) { pr_info("persistence engine module ip_vs_pe_%s " "not found\n", u->pe_name); ret = -ENOENT; goto out; } old_pe = pe; } #ifdef CONFIG_IP_VS_IPV6 if (u->af == AF_INET6) { __u32 plen = (__force __u32) u->netmask; if (plen < 1 || plen > 128) { ret = -EINVAL; goto out; } } #endif old_sched = rcu_dereference_protected(svc->scheduler, 1); if (sched != old_sched) { if (old_sched) { ip_vs_unbind_scheduler(svc, old_sched); RCU_INIT_POINTER(svc->scheduler, NULL); /* Wait all svc->sched_data users */ synchronize_rcu(); } /* Bind the new scheduler */ if (sched) { ret = ip_vs_bind_scheduler(svc, sched); if (ret) { ip_vs_scheduler_put(sched); goto out; } } } /* * Set the flags and timeout value */ svc->flags = u->flags | IP_VS_SVC_F_HASHED; svc->timeout = u->timeout * HZ; svc->netmask = u->netmask; old_pe = rcu_dereference_protected(svc->pe, 1); if (pe != old_pe) { rcu_assign_pointer(svc->pe, pe); /* check for optional methods in new pe */ new_pe_conn_out = (pe && pe->conn_out) ? true : false; old_pe_conn_out = (old_pe && old_pe->conn_out) ? true : false; if (new_pe_conn_out && !old_pe_conn_out) atomic_inc(&svc->ipvs->conn_out_counter); if (old_pe_conn_out && !new_pe_conn_out) atomic_dec(&svc->ipvs->conn_out_counter); } out: ip_vs_scheduler_put(old_sched); ip_vs_pe_put(old_pe); return ret; } /* * Delete a service from the service list * - The service must be unlinked, unlocked and not referenced! 
* - We are called under _bh lock */ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup) { struct ip_vs_dest *dest, *nxt; struct ip_vs_scheduler *old_sched; struct ip_vs_pe *old_pe; struct netns_ipvs *ipvs = svc->ipvs; if (svc->af == AF_INET) { ipvs->num_services--; if (!ipvs->num_services) ip_vs_unregister_hooks(ipvs, svc->af); } else if (svc->af == AF_INET6) { ipvs->num_services6--; if (!ipvs->num_services6) ip_vs_unregister_hooks(ipvs, svc->af); } ip_vs_stop_estimator(svc->ipvs, &svc->stats); /* Unbind scheduler */ old_sched = rcu_dereference_protected(svc->scheduler, 1); ip_vs_unbind_scheduler(svc, old_sched); ip_vs_scheduler_put(old_sched); /* Unbind persistence engine, keep svc->pe */ old_pe = rcu_dereference_protected(svc->pe, 1); if (old_pe && old_pe->conn_out) atomic_dec(&ipvs->conn_out_counter); ip_vs_pe_put(old_pe); /* * Unlink the whole destination list */ list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) { __ip_vs_unlink_dest(svc, dest, 0); __ip_vs_del_dest(svc->ipvs, dest, cleanup); } /* * Update the virtual service counters */ if (svc->port == FTPPORT) atomic_dec(&ipvs->ftpsvc_counter); else if (svc->port == 0) atomic_dec(&ipvs->nullsvc_counter); /* * Free the service if nobody refers to it */ __ip_vs_svc_put(svc); /* decrease the module use count */ ip_vs_use_count_dec(); } /* * Unlink a service from list and try to delete it if its refcnt reached 0 */ static void ip_vs_unlink_service(struct ip_vs_service *svc, bool cleanup) { ip_vs_unregister_conntrack(svc); /* Hold svc to avoid double release from dest_trash */ atomic_inc(&svc->refcnt); /* * Unhash it from the service table */ ip_vs_svc_unhash(svc); __ip_vs_del_service(svc, cleanup); } /* * Delete a service from the service list */ static int ip_vs_del_service(struct ip_vs_service *svc) { if (svc == NULL) return -EEXIST; ip_vs_unlink_service(svc, false); return 0; } /* * Flush all the virtual services */ static int ip_vs_flush(struct netns_ipvs *ipvs, bool cleanup) { int idx; struct ip_vs_service *svc; struct hlist_node *n; /* * Flush the service table hashed by <netns,protocol,addr,port> */ for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { hlist_for_each_entry_safe(svc, n, &ip_vs_svc_table[idx], s_list) { if (svc->ipvs == ipvs) ip_vs_unlink_service(svc, cleanup); } } /* * Flush the service table hashed by fwmark */ for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { hlist_for_each_entry_safe(svc, n, &ip_vs_svc_fwm_table[idx], f_list) { if (svc->ipvs == ipvs) ip_vs_unlink_service(svc, cleanup); } } return 0; } /* * Delete service by {netns} in the service table. 
* Called by __ip_vs_batch_cleanup() */ void ip_vs_service_nets_cleanup(struct list_head *net_list) { struct netns_ipvs *ipvs; struct net *net; /* Check for "full" addressed entries */ mutex_lock(&__ip_vs_mutex); list_for_each_entry(net, net_list, exit_list) { ipvs = net_ipvs(net); ip_vs_flush(ipvs, true); } mutex_unlock(&__ip_vs_mutex); } /* Put all references for device (dst_cache) */ static inline void ip_vs_forget_dev(struct ip_vs_dest *dest, struct net_device *dev) { struct ip_vs_dest_dst *dest_dst; spin_lock_bh(&dest->dst_lock); dest_dst = rcu_dereference_protected(dest->dest_dst, 1); if (dest_dst && dest_dst->dst_cache->dev == dev) { IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n", dev->name, IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), refcount_read(&dest->refcnt)); __ip_vs_dst_cache_reset(dest); } spin_unlock_bh(&dest->dst_lock); } /* Netdev event receiver * Currently only NETDEV_DOWN is handled to release refs to cached dsts */ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_service *svc; struct ip_vs_dest *dest; unsigned int idx; if (event != NETDEV_DOWN || !ipvs) return NOTIFY_DONE; IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name); mutex_lock(&__ip_vs_mutex); for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { if (svc->ipvs == ipvs) { list_for_each_entry(dest, &svc->destinations, n_list) { ip_vs_forget_dev(dest, dev); } } } hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { if (svc->ipvs == ipvs) { list_for_each_entry(dest, &svc->destinations, n_list) { ip_vs_forget_dev(dest, dev); } } } } spin_lock_bh(&ipvs->dest_trash_lock); list_for_each_entry(dest, &ipvs->dest_trash, t_list) { ip_vs_forget_dev(dest, dev); } spin_unlock_bh(&ipvs->dest_trash_lock); mutex_unlock(&__ip_vs_mutex); return NOTIFY_DONE; } /* * Zero counters in a service or all services */ static int ip_vs_zero_service(struct ip_vs_service *svc) { struct ip_vs_dest *dest; list_for_each_entry(dest, &svc->destinations, n_list) { ip_vs_zero_stats(&dest->stats); } ip_vs_zero_stats(&svc->stats); return 0; } static int ip_vs_zero_all(struct netns_ipvs *ipvs) { int idx; struct ip_vs_service *svc; for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { if (svc->ipvs == ipvs) ip_vs_zero_service(svc); } } for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { if (svc->ipvs == ipvs) ip_vs_zero_service(svc); } } ip_vs_zero_stats(&ipvs->tot_stats->s); return 0; } #ifdef CONFIG_SYSCTL static int proc_do_defense_mode(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct netns_ipvs *ipvs = table->extra2; int *valp = table->data; int val = *valp; int rc; struct ctl_table tmp = { .data = &val, .maxlen = sizeof(int), .mode = table->mode, }; rc = proc_dointvec(&tmp, write, buffer, lenp, ppos); if (write && (*valp != val)) { if (val < 0 || val > 3) { rc = -EINVAL; } else { *valp = val; update_defense_level(ipvs); } } return rc; } static int proc_do_sync_threshold(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct netns_ipvs *ipvs = table->extra2; int *valp = table->data; int val[2]; int rc; struct ctl_table tmp = { .data = &val, .maxlen = table->maxlen, .mode = table->mode, 
}; mutex_lock(&ipvs->sync_mutex); memcpy(val, valp, sizeof(val)); rc = proc_dointvec(&tmp, write, buffer, lenp, ppos); if (write) { if (val[0] < 0 || val[1] < 0 || (val[0] >= val[1] && val[1])) rc = -EINVAL; else memcpy(valp, val, sizeof(val)); } mutex_unlock(&ipvs->sync_mutex); return rc; } static int proc_do_sync_ports(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int *valp = table->data; int val = *valp; int rc; struct ctl_table tmp = { .data = &val, .maxlen = sizeof(int), .mode = table->mode, }; rc = proc_dointvec(&tmp, write, buffer, lenp, ppos); if (write && (*valp != val)) { if (val < 1 || !is_power_of_2(val)) rc = -EINVAL; else *valp = val; } return rc; } static int ipvs_proc_est_cpumask_set(const struct ctl_table *table, void *buffer) { struct netns_ipvs *ipvs = table->extra2; cpumask_var_t *valp = table->data; cpumask_var_t newmask; int ret; if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) return -ENOMEM; ret = cpulist_parse(buffer, newmask); if (ret) goto out; mutex_lock(&ipvs->est_mutex); if (!ipvs->est_cpulist_valid) { if (!zalloc_cpumask_var(valp, GFP_KERNEL)) { ret = -ENOMEM; goto unlock; } ipvs->est_cpulist_valid = 1; } cpumask_and(newmask, newmask, &current->cpus_mask); cpumask_copy(*valp, newmask); /* est_max_threads may depend on cpulist size */ ipvs->est_max_threads = ip_vs_est_max_threads(ipvs); ipvs->est_calc_phase = 1; ip_vs_est_reload_start(ipvs); unlock: mutex_unlock(&ipvs->est_mutex); out: free_cpumask_var(newmask); return ret; } static int ipvs_proc_est_cpumask_get(const struct ctl_table *table, void *buffer, size_t size) { struct netns_ipvs *ipvs = table->extra2; cpumask_var_t *valp = table->data; struct cpumask *mask; int ret; mutex_lock(&ipvs->est_mutex); if (ipvs->est_cpulist_valid) mask = *valp; else mask = (struct cpumask *)housekeeping_cpumask(HK_TYPE_KTHREAD); ret = scnprintf(buffer, size, "%*pbl\n", cpumask_pr_args(mask)); mutex_unlock(&ipvs->est_mutex); return ret; } static int ipvs_proc_est_cpulist(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; /* Ignore both read and write(append) if *ppos not 0 */ if (*ppos || !*lenp) { *lenp = 0; return 0; } if (write) { /* proc_sys_call_handler() appends terminator */ ret = ipvs_proc_est_cpumask_set(table, buffer); if (ret >= 0) *ppos += *lenp; } else { /* proc_sys_call_handler() allocates 1 byte for terminator */ ret = ipvs_proc_est_cpumask_get(table, buffer, *lenp + 1); if (ret >= 0) { *lenp = ret; *ppos += *lenp; ret = 0; } } return ret; } static int ipvs_proc_est_nice(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct netns_ipvs *ipvs = table->extra2; int *valp = table->data; int val = *valp; int ret; struct ctl_table tmp_table = { .data = &val, .maxlen = sizeof(int), .mode = table->mode, }; ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos); if (write && ret >= 0) { if (val < MIN_NICE || val > MAX_NICE) { ret = -EINVAL; } else { mutex_lock(&ipvs->est_mutex); if (*valp != val) { *valp = val; ip_vs_est_reload_start(ipvs); } mutex_unlock(&ipvs->est_mutex); } } return ret; } static int ipvs_proc_run_estimation(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct netns_ipvs *ipvs = table->extra2; int *valp = table->data; int val = *valp; int ret; struct ctl_table tmp_table = { .data = &val, .maxlen = sizeof(int), .mode = table->mode, }; ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos); if (write && ret >= 0) { 
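		/* Writes such as "echo 0 > /proc/sys/net/ipv4/vs/run_estimation"
		 * arrive here with the parsed value in 'val'; publish it under
		 * est_mutex so the estimator kthreads reload a consistent
		 * setting.
		 */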
		mutex_lock(&ipvs->est_mutex);
		if (*valp != val) {
			*valp = val;
			ip_vs_est_reload_start(ipvs);
		}
		mutex_unlock(&ipvs->est_mutex);
	}

	return ret;
}

/*
 *	IPVS sysctl table (under /proc/sys/net/ipv4/vs/)
 *	Do not change the order or insert new entries without
 *	aligning with the netns init in ip_vs_control_net_init()
 */

static struct ctl_table vs_vars[] = {
	{
		.procname	= "amemthresh",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "am_droprate",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "drop_entry",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_do_defense_mode,
	},
	{
		.procname	= "drop_packet",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_do_defense_mode,
	},
#ifdef CONFIG_IP_VS_NFCT
	{
		.procname	= "conntrack",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
#endif
	{
		.procname	= "secure_tcp",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_do_defense_mode,
	},
	{
		.procname	= "snat_reroute",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.procname	= "sync_version",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "sync_ports",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_do_sync_ports,
	},
	{
		.procname	= "sync_persist_mode",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "sync_qlen_max",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "sync_sock_size",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "cache_bypass",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "expire_nodest_conn",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "sloppy_tcp",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "sloppy_sctp",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "expire_quiescent_template",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "sync_threshold",
		.maxlen		=
			sizeof(((struct netns_ipvs *)0)->sysctl_sync_threshold),
		.mode		= 0644,
		.proc_handler	= proc_do_sync_threshold,
	},
	{
		.procname	= "sync_refresh_period",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "sync_retries",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_THREE,
	},
	{
		.procname	= "nat_icmp_send",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "pmtu_disc",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "backup_only",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "conn_reuse_mode",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "schedule_icmp",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "ignore_tunneled",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "run_estimation",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= ipvs_proc_run_estimation,
	},
	{
		.procname	= "est_cpulist",
		.maxlen		= NR_CPUS,	/* unused */
		.mode		= 0644,
		.proc_handler	= ipvs_proc_est_cpulist,
	},
	{
		.procname	= "est_nice",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	=
ipvs_proc_est_nice, }, #ifdef CONFIG_IP_VS_DEBUG { .procname = "debug_level", .data = &sysctl_ip_vs_debug_level, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif }; #endif #ifdef CONFIG_PROC_FS struct ip_vs_iter { struct seq_net_private p; /* Do not move this, netns depends upon it*/ struct hlist_head *table; int bucket; }; /* * Write the contents of the VS rule table to a PROCfs file. * (It is kept just for backward compatibility) */ static inline const char *ip_vs_fwd_name(unsigned int flags) { switch (flags & IP_VS_CONN_F_FWD_MASK) { case IP_VS_CONN_F_LOCALNODE: return "Local"; case IP_VS_CONN_F_TUNNEL: return "Tunnel"; case IP_VS_CONN_F_DROUTE: return "Route"; default: return "Masq"; } } /* Get the Nth entry in the two lists */ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos) { struct net *net = seq_file_net(seq); struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_iter *iter = seq->private; int idx; struct ip_vs_service *svc; /* look in hash by protocol */ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { hlist_for_each_entry_rcu(svc, &ip_vs_svc_table[idx], s_list) { if ((svc->ipvs == ipvs) && pos-- == 0) { iter->table = ip_vs_svc_table; iter->bucket = idx; return svc; } } } /* keep looking in fwmark */ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { hlist_for_each_entry_rcu(svc, &ip_vs_svc_fwm_table[idx], f_list) { if ((svc->ipvs == ipvs) && pos-- == 0) { iter->table = ip_vs_svc_fwm_table; iter->bucket = idx; return svc; } } } return NULL; } static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); return *pos ? ip_vs_info_array(seq, *pos - 1) : SEQ_START_TOKEN; } static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct hlist_node *e; struct ip_vs_iter *iter; struct ip_vs_service *svc; ++*pos; if (v == SEQ_START_TOKEN) return ip_vs_info_array(seq,0); svc = v; iter = seq->private; if (iter->table == ip_vs_svc_table) { /* next service in table hashed by protocol */ e = rcu_dereference(hlist_next_rcu(&svc->s_list)); if (e) return hlist_entry(e, struct ip_vs_service, s_list); while (++iter->bucket < IP_VS_SVC_TAB_SIZE) { hlist_for_each_entry_rcu(svc, &ip_vs_svc_table[iter->bucket], s_list) { return svc; } } iter->table = ip_vs_svc_fwm_table; iter->bucket = -1; goto scan_fwmark; } /* next service in hashed by fwmark */ e = rcu_dereference(hlist_next_rcu(&svc->f_list)); if (e) return hlist_entry(e, struct ip_vs_service, f_list); scan_fwmark: while (++iter->bucket < IP_VS_SVC_TAB_SIZE) { hlist_for_each_entry_rcu(svc, &ip_vs_svc_fwm_table[iter->bucket], f_list) return svc; } return NULL; } static void ip_vs_info_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static int ip_vs_info_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_printf(seq, "IP Virtual Server version %d.%d.%d (size=%d)\n", NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size); seq_puts(seq, "Prot LocalAddress:Port Scheduler Flags\n"); seq_puts(seq, " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n"); } else { struct net *net = seq_file_net(seq); struct netns_ipvs *ipvs = net_ipvs(net); const struct ip_vs_service *svc = v; const struct ip_vs_iter *iter = seq->private; const struct ip_vs_dest *dest; struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); char *sched_name = sched ? 
sched->name : "none"; if (svc->ipvs != ipvs) return 0; if (iter->table == ip_vs_svc_table) { #ifdef CONFIG_IP_VS_IPV6 if (svc->af == AF_INET6) seq_printf(seq, "%s [%pI6]:%04X %s ", ip_vs_proto_name(svc->protocol), &svc->addr.in6, ntohs(svc->port), sched_name); else #endif seq_printf(seq, "%s %08X:%04X %s %s ", ip_vs_proto_name(svc->protocol), ntohl(svc->addr.ip), ntohs(svc->port), sched_name, (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); } else { seq_printf(seq, "FWM %08X %s %s", svc->fwmark, sched_name, (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); } if (svc->flags & IP_VS_SVC_F_PERSISTENT) seq_printf(seq, "persistent %d %08X\n", svc->timeout, ntohl(svc->netmask)); else seq_putc(seq, '\n'); list_for_each_entry_rcu(dest, &svc->destinations, n_list) { #ifdef CONFIG_IP_VS_IPV6 if (dest->af == AF_INET6) seq_printf(seq, " -> [%pI6]:%04X" " %-7s %-6d %-10d %-10d\n", &dest->addr.in6, ntohs(dest->port), ip_vs_fwd_name(atomic_read(&dest->conn_flags)), atomic_read(&dest->weight), atomic_read(&dest->activeconns), atomic_read(&dest->inactconns)); else #endif seq_printf(seq, " -> %08X:%04X " "%-7s %-6d %-10d %-10d\n", ntohl(dest->addr.ip), ntohs(dest->port), ip_vs_fwd_name(atomic_read(&dest->conn_flags)), atomic_read(&dest->weight), atomic_read(&dest->activeconns), atomic_read(&dest->inactconns)); } } return 0; } static const struct seq_operations ip_vs_info_seq_ops = { .start = ip_vs_info_seq_start, .next = ip_vs_info_seq_next, .stop = ip_vs_info_seq_stop, .show = ip_vs_info_seq_show, }; static int ip_vs_stats_show(struct seq_file *seq, void *v) { struct net *net = seq_file_single_net(seq); struct ip_vs_kstats show; /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ seq_puts(seq, " Total Incoming Outgoing Incoming Outgoing\n"); seq_puts(seq, " Conns Packets Packets Bytes Bytes\n"); ip_vs_copy_stats(&show, &net_ipvs(net)->tot_stats->s); seq_printf(seq, "%8LX %8LX %8LX %16LX %16LX\n\n", (unsigned long long)show.conns, (unsigned long long)show.inpkts, (unsigned long long)show.outpkts, (unsigned long long)show.inbytes, (unsigned long long)show.outbytes); /* 01234567 01234567 01234567 0123456701234567 0123456701234567*/ seq_puts(seq, " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); seq_printf(seq, "%8LX %8LX %8LX %16LX %16LX\n", (unsigned long long)show.cps, (unsigned long long)show.inpps, (unsigned long long)show.outpps, (unsigned long long)show.inbps, (unsigned long long)show.outbps); return 0; } static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) { struct net *net = seq_file_single_net(seq); struct ip_vs_stats *tot_stats = &net_ipvs(net)->tot_stats->s; struct ip_vs_cpu_stats __percpu *cpustats = tot_stats->cpustats; struct ip_vs_kstats kstats; int i; /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ seq_puts(seq, " Total Incoming Outgoing Incoming Outgoing\n"); seq_puts(seq, "CPU Conns Packets Packets Bytes Bytes\n"); for_each_possible_cpu(i) { struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i); unsigned int start; u64 conns, inpkts, outpkts, inbytes, outbytes; do { start = u64_stats_fetch_begin(&u->syncp); conns = u64_stats_read(&u->cnt.conns); inpkts = u64_stats_read(&u->cnt.inpkts); outpkts = u64_stats_read(&u->cnt.outpkts); inbytes = u64_stats_read(&u->cnt.inbytes); outbytes = u64_stats_read(&u->cnt.outbytes); } while (u64_stats_fetch_retry(&u->syncp, start)); seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n", i, (u64)conns, (u64)inpkts, (u64)outpkts, (u64)inbytes, (u64)outbytes); } ip_vs_copy_stats(&kstats, tot_stats); seq_printf(seq, " ~ %8LX 
%8LX %8LX %16LX %16LX\n\n", (unsigned long long)kstats.conns, (unsigned long long)kstats.inpkts, (unsigned long long)kstats.outpkts, (unsigned long long)kstats.inbytes, (unsigned long long)kstats.outbytes); /* ... 01234567 01234567 01234567 0123456701234567 0123456701234567 */ seq_puts(seq, " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); seq_printf(seq, " %8LX %8LX %8LX %16LX %16LX\n", kstats.cps, kstats.inpps, kstats.outpps, kstats.inbps, kstats.outbps); return 0; } #endif /* * Set timeout values for tcp tcpfin udp in the timeout_table. */ static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user *u) { #if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) struct ip_vs_proto_data *pd; #endif IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n", u->tcp_timeout, u->tcp_fin_timeout, u->udp_timeout); #ifdef CONFIG_IP_VS_PROTO_TCP if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) || u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) { return -EINVAL; } #endif #ifdef CONFIG_IP_VS_PROTO_UDP if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ)) return -EINVAL; #endif #ifdef CONFIG_IP_VS_PROTO_TCP if (u->tcp_timeout) { pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP); pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] = u->tcp_timeout * HZ; } if (u->tcp_fin_timeout) { pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP); pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] = u->tcp_fin_timeout * HZ; } #endif #ifdef CONFIG_IP_VS_PROTO_UDP if (u->udp_timeout) { pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP); pd->timeout_table[IP_VS_UDP_S_NORMAL] = u->udp_timeout * HZ; } #endif return 0; } #define CMDID(cmd) (cmd - IP_VS_BASE_CTL) struct ip_vs_svcdest_user { struct ip_vs_service_user s; struct ip_vs_dest_user d; }; static const unsigned char set_arglen[CMDID(IP_VS_SO_SET_MAX) + 1] = { [CMDID(IP_VS_SO_SET_ADD)] = sizeof(struct ip_vs_service_user), [CMDID(IP_VS_SO_SET_EDIT)] = sizeof(struct ip_vs_service_user), [CMDID(IP_VS_SO_SET_DEL)] = sizeof(struct ip_vs_service_user), [CMDID(IP_VS_SO_SET_ADDDEST)] = sizeof(struct ip_vs_svcdest_user), [CMDID(IP_VS_SO_SET_DELDEST)] = sizeof(struct ip_vs_svcdest_user), [CMDID(IP_VS_SO_SET_EDITDEST)] = sizeof(struct ip_vs_svcdest_user), [CMDID(IP_VS_SO_SET_TIMEOUT)] = sizeof(struct ip_vs_timeout_user), [CMDID(IP_VS_SO_SET_STARTDAEMON)] = sizeof(struct ip_vs_daemon_user), [CMDID(IP_VS_SO_SET_STOPDAEMON)] = sizeof(struct ip_vs_daemon_user), [CMDID(IP_VS_SO_SET_ZERO)] = sizeof(struct ip_vs_service_user), }; union ip_vs_set_arglen { struct ip_vs_service_user field_IP_VS_SO_SET_ADD; struct ip_vs_service_user field_IP_VS_SO_SET_EDIT; struct ip_vs_service_user field_IP_VS_SO_SET_DEL; struct ip_vs_svcdest_user field_IP_VS_SO_SET_ADDDEST; struct ip_vs_svcdest_user field_IP_VS_SO_SET_DELDEST; struct ip_vs_svcdest_user field_IP_VS_SO_SET_EDITDEST; struct ip_vs_timeout_user field_IP_VS_SO_SET_TIMEOUT; struct ip_vs_daemon_user field_IP_VS_SO_SET_STARTDAEMON; struct ip_vs_daemon_user field_IP_VS_SO_SET_STOPDAEMON; struct ip_vs_service_user field_IP_VS_SO_SET_ZERO; }; #define MAX_SET_ARGLEN sizeof(union ip_vs_set_arglen) static void ip_vs_copy_usvc_compat(struct ip_vs_service_user_kern *usvc, struct ip_vs_service_user *usvc_compat) { memset(usvc, 0, sizeof(*usvc)); usvc->af = AF_INET; usvc->protocol = usvc_compat->protocol; usvc->addr.ip = usvc_compat->addr; usvc->port = usvc_compat->port; usvc->fwmark = usvc_compat->fwmark; /* Deep copy of sched_name is not needed here */ usvc->sched_name = usvc_compat->sched_name; usvc->flags = usvc_compat->flags; 
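	/* The remaining fields of the legacy (IPv4-only) sockopt layout
	 * map straight through to the kernel-internal representation.
	 */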
usvc->timeout = usvc_compat->timeout; usvc->netmask = usvc_compat->netmask; } static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest, struct ip_vs_dest_user *udest_compat) { memset(udest, 0, sizeof(*udest)); udest->addr.ip = udest_compat->addr; udest->port = udest_compat->port; udest->conn_flags = udest_compat->conn_flags; udest->weight = udest_compat->weight; udest->u_threshold = udest_compat->u_threshold; udest->l_threshold = udest_compat->l_threshold; udest->af = AF_INET; udest->tun_type = IP_VS_CONN_F_TUNNEL_TYPE_IPIP; } static int do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len) { struct net *net = sock_net(sk); int ret; unsigned char arg[MAX_SET_ARGLEN]; struct ip_vs_service_user *usvc_compat; struct ip_vs_service_user_kern usvc; struct ip_vs_service *svc; struct ip_vs_dest_user *udest_compat; struct ip_vs_dest_user_kern udest; struct netns_ipvs *ipvs = net_ipvs(net); BUILD_BUG_ON(sizeof(arg) > 255); if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX) return -EINVAL; if (len != set_arglen[CMDID(cmd)]) { IP_VS_DBG(1, "set_ctl: len %u != %u\n", len, set_arglen[CMDID(cmd)]); return -EINVAL; } if (copy_from_sockptr(arg, ptr, len) != 0) return -EFAULT; /* Handle daemons since they have another lock */ if (cmd == IP_VS_SO_SET_STARTDAEMON || cmd == IP_VS_SO_SET_STOPDAEMON) { struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; if (cmd == IP_VS_SO_SET_STARTDAEMON) { struct ipvs_sync_daemon_cfg cfg; memset(&cfg, 0, sizeof(cfg)); ret = -EINVAL; if (strscpy(cfg.mcast_ifn, dm->mcast_ifn, sizeof(cfg.mcast_ifn)) <= 0) return ret; cfg.syncid = dm->syncid; ret = start_sync_thread(ipvs, &cfg, dm->state); } else { ret = stop_sync_thread(ipvs, dm->state); } return ret; } mutex_lock(&__ip_vs_mutex); if (cmd == IP_VS_SO_SET_FLUSH) { /* Flush the virtual service */ ret = ip_vs_flush(ipvs, false); goto out_unlock; } else if (cmd == IP_VS_SO_SET_TIMEOUT) { /* Set timeout values for (tcp tcpfin udp) */ ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg); goto out_unlock; } else if (!len) { /* No more commands with len == 0 below */ ret = -EINVAL; goto out_unlock; } usvc_compat = (struct ip_vs_service_user *)arg; udest_compat = (struct ip_vs_dest_user *)(usvc_compat + 1); /* We only use the new structs internally, so copy userspace compat * structs to extended internal versions */ ip_vs_copy_usvc_compat(&usvc, usvc_compat); ip_vs_copy_udest_compat(&udest, udest_compat); if (cmd == IP_VS_SO_SET_ZERO) { /* if no service address is set, zero counters in all */ if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) { ret = ip_vs_zero_all(ipvs); goto out_unlock; } } if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) && strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) == IP_VS_SCHEDNAME_MAXLEN) { ret = -EINVAL; goto out_unlock; } /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */ if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP && usvc.protocol != IPPROTO_SCTP) { pr_err("set_ctl: invalid protocol: %d %pI4:%d\n", usvc.protocol, &usvc.addr.ip, ntohs(usvc.port)); ret = -EFAULT; goto out_unlock; } /* Lookup the exact service by <protocol, addr, port> or fwmark */ rcu_read_lock(); if (usvc.fwmark == 0) svc = __ip_vs_service_find(ipvs, usvc.af, usvc.protocol, &usvc.addr, usvc.port); else svc = __ip_vs_svc_fwm_find(ipvs, usvc.af, usvc.fwmark); rcu_read_unlock(); if (cmd != IP_VS_SO_SET_ADD && (svc == NULL || svc->protocol != usvc.protocol)) { ret = 
-ESRCH; goto out_unlock; } switch (cmd) { case IP_VS_SO_SET_ADD: if (svc != NULL) ret = -EEXIST; else ret = ip_vs_add_service(ipvs, &usvc, &svc); break; case IP_VS_SO_SET_EDIT: ret = ip_vs_edit_service(svc, &usvc); break; case IP_VS_SO_SET_DEL: ret = ip_vs_del_service(svc); if (!ret) goto out_unlock; break; case IP_VS_SO_SET_ZERO: ret = ip_vs_zero_service(svc); break; case IP_VS_SO_SET_ADDDEST: ret = ip_vs_add_dest(svc, &udest); break; case IP_VS_SO_SET_EDITDEST: ret = ip_vs_edit_dest(svc, &udest); break; case IP_VS_SO_SET_DELDEST: ret = ip_vs_del_dest(svc, &udest); break; default: WARN_ON_ONCE(1); ret = -EINVAL; break; } out_unlock: mutex_unlock(&__ip_vs_mutex); return ret; } static void ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) { struct ip_vs_scheduler *sched; struct ip_vs_kstats kstats; char *sched_name; sched = rcu_dereference_protected(src->scheduler, 1); sched_name = sched ? sched->name : "none"; dst->protocol = src->protocol; dst->addr = src->addr.ip; dst->port = src->port; dst->fwmark = src->fwmark; strscpy(dst->sched_name, sched_name, sizeof(dst->sched_name)); dst->flags = src->flags; dst->timeout = src->timeout / HZ; dst->netmask = src->netmask; dst->num_dests = src->num_dests; ip_vs_copy_stats(&kstats, &src->stats); ip_vs_export_stats_user(&dst->stats, &kstats); } static inline int __ip_vs_get_service_entries(struct netns_ipvs *ipvs, const struct ip_vs_get_services *get, struct ip_vs_get_services __user *uptr) { int idx, count=0; struct ip_vs_service *svc; struct ip_vs_service_entry entry; int ret = 0; for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { /* Only expose IPv4 entries to old interface */ if (svc->af != AF_INET || (svc->ipvs != ipvs)) continue; if (count >= get->num_services) goto out; memset(&entry, 0, sizeof(entry)); ip_vs_copy_service(&entry, svc); if (copy_to_user(&uptr->entrytable[count], &entry, sizeof(entry))) { ret = -EFAULT; goto out; } count++; } } for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { /* Only expose IPv4 entries to old interface */ if (svc->af != AF_INET || (svc->ipvs != ipvs)) continue; if (count >= get->num_services) goto out; memset(&entry, 0, sizeof(entry)); ip_vs_copy_service(&entry, svc); if (copy_to_user(&uptr->entrytable[count], &entry, sizeof(entry))) { ret = -EFAULT; goto out; } count++; } } out: return ret; } static inline int __ip_vs_get_dest_entries(struct netns_ipvs *ipvs, const struct ip_vs_get_dests *get, struct ip_vs_get_dests __user *uptr) { struct ip_vs_service *svc; union nf_inet_addr addr = { .ip = get->addr }; int ret = 0; rcu_read_lock(); if (get->fwmark) svc = __ip_vs_svc_fwm_find(ipvs, AF_INET, get->fwmark); else svc = __ip_vs_service_find(ipvs, AF_INET, get->protocol, &addr, get->port); rcu_read_unlock(); if (svc) { int count = 0; struct ip_vs_dest *dest; struct ip_vs_dest_entry entry; struct ip_vs_kstats kstats; memset(&entry, 0, sizeof(entry)); list_for_each_entry(dest, &svc->destinations, n_list) { if (count >= get->num_dests) break; /* Cannot expose heterogeneous members via sockopt * interface */ if (dest->af != svc->af) continue; entry.addr = dest->addr.ip; entry.port = dest->port; entry.conn_flags = atomic_read(&dest->conn_flags); entry.weight = atomic_read(&dest->weight); entry.u_threshold = dest->u_threshold; entry.l_threshold = dest->l_threshold; entry.activeconns = atomic_read(&dest->activeconns); entry.inactconns = atomic_read(&dest->inactconns); 
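			/* Connection counters are read atomically but without
			 * the service lock; the legacy interface exports
			 * point-in-time snapshots only.
			 */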
entry.persistconns = atomic_read(&dest->persistconns); ip_vs_copy_stats(&kstats, &dest->stats); ip_vs_export_stats_user(&entry.stats, &kstats); if (copy_to_user(&uptr->entrytable[count], &entry, sizeof(entry))) { ret = -EFAULT; break; } count++; } } else ret = -ESRCH; return ret; } static inline void __ip_vs_get_timeouts(struct netns_ipvs *ipvs, struct ip_vs_timeout_user *u) { #if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) struct ip_vs_proto_data *pd; #endif memset(u, 0, sizeof (*u)); #ifdef CONFIG_IP_VS_PROTO_TCP pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP); u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ; u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ; #endif #ifdef CONFIG_IP_VS_PROTO_UDP pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP); u->udp_timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ; #endif } static const unsigned char get_arglen[CMDID(IP_VS_SO_GET_MAX) + 1] = { [CMDID(IP_VS_SO_GET_VERSION)] = 64, [CMDID(IP_VS_SO_GET_INFO)] = sizeof(struct ip_vs_getinfo), [CMDID(IP_VS_SO_GET_SERVICES)] = sizeof(struct ip_vs_get_services), [CMDID(IP_VS_SO_GET_SERVICE)] = sizeof(struct ip_vs_service_entry), [CMDID(IP_VS_SO_GET_DESTS)] = sizeof(struct ip_vs_get_dests), [CMDID(IP_VS_SO_GET_TIMEOUT)] = sizeof(struct ip_vs_timeout_user), [CMDID(IP_VS_SO_GET_DAEMON)] = 2 * sizeof(struct ip_vs_daemon_user), }; union ip_vs_get_arglen { char field_IP_VS_SO_GET_VERSION[64]; struct ip_vs_getinfo field_IP_VS_SO_GET_INFO; struct ip_vs_get_services field_IP_VS_SO_GET_SERVICES; struct ip_vs_service_entry field_IP_VS_SO_GET_SERVICE; struct ip_vs_get_dests field_IP_VS_SO_GET_DESTS; struct ip_vs_timeout_user field_IP_VS_SO_GET_TIMEOUT; struct ip_vs_daemon_user field_IP_VS_SO_GET_DAEMON[2]; }; #define MAX_GET_ARGLEN sizeof(union ip_vs_get_arglen) static int do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { unsigned char arg[MAX_GET_ARGLEN]; int ret = 0; unsigned int copylen; struct net *net = sock_net(sk); struct netns_ipvs *ipvs = net_ipvs(net); BUG_ON(!net); BUILD_BUG_ON(sizeof(arg) > 255); if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX) return -EINVAL; copylen = get_arglen[CMDID(cmd)]; if (*len < (int) copylen) { IP_VS_DBG(1, "get_ctl: len %d < %u\n", *len, copylen); return -EINVAL; } if (copy_from_user(arg, user, copylen) != 0) return -EFAULT; /* * Handle daemons first since it has its own locking */ if (cmd == IP_VS_SO_GET_DAEMON) { struct ip_vs_daemon_user d[2]; memset(&d, 0, sizeof(d)); mutex_lock(&ipvs->sync_mutex); if (ipvs->sync_state & IP_VS_STATE_MASTER) { d[0].state = IP_VS_STATE_MASTER; strscpy(d[0].mcast_ifn, ipvs->mcfg.mcast_ifn, sizeof(d[0].mcast_ifn)); d[0].syncid = ipvs->mcfg.syncid; } if (ipvs->sync_state & IP_VS_STATE_BACKUP) { d[1].state = IP_VS_STATE_BACKUP; strscpy(d[1].mcast_ifn, ipvs->bcfg.mcast_ifn, sizeof(d[1].mcast_ifn)); d[1].syncid = ipvs->bcfg.syncid; } if (copy_to_user(user, &d, sizeof(d)) != 0) ret = -EFAULT; mutex_unlock(&ipvs->sync_mutex); return ret; } mutex_lock(&__ip_vs_mutex); switch (cmd) { case IP_VS_SO_GET_VERSION: { char buf[64]; sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)", NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size); if (copy_to_user(user, buf, strlen(buf)+1) != 0) { ret = -EFAULT; goto out; } *len = strlen(buf)+1; } break; case IP_VS_SO_GET_INFO: { struct ip_vs_getinfo info; info.version = IP_VS_VERSION_CODE; info.size = ip_vs_conn_tab_size; info.num_services = ipvs->num_services; if 
(copy_to_user(user, &info, sizeof(info)) != 0) ret = -EFAULT; } break; case IP_VS_SO_GET_SERVICES: { struct ip_vs_get_services *get; int size; get = (struct ip_vs_get_services *)arg; size = struct_size(get, entrytable, get->num_services); if (*len != size) { pr_err("length: %u != %u\n", *len, size); ret = -EINVAL; goto out; } ret = __ip_vs_get_service_entries(ipvs, get, user); } break; case IP_VS_SO_GET_SERVICE: { struct ip_vs_service_entry *entry; struct ip_vs_service *svc; union nf_inet_addr addr; entry = (struct ip_vs_service_entry *)arg; addr.ip = entry->addr; rcu_read_lock(); if (entry->fwmark) svc = __ip_vs_svc_fwm_find(ipvs, AF_INET, entry->fwmark); else svc = __ip_vs_service_find(ipvs, AF_INET, entry->protocol, &addr, entry->port); rcu_read_unlock(); if (svc) { ip_vs_copy_service(entry, svc); if (copy_to_user(user, entry, sizeof(*entry)) != 0) ret = -EFAULT; } else ret = -ESRCH; } break; case IP_VS_SO_GET_DESTS: { struct ip_vs_get_dests *get; int size; get = (struct ip_vs_get_dests *)arg; size = struct_size(get, entrytable, get->num_dests); if (*len != size) { pr_err("length: %u != %u\n", *len, size); ret = -EINVAL; goto out; } ret = __ip_vs_get_dest_entries(ipvs, get, user); } break; case IP_VS_SO_GET_TIMEOUT: { struct ip_vs_timeout_user t; __ip_vs_get_timeouts(ipvs, &t); if (copy_to_user(user, &t, sizeof(t)) != 0) ret = -EFAULT; } break; default: ret = -EINVAL; } out: mutex_unlock(&__ip_vs_mutex); return ret; } static struct nf_sockopt_ops ip_vs_sockopts = { .pf = PF_INET, .set_optmin = IP_VS_BASE_CTL, .set_optmax = IP_VS_SO_SET_MAX+1, .set = do_ip_vs_set_ctl, .get_optmin = IP_VS_BASE_CTL, .get_optmax = IP_VS_SO_GET_MAX+1, .get = do_ip_vs_get_ctl, .owner = THIS_MODULE, }; /* * Generic Netlink interface */ /* IPVS genetlink family */ static struct genl_family ip_vs_genl_family; /* Policy used for first-level command attributes */ static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = { [IPVS_CMD_ATTR_SERVICE] = { .type = NLA_NESTED }, [IPVS_CMD_ATTR_DEST] = { .type = NLA_NESTED }, [IPVS_CMD_ATTR_DAEMON] = { .type = NLA_NESTED }, [IPVS_CMD_ATTR_TIMEOUT_TCP] = { .type = NLA_U32 }, [IPVS_CMD_ATTR_TIMEOUT_TCP_FIN] = { .type = NLA_U32 }, [IPVS_CMD_ATTR_TIMEOUT_UDP] = { .type = NLA_U32 }, }; /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DAEMON */ static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = { [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 }, [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING, .len = IP_VS_IFNAME_MAXLEN - 1 }, [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 }, [IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 }, [IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 }, [IPVS_DAEMON_ATTR_MCAST_GROUP6] = { .len = sizeof(struct in6_addr) }, [IPVS_DAEMON_ATTR_MCAST_PORT] = { .type = NLA_U16 }, [IPVS_DAEMON_ATTR_MCAST_TTL] = { .type = NLA_U8 }, }; /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */ static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = { [IPVS_SVC_ATTR_AF] = { .type = NLA_U16 }, [IPVS_SVC_ATTR_PROTOCOL] = { .type = NLA_U16 }, [IPVS_SVC_ATTR_ADDR] = { .type = NLA_BINARY, .len = sizeof(union nf_inet_addr) }, [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 }, [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 }, [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING, .len = IP_VS_SCHEDNAME_MAXLEN - 1 }, [IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING, .len = IP_VS_PENAME_MAXLEN }, [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY, .len = sizeof(struct ip_vs_flags) }, 
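	/* IPVS_SVC_ATTR_FLAGS carries a struct ip_vs_flags { flags, mask }
	 * pair: only bits set in the mask are applied, letting userspace
	 * update selected service flags without rewriting the rest.
	 */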
[IPVS_SVC_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPVS_SVC_ATTR_NETMASK] = { .type = NLA_U32 }, [IPVS_SVC_ATTR_STATS] = { .type = NLA_NESTED }, }; /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DEST */ static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = { [IPVS_DEST_ATTR_ADDR] = { .type = NLA_BINARY, .len = sizeof(union nf_inet_addr) }, [IPVS_DEST_ATTR_PORT] = { .type = NLA_U16 }, [IPVS_DEST_ATTR_FWD_METHOD] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_WEIGHT] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_U_THRESH] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_L_THRESH] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_ACTIVE_CONNS] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_INACT_CONNS] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_PERSIST_CONNS] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_STATS] = { .type = NLA_NESTED }, [IPVS_DEST_ATTR_ADDR_FAMILY] = { .type = NLA_U16 }, [IPVS_DEST_ATTR_TUN_TYPE] = { .type = NLA_U8 }, [IPVS_DEST_ATTR_TUN_PORT] = { .type = NLA_U16 }, [IPVS_DEST_ATTR_TUN_FLAGS] = { .type = NLA_U16 }, }; static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type, struct ip_vs_kstats *kstats) { struct nlattr *nl_stats = nla_nest_start_noflag(skb, container_type); if (!nl_stats) return -EMSGSIZE; if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, (u32)kstats->conns) || nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, (u32)kstats->inpkts) || nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, (u32)kstats->outpkts) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes, IPVS_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes, IPVS_STATS_ATTR_PAD) || nla_put_u32(skb, IPVS_STATS_ATTR_CPS, (u32)kstats->cps) || nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, (u32)kstats->inpps) || nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, (u32)kstats->outpps) || nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, (u32)kstats->inbps) || nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, (u32)kstats->outbps)) goto nla_put_failure; nla_nest_end(skb, nl_stats); return 0; nla_put_failure: nla_nest_cancel(skb, nl_stats); return -EMSGSIZE; } static int ip_vs_genl_fill_stats64(struct sk_buff *skb, int container_type, struct ip_vs_kstats *kstats) { struct nlattr *nl_stats = nla_nest_start_noflag(skb, container_type); if (!nl_stats) return -EMSGSIZE; if (nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CONNS, kstats->conns, IPVS_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts, IPVS_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts, IPVS_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes, IPVS_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes, IPVS_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CPS, kstats->cps, IPVS_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps, IPVS_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps, IPVS_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps, IPVS_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps, IPVS_STATS_ATTR_PAD)) goto nla_put_failure; nla_nest_end(skb, nl_stats); return 0; nla_put_failure: nla_nest_cancel(skb, nl_stats); return -EMSGSIZE; } static int ip_vs_genl_fill_service(struct sk_buff *skb, struct ip_vs_service *svc) { struct ip_vs_scheduler *sched; struct ip_vs_pe *pe; struct nlattr *nl_service; struct ip_vs_flags flags = { .flags = svc->flags, .mask = ~0 }; struct ip_vs_kstats kstats; char *sched_name; nl_service = 
nla_nest_start_noflag(skb, IPVS_CMD_ATTR_SERVICE); if (!nl_service) return -EMSGSIZE; if (nla_put_u16(skb, IPVS_SVC_ATTR_AF, svc->af)) goto nla_put_failure; if (svc->fwmark) { if (nla_put_u32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark)) goto nla_put_failure; } else { if (nla_put_u16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol) || nla_put(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr) || nla_put_be16(skb, IPVS_SVC_ATTR_PORT, svc->port)) goto nla_put_failure; } sched = rcu_dereference_protected(svc->scheduler, 1); sched_name = sched ? sched->name : "none"; pe = rcu_dereference_protected(svc->pe, 1); if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) || (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) || nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) || nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) || nla_put_be32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask)) goto nla_put_failure; ip_vs_copy_stats(&kstats, &svc->stats); if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &kstats)) goto nla_put_failure; if (ip_vs_genl_fill_stats64(skb, IPVS_SVC_ATTR_STATS64, &kstats)) goto nla_put_failure; nla_nest_end(skb, nl_service); return 0; nla_put_failure: nla_nest_cancel(skb, nl_service); return -EMSGSIZE; } static int ip_vs_genl_dump_service(struct sk_buff *skb, struct ip_vs_service *svc, struct netlink_callback *cb) { void *hdr; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &ip_vs_genl_family, NLM_F_MULTI, IPVS_CMD_NEW_SERVICE); if (!hdr) return -EMSGSIZE; if (ip_vs_genl_fill_service(skb, svc) < 0) goto nla_put_failure; genlmsg_end(skb, hdr); return 0; nla_put_failure: genlmsg_cancel(skb, hdr); return -EMSGSIZE; } static int ip_vs_genl_dump_services(struct sk_buff *skb, struct netlink_callback *cb) { int idx = 0, i; int start = cb->args[0]; struct ip_vs_service *svc; struct net *net = sock_net(skb->sk); struct netns_ipvs *ipvs = net_ipvs(net); mutex_lock(&__ip_vs_mutex); for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) { hlist_for_each_entry(svc, &ip_vs_svc_table[i], s_list) { if (++idx <= start || (svc->ipvs != ipvs)) continue; if (ip_vs_genl_dump_service(skb, svc, cb) < 0) { idx--; goto nla_put_failure; } } } for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) { hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) { if (++idx <= start || (svc->ipvs != ipvs)) continue; if (ip_vs_genl_dump_service(skb, svc, cb) < 0) { idx--; goto nla_put_failure; } } } nla_put_failure: mutex_unlock(&__ip_vs_mutex); cb->args[0] = idx; return skb->len; } static bool ip_vs_is_af_valid(int af) { if (af == AF_INET) return true; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6 && ipv6_mod_enabled()) return true; #endif return false; } static int ip_vs_genl_parse_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *usvc, struct nlattr *nla, bool full_entry, struct ip_vs_service **ret_svc) { struct nlattr *attrs[IPVS_SVC_ATTR_MAX + 1]; struct nlattr *nla_af, *nla_port, *nla_fwmark, *nla_protocol, *nla_addr; struct ip_vs_service *svc; /* Parse mandatory identifying service fields first */ if (nla == NULL || nla_parse_nested_deprecated(attrs, IPVS_SVC_ATTR_MAX, nla, ip_vs_svc_policy, NULL)) return -EINVAL; nla_af = attrs[IPVS_SVC_ATTR_AF]; nla_protocol = attrs[IPVS_SVC_ATTR_PROTOCOL]; nla_addr = attrs[IPVS_SVC_ATTR_ADDR]; nla_port = attrs[IPVS_SVC_ATTR_PORT]; nla_fwmark = attrs[IPVS_SVC_ATTR_FWMARK]; if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr)))) return -EINVAL; memset(usvc, 0, sizeof(*usvc)); usvc->af = nla_get_u16(nla_af); if 
(!ip_vs_is_af_valid(usvc->af)) return -EAFNOSUPPORT; if (nla_fwmark) { usvc->protocol = IPPROTO_TCP; usvc->fwmark = nla_get_u32(nla_fwmark); } else { usvc->protocol = nla_get_u16(nla_protocol); nla_memcpy(&usvc->addr, nla_addr, sizeof(usvc->addr)); usvc->port = nla_get_be16(nla_port); usvc->fwmark = 0; } rcu_read_lock(); if (usvc->fwmark) svc = __ip_vs_svc_fwm_find(ipvs, usvc->af, usvc->fwmark); else svc = __ip_vs_service_find(ipvs, usvc->af, usvc->protocol, &usvc->addr, usvc->port); rcu_read_unlock(); *ret_svc = svc; /* If a full entry was requested, check for the additional fields */ if (full_entry) { struct nlattr *nla_sched, *nla_flags, *nla_pe, *nla_timeout, *nla_netmask; struct ip_vs_flags flags; nla_sched = attrs[IPVS_SVC_ATTR_SCHED_NAME]; nla_pe = attrs[IPVS_SVC_ATTR_PE_NAME]; nla_flags = attrs[IPVS_SVC_ATTR_FLAGS]; nla_timeout = attrs[IPVS_SVC_ATTR_TIMEOUT]; nla_netmask = attrs[IPVS_SVC_ATTR_NETMASK]; if (!(nla_sched && nla_flags && nla_timeout && nla_netmask)) return -EINVAL; nla_memcpy(&flags, nla_flags, sizeof(flags)); /* prefill flags from service if it already exists */ if (svc) usvc->flags = svc->flags; /* set new flags from userland */ usvc->flags = (usvc->flags & ~flags.mask) | (flags.flags & flags.mask); usvc->sched_name = nla_data(nla_sched); usvc->pe_name = nla_pe ? nla_data(nla_pe) : NULL; usvc->timeout = nla_get_u32(nla_timeout); usvc->netmask = nla_get_be32(nla_netmask); } return 0; } static struct ip_vs_service *ip_vs_genl_find_service(struct netns_ipvs *ipvs, struct nlattr *nla) { struct ip_vs_service_user_kern usvc; struct ip_vs_service *svc; int ret; ret = ip_vs_genl_parse_service(ipvs, &usvc, nla, false, &svc); return ret ? ERR_PTR(ret) : svc; } static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest) { struct nlattr *nl_dest; struct ip_vs_kstats kstats; nl_dest = nla_nest_start_noflag(skb, IPVS_CMD_ATTR_DEST); if (!nl_dest) return -EMSGSIZE; if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) || nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) || nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD, (atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK)) || nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight)) || nla_put_u8(skb, IPVS_DEST_ATTR_TUN_TYPE, dest->tun_type) || nla_put_be16(skb, IPVS_DEST_ATTR_TUN_PORT, dest->tun_port) || nla_put_u16(skb, IPVS_DEST_ATTR_TUN_FLAGS, dest->tun_flags) || nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) || nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) || nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS, atomic_read(&dest->activeconns)) || nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS, atomic_read(&dest->inactconns)) || nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS, atomic_read(&dest->persistconns)) || nla_put_u16(skb, IPVS_DEST_ATTR_ADDR_FAMILY, dest->af)) goto nla_put_failure; ip_vs_copy_stats(&kstats, &dest->stats); if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &kstats)) goto nla_put_failure; if (ip_vs_genl_fill_stats64(skb, IPVS_DEST_ATTR_STATS64, &kstats)) goto nla_put_failure; nla_nest_end(skb, nl_dest); return 0; nla_put_failure: nla_nest_cancel(skb, nl_dest); return -EMSGSIZE; } static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest, struct netlink_callback *cb) { void *hdr; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &ip_vs_genl_family, NLM_F_MULTI, IPVS_CMD_NEW_DEST); if (!hdr) return -EMSGSIZE; if (ip_vs_genl_fill_dest(skb, dest) < 0) goto nla_put_failure; genlmsg_end(skb, hdr); return 0; 
nla_put_failure: genlmsg_cancel(skb, hdr); return -EMSGSIZE; } static int ip_vs_genl_dump_dests(struct sk_buff *skb, struct netlink_callback *cb) { int idx = 0; int start = cb->args[0]; struct ip_vs_service *svc; struct ip_vs_dest *dest; struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1]; struct net *net = sock_net(skb->sk); struct netns_ipvs *ipvs = net_ipvs(net); mutex_lock(&__ip_vs_mutex); /* Try to find the service for which to dump destinations */ if (nlmsg_parse_deprecated(cb->nlh, GENL_HDRLEN, attrs, IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy, cb->extack)) goto out_err; svc = ip_vs_genl_find_service(ipvs, attrs[IPVS_CMD_ATTR_SERVICE]); if (IS_ERR_OR_NULL(svc)) goto out_err; /* Dump the destinations */ list_for_each_entry(dest, &svc->destinations, n_list) { if (++idx <= start) continue; if (ip_vs_genl_dump_dest(skb, dest, cb) < 0) { idx--; goto nla_put_failure; } } nla_put_failure: cb->args[0] = idx; out_err: mutex_unlock(&__ip_vs_mutex); return skb->len; } static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest, struct nlattr *nla, bool full_entry) { struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1]; struct nlattr *nla_addr, *nla_port; struct nlattr *nla_addr_family; /* Parse mandatory identifying destination fields first */ if (nla == NULL || nla_parse_nested_deprecated(attrs, IPVS_DEST_ATTR_MAX, nla, ip_vs_dest_policy, NULL)) return -EINVAL; nla_addr = attrs[IPVS_DEST_ATTR_ADDR]; nla_port = attrs[IPVS_DEST_ATTR_PORT]; nla_addr_family = attrs[IPVS_DEST_ATTR_ADDR_FAMILY]; if (!(nla_addr && nla_port)) return -EINVAL; memset(udest, 0, sizeof(*udest)); nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr)); udest->port = nla_get_be16(nla_port); udest->af = nla_get_u16_default(nla_addr_family, 0); /* If a full entry was requested, check for the additional fields */ if (full_entry) { struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh, *nla_l_thresh, *nla_tun_type, *nla_tun_port, *nla_tun_flags; nla_fwd = attrs[IPVS_DEST_ATTR_FWD_METHOD]; nla_weight = attrs[IPVS_DEST_ATTR_WEIGHT]; nla_u_thresh = attrs[IPVS_DEST_ATTR_U_THRESH]; nla_l_thresh = attrs[IPVS_DEST_ATTR_L_THRESH]; nla_tun_type = attrs[IPVS_DEST_ATTR_TUN_TYPE]; nla_tun_port = attrs[IPVS_DEST_ATTR_TUN_PORT]; nla_tun_flags = attrs[IPVS_DEST_ATTR_TUN_FLAGS]; if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh)) return -EINVAL; udest->conn_flags = nla_get_u32(nla_fwd) & IP_VS_CONN_F_FWD_MASK; udest->weight = nla_get_u32(nla_weight); udest->u_threshold = nla_get_u32(nla_u_thresh); udest->l_threshold = nla_get_u32(nla_l_thresh); if (nla_tun_type) udest->tun_type = nla_get_u8(nla_tun_type); if (nla_tun_port) udest->tun_port = nla_get_be16(nla_tun_port); if (nla_tun_flags) udest->tun_flags = nla_get_u16(nla_tun_flags); } return 0; } static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __u32 state, struct ipvs_sync_daemon_cfg *c) { struct nlattr *nl_daemon; nl_daemon = nla_nest_start_noflag(skb, IPVS_CMD_ATTR_DAEMON); if (!nl_daemon) return -EMSGSIZE; if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) || nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, c->mcast_ifn) || nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, c->syncid) || nla_put_u16(skb, IPVS_DAEMON_ATTR_SYNC_MAXLEN, c->sync_maxlen) || nla_put_u16(skb, IPVS_DAEMON_ATTR_MCAST_PORT, c->mcast_port) || nla_put_u8(skb, IPVS_DAEMON_ATTR_MCAST_TTL, c->mcast_ttl)) goto nla_put_failure; #ifdef CONFIG_IP_VS_IPV6 if (c->mcast_af == AF_INET6) { if (nla_put_in6_addr(skb, IPVS_DAEMON_ATTR_MCAST_GROUP6, &c->mcast_group.in6)) goto nla_put_failure; } else #endif if (c->mcast_af == AF_INET && 
nla_put_in_addr(skb, IPVS_DAEMON_ATTR_MCAST_GROUP, c->mcast_group.ip)) goto nla_put_failure; nla_nest_end(skb, nl_daemon); return 0; nla_put_failure: nla_nest_cancel(skb, nl_daemon); return -EMSGSIZE; } static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __u32 state, struct ipvs_sync_daemon_cfg *c, struct netlink_callback *cb) { void *hdr; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &ip_vs_genl_family, NLM_F_MULTI, IPVS_CMD_NEW_DAEMON); if (!hdr) return -EMSGSIZE; if (ip_vs_genl_fill_daemon(skb, state, c)) goto nla_put_failure; genlmsg_end(skb, hdr); return 0; nla_put_failure: genlmsg_cancel(skb, hdr); return -EMSGSIZE; } static int ip_vs_genl_dump_daemons(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct netns_ipvs *ipvs = net_ipvs(net); mutex_lock(&ipvs->sync_mutex); if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) { if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER, &ipvs->mcfg, cb) < 0) goto nla_put_failure; cb->args[0] = 1; } if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) { if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP, &ipvs->bcfg, cb) < 0) goto nla_put_failure; cb->args[1] = 1; } nla_put_failure: mutex_unlock(&ipvs->sync_mutex); return skb->len; } static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs) { struct ipvs_sync_daemon_cfg c; struct nlattr *a; int ret; memset(&c, 0, sizeof(c)); if (!(attrs[IPVS_DAEMON_ATTR_STATE] && attrs[IPVS_DAEMON_ATTR_MCAST_IFN] && attrs[IPVS_DAEMON_ATTR_SYNC_ID])) return -EINVAL; strscpy(c.mcast_ifn, nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]), sizeof(c.mcast_ifn)); c.syncid = nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]); a = attrs[IPVS_DAEMON_ATTR_SYNC_MAXLEN]; if (a) c.sync_maxlen = nla_get_u16(a); a = attrs[IPVS_DAEMON_ATTR_MCAST_GROUP]; if (a) { c.mcast_af = AF_INET; c.mcast_group.ip = nla_get_in_addr(a); if (!ipv4_is_multicast(c.mcast_group.ip)) return -EINVAL; } else { a = attrs[IPVS_DAEMON_ATTR_MCAST_GROUP6]; if (a) { #ifdef CONFIG_IP_VS_IPV6 int addr_type; c.mcast_af = AF_INET6; c.mcast_group.in6 = nla_get_in6_addr(a); addr_type = ipv6_addr_type(&c.mcast_group.in6); if (!(addr_type & IPV6_ADDR_MULTICAST)) return -EINVAL; #else return -EAFNOSUPPORT; #endif } } a = attrs[IPVS_DAEMON_ATTR_MCAST_PORT]; if (a) c.mcast_port = nla_get_u16(a); a = attrs[IPVS_DAEMON_ATTR_MCAST_TTL]; if (a) c.mcast_ttl = nla_get_u8(a); /* The synchronization protocol is incompatible with mixed family * services */ if (ipvs->mixed_address_family_dests > 0) return -EINVAL; ret = start_sync_thread(ipvs, &c, nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); return ret; } static int ip_vs_genl_del_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs) { int ret; if (!attrs[IPVS_DAEMON_ATTR_STATE]) return -EINVAL; ret = stop_sync_thread(ipvs, nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); return ret; } static int ip_vs_genl_set_config(struct netns_ipvs *ipvs, struct nlattr **attrs) { struct ip_vs_timeout_user t; __ip_vs_get_timeouts(ipvs, &t); if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]) t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]); if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]) t.tcp_fin_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]); if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]) t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]); return ip_vs_set_timeout(ipvs, &t); } static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info) { int ret = -EINVAL, cmd; struct net *net = sock_net(skb->sk); struct netns_ipvs *ipvs = 
net_ipvs(net); cmd = info->genlhdr->cmd; if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) { struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1]; if (!info->attrs[IPVS_CMD_ATTR_DAEMON] || nla_parse_nested_deprecated(daemon_attrs, IPVS_DAEMON_ATTR_MAX, info->attrs[IPVS_CMD_ATTR_DAEMON], ip_vs_daemon_policy, info->extack)) goto out; if (cmd == IPVS_CMD_NEW_DAEMON) ret = ip_vs_genl_new_daemon(ipvs, daemon_attrs); else ret = ip_vs_genl_del_daemon(ipvs, daemon_attrs); } out: return ret; } static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) { bool need_full_svc = false, need_full_dest = false; struct ip_vs_service *svc = NULL; struct ip_vs_service_user_kern usvc; struct ip_vs_dest_user_kern udest; int ret = 0, cmd; struct net *net = sock_net(skb->sk); struct netns_ipvs *ipvs = net_ipvs(net); cmd = info->genlhdr->cmd; mutex_lock(&__ip_vs_mutex); if (cmd == IPVS_CMD_FLUSH) { ret = ip_vs_flush(ipvs, false); goto out; } else if (cmd == IPVS_CMD_SET_CONFIG) { ret = ip_vs_genl_set_config(ipvs, info->attrs); goto out; } else if (cmd == IPVS_CMD_ZERO && !info->attrs[IPVS_CMD_ATTR_SERVICE]) { ret = ip_vs_zero_all(ipvs); goto out; } /* All following commands require a service argument, so check if we * received a valid one. We need a full service specification when * adding / editing a service. Only identifying members otherwise. */ if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE) need_full_svc = true; ret = ip_vs_genl_parse_service(ipvs, &usvc, info->attrs[IPVS_CMD_ATTR_SERVICE], need_full_svc, &svc); if (ret) goto out; /* Unless we're adding a new service, the service must already exist */ if ((cmd != IPVS_CMD_NEW_SERVICE) && (svc == NULL)) { ret = -ESRCH; goto out; } /* Destination commands require a valid destination argument. For * adding / editing a destination, we need a full destination * specification. */ if (cmd == IPVS_CMD_NEW_DEST || cmd == IPVS_CMD_SET_DEST || cmd == IPVS_CMD_DEL_DEST) { if (cmd != IPVS_CMD_DEL_DEST) need_full_dest = true; ret = ip_vs_genl_parse_dest(&udest, info->attrs[IPVS_CMD_ATTR_DEST], need_full_dest); if (ret) goto out; /* Old protocols did not allow the user to specify address * family, so we set it to zero instead. We also didn't * allow heterogeneous pools in the old code, so it's safe * to assume that this will have the same address family as * the service. */ if (udest.af == 0) udest.af = svc->af; if (!ip_vs_is_af_valid(udest.af)) { ret = -EAFNOSUPPORT; goto out; } if (udest.af != svc->af && cmd != IPVS_CMD_DEL_DEST) { /* The synchronization protocol is incompatible * with mixed family services */ if (ipvs->sync_state) { ret = -EINVAL; goto out; } /* Which connection types do we support? 
*/ switch (udest.conn_flags) { case IP_VS_CONN_F_TUNNEL: /* We are able to forward this */ break; default: ret = -EINVAL; goto out; } } } switch (cmd) { case IPVS_CMD_NEW_SERVICE: if (svc == NULL) ret = ip_vs_add_service(ipvs, &usvc, &svc); else ret = -EEXIST; break; case IPVS_CMD_SET_SERVICE: ret = ip_vs_edit_service(svc, &usvc); break; case IPVS_CMD_DEL_SERVICE: ret = ip_vs_del_service(svc); /* do not use svc, it can be freed */ break; case IPVS_CMD_NEW_DEST: ret = ip_vs_add_dest(svc, &udest); break; case IPVS_CMD_SET_DEST: ret = ip_vs_edit_dest(svc, &udest); break; case IPVS_CMD_DEL_DEST: ret = ip_vs_del_dest(svc, &udest); break; case IPVS_CMD_ZERO: ret = ip_vs_zero_service(svc); break; default: ret = -EINVAL; } out: mutex_unlock(&__ip_vs_mutex); return ret; } static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; void *reply; int ret, cmd, reply_cmd; struct net *net = sock_net(skb->sk); struct netns_ipvs *ipvs = net_ipvs(net); cmd = info->genlhdr->cmd; if (cmd == IPVS_CMD_GET_SERVICE) reply_cmd = IPVS_CMD_NEW_SERVICE; else if (cmd == IPVS_CMD_GET_INFO) reply_cmd = IPVS_CMD_SET_INFO; else if (cmd == IPVS_CMD_GET_CONFIG) reply_cmd = IPVS_CMD_SET_CONFIG; else { pr_err("unknown Generic Netlink command\n"); return -EINVAL; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; mutex_lock(&__ip_vs_mutex); reply = genlmsg_put_reply(msg, info, &ip_vs_genl_family, 0, reply_cmd); if (reply == NULL) goto nla_put_failure; switch (cmd) { case IPVS_CMD_GET_SERVICE: { struct ip_vs_service *svc; svc = ip_vs_genl_find_service(ipvs, info->attrs[IPVS_CMD_ATTR_SERVICE]); if (IS_ERR(svc)) { ret = PTR_ERR(svc); goto out_err; } else if (svc) { ret = ip_vs_genl_fill_service(msg, svc); if (ret) goto nla_put_failure; } else { ret = -ESRCH; goto out_err; } break; } case IPVS_CMD_GET_CONFIG: { struct ip_vs_timeout_user t; __ip_vs_get_timeouts(ipvs, &t); #ifdef CONFIG_IP_VS_PROTO_TCP if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout) || nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, t.tcp_fin_timeout)) goto nla_put_failure; #endif #ifdef CONFIG_IP_VS_PROTO_UDP if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout)) goto nla_put_failure; #endif break; } case IPVS_CMD_GET_INFO: if (nla_put_u32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE) || nla_put_u32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, ip_vs_conn_tab_size)) goto nla_put_failure; break; } genlmsg_end(msg, reply); ret = genlmsg_reply(msg, info); goto out; nla_put_failure: pr_err("not enough space in Netlink message\n"); ret = -EMSGSIZE; out_err: nlmsg_free(msg); out: mutex_unlock(&__ip_vs_mutex); return ret; } static const struct genl_small_ops ip_vs_genl_ops[] = { { .cmd = IPVS_CMD_NEW_SERVICE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_SET_SERVICE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_DEL_SERVICE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_GET_SERVICE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = ip_vs_genl_get_cmd, .dumpit = ip_vs_genl_dump_services, }, { .cmd = IPVS_CMD_NEW_DEST, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_SET_DEST, 
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = ip_vs_genl_set_cmd,
	},
	{
		.cmd = IPVS_CMD_DEL_DEST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = ip_vs_genl_set_cmd,
	},
	{
		.cmd = IPVS_CMD_GET_DEST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.dumpit = ip_vs_genl_dump_dests,
	},
	{
		.cmd = IPVS_CMD_NEW_DAEMON,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = ip_vs_genl_set_daemon,
	},
	{
		.cmd = IPVS_CMD_DEL_DAEMON,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = ip_vs_genl_set_daemon,
	},
	{
		.cmd = IPVS_CMD_GET_DAEMON,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.dumpit = ip_vs_genl_dump_daemons,
	},
	{
		.cmd = IPVS_CMD_SET_CONFIG,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = ip_vs_genl_set_cmd,
	},
	{
		.cmd = IPVS_CMD_GET_CONFIG,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = ip_vs_genl_get_cmd,
	},
	{
		.cmd = IPVS_CMD_GET_INFO,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = ip_vs_genl_get_cmd,
	},
	{
		.cmd = IPVS_CMD_ZERO,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = ip_vs_genl_set_cmd,
	},
	{
		.cmd = IPVS_CMD_FLUSH,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = ip_vs_genl_set_cmd,
	},
};

static struct genl_family ip_vs_genl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= IPVS_GENL_NAME,
	.version	= IPVS_GENL_VERSION,
	.maxattr	= IPVS_CMD_ATTR_MAX,
	.policy		= ip_vs_cmd_policy,
	.netnsok	= true,		/* Make ipvsadm work across netns */
	.module		= THIS_MODULE,
	.small_ops	= ip_vs_genl_ops,
	.n_small_ops	= ARRAY_SIZE(ip_vs_genl_ops),
	.resv_start_op	= IPVS_CMD_FLUSH + 1,
};

static int __init ip_vs_genl_register(void)
{
	return genl_register_family(&ip_vs_genl_family);
}

static void ip_vs_genl_unregister(void)
{
	genl_unregister_family(&ip_vs_genl_family);
}

/* End of Generic Netlink interface definitions */

/*
 * per netns init/exit func.
*/ #ifdef CONFIG_SYSCTL static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) { struct net *net = ipvs->net; struct ctl_table *tbl; int idx, ret; size_t ctl_table_size = ARRAY_SIZE(vs_vars); bool unpriv = net->user_ns != &init_user_ns; atomic_set(&ipvs->dropentry, 0); spin_lock_init(&ipvs->dropentry_lock); spin_lock_init(&ipvs->droppacket_lock); spin_lock_init(&ipvs->securetcp_lock); INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler); INIT_DELAYED_WORK(&ipvs->expire_nodest_conn_work, expire_nodest_conn_handler); ipvs->est_stopped = 0; if (!net_eq(net, &init_net)) { tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL); if (tbl == NULL) return -ENOMEM; } else tbl = vs_vars; /* Initialize sysctl defaults */ for (idx = 0; idx < ARRAY_SIZE(vs_vars); idx++) { if (tbl[idx].proc_handler == proc_do_defense_mode) tbl[idx].extra2 = ipvs; } idx = 0; ipvs->sysctl_amemthresh = 1024; tbl[idx++].data = &ipvs->sysctl_amemthresh; ipvs->sysctl_am_droprate = 10; tbl[idx++].data = &ipvs->sysctl_am_droprate; tbl[idx++].data = &ipvs->sysctl_drop_entry; tbl[idx++].data = &ipvs->sysctl_drop_packet; #ifdef CONFIG_IP_VS_NFCT tbl[idx++].data = &ipvs->sysctl_conntrack; #endif tbl[idx++].data = &ipvs->sysctl_secure_tcp; ipvs->sysctl_snat_reroute = 1; tbl[idx++].data = &ipvs->sysctl_snat_reroute; ipvs->sysctl_sync_ver = 1; tbl[idx++].data = &ipvs->sysctl_sync_ver; ipvs->sysctl_sync_ports = 1; tbl[idx++].data = &ipvs->sysctl_sync_ports; tbl[idx++].data = &ipvs->sysctl_sync_persist_mode; ipvs->sysctl_sync_qlen_max = nr_free_buffer_pages() / 32; if (unpriv) tbl[idx].mode = 0444; tbl[idx++].data = &ipvs->sysctl_sync_qlen_max; ipvs->sysctl_sync_sock_size = 0; if (unpriv) tbl[idx].mode = 0444; tbl[idx++].data = &ipvs->sysctl_sync_sock_size; tbl[idx++].data = &ipvs->sysctl_cache_bypass; tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn; tbl[idx++].data = &ipvs->sysctl_sloppy_tcp; tbl[idx++].data = &ipvs->sysctl_sloppy_sctp; tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template; ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD; ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD; tbl[idx].data = &ipvs->sysctl_sync_threshold; tbl[idx].extra2 = ipvs; tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold); ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD; tbl[idx++].data = &ipvs->sysctl_sync_refresh_period; ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3); tbl[idx++].data = &ipvs->sysctl_sync_retries; tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; ipvs->sysctl_pmtu_disc = 1; tbl[idx++].data = &ipvs->sysctl_pmtu_disc; tbl[idx++].data = &ipvs->sysctl_backup_only; ipvs->sysctl_conn_reuse_mode = 1; tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode; tbl[idx++].data = &ipvs->sysctl_schedule_icmp; tbl[idx++].data = &ipvs->sysctl_ignore_tunneled; ipvs->sysctl_run_estimation = 1; if (unpriv) tbl[idx].mode = 0444; tbl[idx].extra2 = ipvs; tbl[idx++].data = &ipvs->sysctl_run_estimation; ipvs->est_cpulist_valid = 0; if (unpriv) tbl[idx].mode = 0444; tbl[idx].extra2 = ipvs; tbl[idx++].data = &ipvs->sysctl_est_cpulist; ipvs->sysctl_est_nice = IPVS_EST_NICE; if (unpriv) tbl[idx].mode = 0444; tbl[idx].extra2 = ipvs; tbl[idx++].data = &ipvs->sysctl_est_nice; #ifdef CONFIG_IP_VS_DEBUG /* Global sysctls must be ro in non-init netns */ if (!net_eq(net, &init_net)) tbl[idx++].mode = 0444; #endif ret = -ENOMEM; ipvs->sysctl_hdr = register_net_sysctl_sz(net, "net/ipv4/vs", tbl, ctl_table_size); if (!ipvs->sysctl_hdr) goto err; ipvs->sysctl_tbl = tbl; ret = 
ip_vs_start_estimator(ipvs, &ipvs->tot_stats->s); if (ret < 0) goto err; /* Schedule defense work */ queue_delayed_work(system_long_wq, &ipvs->defense_work, DEFENSE_TIMER_PERIOD); return 0; err: unregister_net_sysctl_table(ipvs->sysctl_hdr); if (!net_eq(net, &init_net)) kfree(tbl); return ret; } static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs) { struct net *net = ipvs->net; cancel_delayed_work_sync(&ipvs->expire_nodest_conn_work); cancel_delayed_work_sync(&ipvs->defense_work); cancel_work_sync(&ipvs->defense_work.work); unregister_net_sysctl_table(ipvs->sysctl_hdr); ip_vs_stop_estimator(ipvs, &ipvs->tot_stats->s); if (ipvs->est_cpulist_valid) free_cpumask_var(ipvs->sysctl_est_cpulist); if (!net_eq(net, &init_net)) kfree(ipvs->sysctl_tbl); } #else static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) { return 0; } static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs) { } #endif static struct notifier_block ip_vs_dst_notifier = { .notifier_call = ip_vs_dst_event, #ifdef CONFIG_IP_VS_IPV6 .priority = ADDRCONF_NOTIFY_PRIORITY + 5, #endif }; int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs) { int ret = -ENOMEM; int idx; /* Initialize rs_table */ for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++) INIT_HLIST_HEAD(&ipvs->rs_table[idx]); INIT_LIST_HEAD(&ipvs->dest_trash); spin_lock_init(&ipvs->dest_trash_lock); timer_setup(&ipvs->dest_trash_timer, ip_vs_dest_trash_expire, 0); atomic_set(&ipvs->ftpsvc_counter, 0); atomic_set(&ipvs->nullsvc_counter, 0); atomic_set(&ipvs->conn_out_counter, 0); INIT_DELAYED_WORK(&ipvs->est_reload_work, est_reload_work_handler); /* procfs stats */ ipvs->tot_stats = kzalloc(sizeof(*ipvs->tot_stats), GFP_KERNEL); if (!ipvs->tot_stats) goto out; if (ip_vs_stats_init_alloc(&ipvs->tot_stats->s) < 0) goto err_tot_stats; #ifdef CONFIG_PROC_FS if (!proc_create_net("ip_vs", 0, ipvs->net->proc_net, &ip_vs_info_seq_ops, sizeof(struct ip_vs_iter))) goto err_vs; if (!proc_create_net_single("ip_vs_stats", 0, ipvs->net->proc_net, ip_vs_stats_show, NULL)) goto err_stats; if (!proc_create_net_single("ip_vs_stats_percpu", 0, ipvs->net->proc_net, ip_vs_stats_percpu_show, NULL)) goto err_percpu; #endif ret = ip_vs_control_net_init_sysctl(ipvs); if (ret < 0) goto err; return 0; err: #ifdef CONFIG_PROC_FS remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net); err_percpu: remove_proc_entry("ip_vs_stats", ipvs->net->proc_net); err_stats: remove_proc_entry("ip_vs", ipvs->net->proc_net); err_vs: #endif ip_vs_stats_release(&ipvs->tot_stats->s); err_tot_stats: kfree(ipvs->tot_stats); out: return ret; } void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs) { ip_vs_trash_cleanup(ipvs); ip_vs_control_net_cleanup_sysctl(ipvs); cancel_delayed_work_sync(&ipvs->est_reload_work); #ifdef CONFIG_PROC_FS remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net); remove_proc_entry("ip_vs_stats", ipvs->net->proc_net); remove_proc_entry("ip_vs", ipvs->net->proc_net); #endif call_rcu(&ipvs->tot_stats->rcu_head, ip_vs_stats_rcu_free); } int __init ip_vs_register_nl_ioctl(void) { int ret; ret = nf_register_sockopt(&ip_vs_sockopts); if (ret) { pr_err("cannot register sockopt.\n"); goto err_sock; } ret = ip_vs_genl_register(); if (ret) { pr_err("cannot register Generic Netlink interface.\n"); goto err_genl; } return 0; err_genl: nf_unregister_sockopt(&ip_vs_sockopts); err_sock: return ret; } void ip_vs_unregister_nl_ioctl(void) { ip_vs_genl_unregister(); nf_unregister_sockopt(&ip_vs_sockopts); } int __init 
ip_vs_control_init(void) { int idx; int ret; /* Initialize svc_table, ip_vs_svc_fwm_table */ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { INIT_HLIST_HEAD(&ip_vs_svc_table[idx]); INIT_HLIST_HEAD(&ip_vs_svc_fwm_table[idx]); } smp_wmb(); /* Do we really need it now ? */ ret = register_netdevice_notifier(&ip_vs_dst_notifier); if (ret < 0) return ret; return 0; } void ip_vs_control_cleanup(void) { unregister_netdevice_notifier(&ip_vs_dst_notifier); /* relying on common rcu_barrier() in ip_vs_cleanup() */ }
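
/*
 * A minimal sketch of the attribute-nesting pattern shared by all of the
 * ip_vs_genl_fill_*() helpers above: open a container attribute, emit the
 * member attributes, and cancel the whole container on any failure. The
 * function name and the member attribute id used here are hypothetical,
 * for illustration only.
 */
static int example_fill_nested(struct sk_buff *skb, int container_type,
			       u32 value)
{
	struct nlattr *nest = nla_nest_start_noflag(skb, container_type);

	if (!nest)
		return -EMSGSIZE;

	/* hypothetical member attribute with id 1 */
	if (nla_put_u32(skb, 1, value))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}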
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test shared zeropage handling (with/without storage keys)
 *
 * Copyright (C) 2024, Red Hat, Inc.
 */
#include <sys/mman.h>

#include <linux/fs.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"

static void set_storage_key(void *addr, uint8_t skey)
{
	asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}

static void guest_code(void)
{
	/* Issue some storage key instruction. */
	set_storage_key((void *)0, 0x98);
	GUEST_DONE();
}

/*
 * Returns 1 if the shared zeropage is mapped, 0 if something else is mapped.
 * Returns < 0 on error or if nothing is mapped.
 */
static int maps_shared_zeropage(int pagemap_fd, void *addr)
{
	struct page_region region;
	struct pm_scan_arg arg = {
		.start = (uintptr_t)addr,
		.end = (uintptr_t)addr + 4096,
		.vec = (uintptr_t)&region,
		.vec_len = 1,
		.size = sizeof(struct pm_scan_arg),
		.category_mask = PAGE_IS_PFNZERO,
		.category_anyof_mask = PAGE_IS_PRESENT,
		.return_mask = PAGE_IS_PFNZERO,
	};

	return ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
}

int main(int argc, char *argv[])
{
	char *mem, *page0, *page1, *page2, tmp;
	const size_t pagesize = getpagesize();
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	int pagemap_fd;

	ksft_print_header();
	ksft_set_plan(3);

	/*
	 * We'll use memory that is not mapped into the VM for simplicity.
	 * Shared zeropages are enabled/disabled per-process.
	 */
	mem = mmap(0, 3 * pagesize, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
	TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");

	/* Disable THP. Ignore errors on older kernels. */
	madvise(mem, 3 * pagesize, MADV_NOHUGEPAGE);

	page0 = mem;
	page1 = page0 + pagesize;
	page2 = page1 + pagesize;

	/* Can we even detect shared zeropages? */
	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
	TEST_REQUIRE(pagemap_fd >= 0);

	tmp = *page0;
	asm volatile("" : "+r" (tmp));
	TEST_REQUIRE(maps_shared_zeropage(pagemap_fd, page0) == 1);

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	/* Verify that we get the shared zeropage after VM creation. */
	tmp = *page1;
	asm volatile("" : "+r" (tmp));
	ksft_test_result(maps_shared_zeropage(pagemap_fd, page1) == 1,
			 "Shared zeropages should be enabled\n");

	/*
	 * Let our VM execute a storage key instruction that should
	 * unshare all shared zeropages.
	 */
	vcpu_run(vcpu);
	get_ucall(vcpu, &uc);
	TEST_ASSERT_EQ(uc.cmd, UCALL_DONE);

	/* Verify that we don't have a shared zeropage anymore. */
	ksft_test_result(!maps_shared_zeropage(pagemap_fd, page1),
			 "Shared zeropage should be gone\n");

	/* Verify that we don't get any new shared zeropages. */
	tmp = *page2;
	asm volatile("" : "+r" (tmp));
	ksft_test_result(!maps_shared_zeropage(pagemap_fd, page2),
			 "Shared zeropages should be disabled\n");

	kvm_vm_free(vm);

	ksft_finished();
}
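
/*
 * The "tmp = *page; asm volatile("" : "+r" (tmp));" idiom used above is a
 * compiler barrier: the empty asm treats tmp as read-and-written, so the
 * load cannot be optimized away and the anonymous page really is populated
 * (with the shared zeropage on first read) before pagemap is queried. A
 * minimal stand-alone sketch of the same idiom; the helper name is
 * hypothetical:
 */
static inline void example_force_read(const char *addr)
{
	char tmp = *addr;		/* the read faults the page in */

	asm volatile("" : "+r" (tmp));	/* keep the load; emits no code */
}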
/* * QorIQ Sec/Crypto 5.2 device tree stub [ controller @ offset 0x300000 ] * * Copyright 2011-2012 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ crypto: crypto@300000 { compatible = "fsl,sec-v5.2", "fsl,sec-v5.0", "fsl,sec-v4.0"; fsl,sec-era = <5>; #address-cells = <1>; #size-cells = <1>; reg = <0x300000 0x10000>; ranges = <0 0x300000 0x10000>; interrupts = <92 2 0 0>; sec_jr0: jr@1000 { compatible = "fsl,sec-v5.2-job-ring", "fsl,sec-v5.0-job-ring", "fsl,sec-v4.0-job-ring"; reg = <0x1000 0x1000>; interrupts = <88 2 0 0>; }; sec_jr1: jr@2000 { compatible = "fsl,sec-v5.2-job-ring", "fsl,sec-v5.0-job-ring", "fsl,sec-v4.0-job-ring"; reg = <0x2000 0x1000>; interrupts = <89 2 0 0>; }; sec_jr2: jr@3000 { compatible = "fsl,sec-v5.2-job-ring", "fsl,sec-v5.0-job-ring", "fsl,sec-v4.0-job-ring"; reg = <0x3000 0x1000>; interrupts = <90 2 0 0>; }; sec_jr3: jr@4000 { compatible = "fsl,sec-v5.2-job-ring", "fsl,sec-v5.0-job-ring", "fsl,sec-v4.0-job-ring"; reg = <0x4000 0x1000>; interrupts = <91 2 0 0>; }; rtic@6000 { compatible = "fsl,sec-v5.2-rtic", "fsl,sec-v5.0-rtic", "fsl,sec-v4.0-rtic"; #address-cells = <1>; #size-cells = <1>; reg = <0x6000 0x100>; ranges = <0x0 0x6100 0xe00>; rtic_a: rtic-a@0 { compatible = "fsl,sec-v5.2-rtic-memory", "fsl,sec-v5.0-rtic-memory", "fsl,sec-v4.0-rtic-memory"; reg = <0x00 0x20 0x100 0x80>; }; rtic_b: rtic-b@20 { compatible = "fsl,sec-v5.2-rtic-memory", "fsl,sec-v5.0-rtic-memory", "fsl,sec-v4.0-rtic-memory"; reg = <0x20 0x20 0x200 0x80>; }; rtic_c: rtic-c@40 { compatible = "fsl,sec-v5.2-rtic-memory", "fsl,sec-v5.0-rtic-memory", "fsl,sec-v4.0-rtic-memory"; reg = <0x40 0x20 0x300 0x80>; }; rtic_d: rtic-d@60 { compatible = "fsl,sec-v5.2-rtic-memory", "fsl,sec-v5.0-rtic-memory", "fsl,sec-v4.0-rtic-memory"; reg = <0x60 0x20 0x500 0x80>; }; }; }; sec_mon: sec_mon@314000 { compatible = "fsl,sec-v5.2-mon", "fsl,sec-v5.0-mon", "fsl,sec-v4.0-mon"; reg = <0x314000 0x1000>; interrupts = <93 2 0 0>; };
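
/*
 * The labels defined above (sec_jr0..sec_jr3, rtic_a..rtic_d, sec_mon)
 * let a board-level .dts that includes this stub adjust individual nodes.
 * A hypothetical board override, e.g. disabling an unused job ring, would
 * look like the sketch below (illustrative, not part of this stub):
 */
&sec_jr3 {
	status = "disabled";
};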
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Toshiba TC90522 Demodulator
 *
 * Copyright (C) 2014 Akihiro Tsukada <[email protected]>
 */

/*
 * The demod has 4 inputs (2x ISDB-T and 2x ISDB-S) and provides an
 * independent sub module for each input. As the sub modules work in
 * parallel and have separate i2c addresses, this driver treats each
 * sub module as one demod device.
 */

#ifndef TC90522_H
#define TC90522_H

#include <linux/i2c.h>
#include <media/dvb_frontend.h>

/* I2C device types */
#define TC90522_I2C_DEV_SAT	"tc90522sat"
#define TC90522_I2C_DEV_TER	"tc90522ter"

struct tc90522_config {
	/* [OUT] frontend returned by driver */
	struct dvb_frontend *fe;

	/* [OUT] tuner I2C adapter returned by driver */
	struct i2c_adapter *tuner_i2c;

	/* [IN] use two separate I2C transactions for one tuner read */
	bool split_tuner_read_i2c;
};

#endif /* TC90522_H */
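
/*
 * A minimal sketch of how a bridge driver might instantiate one ISDB-S
 * sub module through this header. The adapter, the 0x21 slave address and
 * the example_* names are hypothetical; only TC90522_I2C_DEV_SAT and
 * struct tc90522_config come from tc90522.h above.
 */
#include <linux/err.h>
#include <linux/i2c.h>
#include "tc90522.h"

static struct dvb_frontend *example_attach_sat(struct i2c_adapter *adap)
{
	static struct tc90522_config cfg;	/* .fe and .tuner_i2c are [OUT] */
	struct i2c_board_info info = {
		.type = TC90522_I2C_DEV_SAT,
		.addr = 0x21,			/* hypothetical slave address */
		.platform_data = &cfg,
	};
	struct i2c_client *client;

	client = i2c_new_client_device(adap, &info);
	if (IS_ERR(client))
		return NULL;

	return cfg.fe;		/* filled in by the tc90522 driver's probe */
}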
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Linutronix GmbH */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("tc")
int time_tai(struct __sk_buff *skb)
{
	__u64 ts1, ts2;

	/* Get TAI timestamps */
	ts1 = bpf_ktime_get_tai_ns();
	ts2 = bpf_ktime_get_tai_ns();

	/* Save TAI timestamps (Note: skb->hwtstamp is read-only) */
	skb->tstamp = ts1;
	skb->cb[0] = ts2 & 0xffffffff;
	skb->cb[1] = ts2 >> 32;

	return 0;
}
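
/*
 * The program stores ts1 in skb->tstamp and splits ts2 across cb[0]/cb[1]
 * because the cb[] slots are 32 bit. The user-space half of the selftest
 * is assumed to reassemble and compare the two values roughly like this
 * hypothetical helper (names are illustrative, not from the selftest):
 */
static int example_tai_monotonic(__u64 tstamp, __u32 cb0, __u32 cb1)
{
	__u64 ts1 = tstamp;
	__u64 ts2 = ((__u64)cb1 << 32) | cb0;

	/* CLOCK_TAI must not go backwards between the two reads */
	return ts2 >= ts1;
}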
/* SPDX-License-Identifier: GPL-2.0 */ /* * mt8188-afe-common.h -- MediaTek 8188 audio driver definitions * * Copyright (c) 2022 MediaTek Inc. * Author: Bicycle Tsai <[email protected]> * Trevor Wu <[email protected]> * Chun-Chia Chiu <[email protected]> */ #ifndef _MT_8188_AFE_COMMON_H_ #define _MT_8188_AFE_COMMON_H_ #include <linux/list.h> #include <linux/regmap.h> #include <sound/soc.h> #include "../common/mtk-base-afe.h" enum { MT8188_DAI_START, MT8188_AFE_MEMIF_START = MT8188_DAI_START, MT8188_AFE_MEMIF_DL2 = MT8188_AFE_MEMIF_START, MT8188_AFE_MEMIF_DL3, MT8188_AFE_MEMIF_DL6, MT8188_AFE_MEMIF_DL7, MT8188_AFE_MEMIF_DL8, MT8188_AFE_MEMIF_DL10, MT8188_AFE_MEMIF_DL11, MT8188_AFE_MEMIF_UL_START, MT8188_AFE_MEMIF_UL1 = MT8188_AFE_MEMIF_UL_START, MT8188_AFE_MEMIF_UL2, MT8188_AFE_MEMIF_UL3, MT8188_AFE_MEMIF_UL4, MT8188_AFE_MEMIF_UL5, MT8188_AFE_MEMIF_UL6, MT8188_AFE_MEMIF_UL8, MT8188_AFE_MEMIF_UL9, MT8188_AFE_MEMIF_UL10, MT8188_AFE_MEMIF_END, MT8188_AFE_MEMIF_NUM = (MT8188_AFE_MEMIF_END - MT8188_AFE_MEMIF_START), MT8188_AFE_IO_START = MT8188_AFE_MEMIF_END, MT8188_AFE_IO_DL_SRC = MT8188_AFE_IO_START, MT8188_AFE_IO_DMIC_IN, MT8188_AFE_IO_DPTX, MT8188_AFE_IO_ETDM_START, MT8188_AFE_IO_ETDM1_IN = MT8188_AFE_IO_ETDM_START, MT8188_AFE_IO_ETDM2_IN, MT8188_AFE_IO_ETDM1_OUT, MT8188_AFE_IO_ETDM2_OUT, MT8188_AFE_IO_ETDM3_OUT, MT8188_AFE_IO_ETDM_END, MT8188_AFE_IO_ETDM_NUM = (MT8188_AFE_IO_ETDM_END - MT8188_AFE_IO_ETDM_START), MT8188_AFE_IO_PCM = MT8188_AFE_IO_ETDM_END, MT8188_AFE_IO_UL_SRC, MT8188_AFE_IO_END, MT8188_AFE_IO_NUM = (MT8188_AFE_IO_END - MT8188_AFE_IO_START), MT8188_DAI_END = MT8188_AFE_IO_END, MT8188_DAI_NUM = (MT8188_DAI_END - MT8188_DAI_START), }; enum { MT8188_TOP_CG_A1SYS_TIMING, MT8188_TOP_CG_A2SYS_TIMING, MT8188_TOP_CG_26M_TIMING, MT8188_TOP_CG_NUM, }; enum { MT8188_AFE_IRQ_1, MT8188_AFE_IRQ_2, MT8188_AFE_IRQ_3, MT8188_AFE_IRQ_8, MT8188_AFE_IRQ_9, MT8188_AFE_IRQ_10, MT8188_AFE_IRQ_13, MT8188_AFE_IRQ_14, MT8188_AFE_IRQ_15, MT8188_AFE_IRQ_16, MT8188_AFE_IRQ_17, MT8188_AFE_IRQ_18, MT8188_AFE_IRQ_19, MT8188_AFE_IRQ_20, MT8188_AFE_IRQ_21, MT8188_AFE_IRQ_22, MT8188_AFE_IRQ_23, MT8188_AFE_IRQ_24, MT8188_AFE_IRQ_25, MT8188_AFE_IRQ_26, MT8188_AFE_IRQ_27, MT8188_AFE_IRQ_28, MT8188_AFE_IRQ_NUM, }; enum { MT8188_ETDM_OUT1_1X_EN = 9, MT8188_ETDM_OUT2_1X_EN = 10, MT8188_ETDM_OUT3_1X_EN = 11, MT8188_ETDM_IN1_1X_EN = 12, MT8188_ETDM_IN2_1X_EN = 13, MT8188_ETDM_IN1_NX_EN = 25, MT8188_ETDM_IN2_NX_EN = 26, }; enum { MT8188_MTKAIF_MISO_0, MT8188_MTKAIF_MISO_1, MT8188_MTKAIF_MISO_NUM, }; struct mtk_dai_memif_irq_priv { unsigned int asys_timing_sel; }; struct mtkaif_param { bool mtkaif_calibration_ok; int mtkaif_chosen_phase[MT8188_MTKAIF_MISO_NUM]; int mtkaif_phase_cycle[MT8188_MTKAIF_MISO_NUM]; int mtkaif_dmic_on; }; struct clk; struct mt8188_afe_private { struct clk **clk; struct clk_lookup **lookup; struct regmap *topckgen; int pm_runtime_bypass_reg_ctl; spinlock_t afe_ctrl_lock; /* Lock for afe control */ struct mtk_dai_memif_irq_priv irq_priv[MT8188_AFE_IRQ_NUM]; struct mtkaif_param mtkaif_params; /* dai */ void *dai_priv[MT8188_DAI_NUM]; }; int mt8188_afe_fs_timing(unsigned int rate); /* dai register */ int mt8188_dai_adda_register(struct mtk_base_afe *afe); int mt8188_dai_etdm_register(struct mtk_base_afe *afe); int mt8188_dai_pcm_register(struct mtk_base_afe *afe); #define MT8188_SOC_ENUM_EXT(xname, xenum, xhandler_get, xhandler_put, id) \ { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ .info = snd_soc_info_enum_double, \ .get = xhandler_get, .put = xhandler_put, \ .device = id, \ 
.private_value = (unsigned long)&(xenum), \ } #endif
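
/*
 * A minimal sketch of how MT8188_SOC_ENUM_EXT() above would be used to
 * declare a device-scoped mixer control. The texts, handler prototypes
 * and control name here are hypothetical; only the macro comes from this
 * header:
 */
static const char * const example_texts[] = { "Off", "On" };
static SOC_ENUM_SINGLE_EXT_DECL(example_enum, example_texts);

static int example_get(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_value *ucontrol);
static int example_put(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_value *ucontrol);

static const struct snd_kcontrol_new example_controls[] = {
	MT8188_SOC_ENUM_EXT("Example Switch", example_enum,
			    example_get, example_put, 0),
};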
// SPDX-License-Identifier: GPL-2.0-only /* * MMC IOdelay values for TI's DRA74x, DRA75x and AM572x SoCs. * * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/ */ /* * Rules for modifying this file: * a) Update of this file should typically correspond to a datamanual revision. * Datamanual revision that was used should be updated in comment below. * If there is no update to datamanual, do not update the values. If you * need to use values different from that recommended by the datamanual * for your design, then you should consider adding values to the device- * -tree file for your board directly. * b) We keep the mode names as close to the datamanual as possible. So * if the manual calls a mode, DDR50, or DDR or DDR 1.8v or DDR 3.3v, * we follow that in code too. * c) If the values change between multiple revisions of silicon, we add * a revision tag to both the new and old entry. Use 'rev11' for PG 1.1, * 'rev20' for PG 2.0 and so on. * d) The node name and node label should be the exact same string. This is * to curb naming creativity and achieve consistency. * * Datamanual Revisions: * * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019 * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016 * */ &dra7_pmx_core { mmc1_pins_default: mmc1-default-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */ DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */ DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */ DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */ DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */ DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */ >; }; mmc1_pins_sdr12: mmc1-sdr12-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */ DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */ DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */ DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */ DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */ DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */ >; }; mmc1_pins_hs: mmc1-hs-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_clk.clk */ DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_cmd.cmd */ DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat0.dat0 */ DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat1.dat1 */ DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat2.dat2 */ DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat3.dat3 */ >; }; mmc1_pins_sdr25: mmc1-sdr25-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_clk.clk */ DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_cmd.cmd */ DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat0.dat0 */ DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat1.dat1 */ DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat2.dat2 */ DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | 
MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat3.dat3 */ >; }; mmc1_pins_sdr50: mmc1-sdr50-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_clk.clk */ DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_cmd.cmd */ DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_dat0.dat0 */ DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_dat1.dat1 */ DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_dat2.dat2 */ DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_dat3.dat3 */ >; }; mmc1_pins_ddr50: mmc1-ddr50-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_clk.clk */ DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_cmd.cmd */ DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat0.dat0 */ DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat1.dat1 */ DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat2.dat2 */ DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat3.dat3 */ >; }; mmc1_pins_sdr104: mmc1-sdr104-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_clk.clk */ DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_cmd.cmd */ DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat0.dat0 */ DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat1.dat1 */ DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat2.dat2 */ DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat3.dat3 */ >; }; mmc2_pins_default: mmc2-default-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */ DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ >; }; mmc2_pins_hs: mmc2-hs-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */ DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | 
MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ >; }; mmc2_pins_ddr_3_3v_rev11: mmc2-ddr-3-3v-rev11-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a23.mmc2_clk */ DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ >; }; mmc2_pins_ddr_1_8v_rev11: mmc2-ddr-1-8v-rev11-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a23.mmc2_clk */ DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ >; }; mmc2_pins_ddr_rev20: mmc2-ddr-rev20-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */ DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ >; }; mmc2_pins_hs200: mmc2-hs200-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a23.mmc2_clk */ DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | 
MODE_SELECT | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ >; }; mmc4_pins_default: mmc4-default-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x37e8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_ctsn.mmc4_clk */ DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_rtsn.mmc4_cmd */ DRA7XX_CORE_IOPAD(0x37f0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rxd.mmc4_dat0 */ DRA7XX_CORE_IOPAD(0x37f4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_txd.mmc4_dat1 */ DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_ctsn.mmc4_dat2 */ DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rtsn.mmc4_dat3 */ >; }; mmc4_pins_hs: mmc4-hs-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x37e8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_ctsn.mmc4_clk */ DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_rtsn.mmc4_cmd */ DRA7XX_CORE_IOPAD(0x37f0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rxd.mmc4_dat0 */ DRA7XX_CORE_IOPAD(0x37f4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_txd.mmc4_dat1 */ DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_ctsn.mmc4_dat2 */ DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rtsn.mmc4_dat3 */ >; }; mmc3_pins_default: mmc3-default-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ >; }; mmc3_pins_hs: mmc3-hs-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ >; }; mmc3_pins_sdr12: mmc3-sdr12-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 
*/ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ >; }; mmc3_pins_sdr25: mmc3-sdr25-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ >; }; mmc3_pins_sdr50: mmc3-sdr50-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ >; }; mmc4_pins_sdr12: mmc4-sdr12-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x37e8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_ctsn.mmc4_clk */ DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_rtsn.mmc4_cmd */ DRA7XX_CORE_IOPAD(0x37f0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rxd.mmc4_dat0 */ DRA7XX_CORE_IOPAD(0x37f4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_txd.mmc4_dat1 */ DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_ctsn.mmc4_dat2 */ DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rtsn.mmc4_dat3 */ >; }; mmc4_pins_sdr25: mmc4-sdr25-pins { pinctrl-single,pins = < DRA7XX_CORE_IOPAD(0x37e8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_ctsn.mmc4_clk */ DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_rtsn.mmc4_cmd */ DRA7XX_CORE_IOPAD(0x37f0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rxd.mmc4_dat0 */ DRA7XX_CORE_IOPAD(0x37f4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_txd.mmc4_dat1 */ DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_ctsn.mmc4_dat2 */ DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rtsn.mmc4_dat3 */ >; }; }; &dra7_iodelay_core { /* Corresponds to MMC1_DDR_MANUAL1 in datamanual */ mmc1_iodelay_ddr_rev11_conf: mmc1_iodelay_ddr_rev11_conf { pinctrl-pin-array = < 0x618 A_DELAY_PS(572) G_DELAY_PS(540) /* CFG_MMC1_CLK_IN */ 0x620 A_DELAY_PS(1525) G_DELAY_PS(0) /* CFG_MMC1_CLK_OUT */ 0x624 A_DELAY_PS(0) G_DELAY_PS(600) /* CFG_MMC1_CMD_IN */ 0x628 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OEN */ 0x62c A_DELAY_PS(55) G_DELAY_PS(0) /* CFG_MMC1_CMD_OUT */ 0x630 A_DELAY_PS(403) G_DELAY_PS(120) /* CFG_MMC1_DAT0_IN */ 0x634 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OEN */ 0x638 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OUT */ 0x63c A_DELAY_PS(23) G_DELAY_PS(60) /* CFG_MMC1_DAT1_IN */ 0x640 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OEN */ 0x644 A_DELAY_PS(0) 
G_DELAY_PS(0) /* CFG_MMC1_DAT1_OUT */ 0x648 A_DELAY_PS(25) G_DELAY_PS(60) /* CFG_MMC1_DAT2_IN */ 0x64c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OEN */ 0x650 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OUT */ 0x654 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_IN */ 0x658 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OEN */ 0x65c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OUT */ >; }; /* Corresponds to MMC1_DDR_MANUAL1 in datamanual */ mmc1_iodelay_ddr_rev20_conf: mmc1_iodelay_ddr50_rev20_conf { pinctrl-pin-array = < 0x618 A_DELAY_PS(1076) G_DELAY_PS(330) /* CFG_MMC1_CLK_IN */ 0x620 A_DELAY_PS(1271) G_DELAY_PS(0) /* CFG_MMC1_CLK_OUT */ 0x624 A_DELAY_PS(722) G_DELAY_PS(0) /* CFG_MMC1_CMD_IN */ 0x628 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OEN */ 0x62C A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OUT */ 0x630 A_DELAY_PS(751) G_DELAY_PS(0) /* CFG_MMC1_DAT0_IN */ 0x634 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OEN */ 0x638 A_DELAY_PS(20) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OUT */ 0x63C A_DELAY_PS(256) G_DELAY_PS(0) /* CFG_MMC1_DAT1_IN */ 0x640 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OEN */ 0x644 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OUT */ 0x648 A_DELAY_PS(263) G_DELAY_PS(0) /* CFG_MMC1_DAT2_IN */ 0x64C A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OEN */ 0x650 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OUT */ 0x654 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_IN */ 0x658 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OEN */ 0x65C A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OUT */ >; }; /* Corresponds to MMC1_SDR104_MANUAL1 in datamanual */ mmc1_iodelay_sdr104_rev11_conf: mmc1_iodelay_sdr104_rev11_conf { pinctrl-pin-array = < 0x620 A_DELAY_PS(1063) G_DELAY_PS(17) /* CFG_MMC1_CLK_OUT */ 0x628 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OEN */ 0x62c A_DELAY_PS(23) G_DELAY_PS(0) /* CFG_MMC1_CMD_OUT */ 0x634 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OEN */ 0x638 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OUT */ 0x640 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OEN */ 0x644 A_DELAY_PS(2) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OUT */ 0x64c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OEN */ 0x650 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OUT */ 0x658 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OEN */ 0x65c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OUT */ >; }; /* Corresponds to MMC1_SDR104_MANUAL1 in datamanual */ mmc1_iodelay_sdr104_rev20_conf: mmc1_iodelay_sdr104_rev20_conf { pinctrl-pin-array = < 0x620 A_DELAY_PS(600) G_DELAY_PS(400) /* CFG_MMC1_CLK_OUT */ 0x628 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OEN */ 0x62c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OUT */ 0x634 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OEN */ 0x638 A_DELAY_PS(30) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OUT */ 0x640 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OEN */ 0x644 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OUT */ 0x64c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OEN */ 0x650 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OUT */ 0x658 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OEN */ 0x65c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OUT */ >; }; /* Corresponds to MMC2_HS200_MANUAL1 in datamanual */ mmc2_iodelay_hs200_rev11_conf: mmc2_iodelay_hs200_rev11_conf { pinctrl-pin-array = < 0x190 A_DELAY_PS(621) G_DELAY_PS(600) /* CFG_GPMC_A19_OEN */ 0x194 A_DELAY_PS(300) G_DELAY_PS(0) /* CFG_GPMC_A19_OUT */ 0x1a8 A_DELAY_PS(739) G_DELAY_PS(600) /* CFG_GPMC_A20_OEN */ 0x1ac A_DELAY_PS(240) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ 0x1b4 A_DELAY_PS(812) G_DELAY_PS(600) /* CFG_GPMC_A21_OEN */ 0x1b8 
A_DELAY_PS(240) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ 0x1c0 A_DELAY_PS(954) G_DELAY_PS(600) /* CFG_GPMC_A22_OEN */ 0x1c4 A_DELAY_PS(60) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ 0x1d0 A_DELAY_PS(1340) G_DELAY_PS(420) /* CFG_GPMC_A23_OUT */ 0x1d8 A_DELAY_PS(935) G_DELAY_PS(600) /* CFG_GPMC_A24_OEN */ 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ 0x1e4 A_DELAY_PS(525) G_DELAY_PS(600) /* CFG_GPMC_A25_OEN */ 0x1e8 A_DELAY_PS(120) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ 0x1f0 A_DELAY_PS(767) G_DELAY_PS(600) /* CFG_GPMC_A26_OEN */ 0x1f4 A_DELAY_PS(225) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ 0x1fc A_DELAY_PS(565) G_DELAY_PS(600) /* CFG_GPMC_A27_OEN */ 0x200 A_DELAY_PS(60) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ 0x364 A_DELAY_PS(969) G_DELAY_PS(600) /* CFG_GPMC_CS1_OEN */ 0x368 A_DELAY_PS(180) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ >; }; /* Corresponds to MMC2_HS200_MANUAL1 in datamanual */ mmc2_iodelay_hs200_rev20_conf: mmc2_iodelay_hs200_rev20_conf { pinctrl-pin-array = < 0x190 A_DELAY_PS(274) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ 0x194 A_DELAY_PS(162) G_DELAY_PS(0) /* CFG_GPMC_A19_OUT */ 0x1a8 A_DELAY_PS(401) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ 0x1ac A_DELAY_PS(73) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ 0x1b4 A_DELAY_PS(465) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ 0x1b8 A_DELAY_PS(115) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ 0x1c0 A_DELAY_PS(633) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ 0x1c4 A_DELAY_PS(47) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ 0x1d0 A_DELAY_PS(935) G_DELAY_PS(280) /* CFG_GPMC_A23_OUT */ 0x1d8 A_DELAY_PS(621) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ 0x1e4 A_DELAY_PS(183) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ 0x1e8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ 0x1f0 A_DELAY_PS(467) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ 0x1f4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ 0x1fc A_DELAY_PS(262) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ 0x200 A_DELAY_PS(46) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ 0x364 A_DELAY_PS(684) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ 0x368 A_DELAY_PS(76) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ >; }; /* Corresponds to MMC2_DDR_3V3_MANUAL1 in datamanual */ mmc2_iodelay_ddr_3_3v_rev11_conf: mmc2_iodelay_ddr_3_3v_rev11_conf { pinctrl-pin-array = < 0x18c A_DELAY_PS(0) G_DELAY_PS(120) /* CFG_GPMC_A19_IN */ 0x190 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ 0x194 A_DELAY_PS(174) G_DELAY_PS(0) /* CFG_GPMC_A19_OUT */ 0x1a4 A_DELAY_PS(265) G_DELAY_PS(360) /* CFG_GPMC_A20_IN */ 0x1a8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ 0x1ac A_DELAY_PS(168) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ 0x1b0 A_DELAY_PS(0) G_DELAY_PS(120) /* CFG_GPMC_A21_IN */ 0x1b4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ 0x1b8 A_DELAY_PS(136) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ 0x1bc A_DELAY_PS(0) G_DELAY_PS(120) /* CFG_GPMC_A22_IN */ 0x1c0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ 0x1c4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ 0x1c8 A_DELAY_PS(287) G_DELAY_PS(420) /* CFG_GPMC_A23_IN */ 0x1d0 A_DELAY_PS(879) G_DELAY_PS(0) /* CFG_GPMC_A23_OUT */ 0x1d4 A_DELAY_PS(144) G_DELAY_PS(240) /* CFG_GPMC_A24_IN */ 0x1d8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ 0x1e0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_IN */ 0x1e4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ 0x1e8 A_DELAY_PS(34) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ 0x1ec A_DELAY_PS(0) G_DELAY_PS(120) /* CFG_GPMC_A26_IN */ 0x1f0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ 0x1f4 A_DELAY_PS(120) G_DELAY_PS(0) /*
CFG_GPMC_A26_OUT */ 0x1f8 A_DELAY_PS(120) G_DELAY_PS(180) /* CFG_GPMC_A27_IN */ 0x1fc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ 0x200 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ 0x360 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_CS1_IN */ 0x364 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ 0x368 A_DELAY_PS(11) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ >; }; /* Corresponds to MMC2_DDR_1V8_MANUAL1 in datamanual */ mmc2_iodelay_ddr_1_8v_rev11_conf: mmc2_iodelay_ddr_1_8v_rev11_conf { pinctrl-pin-array = < 0x18c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A19_IN */ 0x190 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ 0x194 A_DELAY_PS(174) G_DELAY_PS(0) /* CFG_GPMC_A19_OUT */ 0x1a4 A_DELAY_PS(274) G_DELAY_PS(240) /* CFG_GPMC_A20_IN */ 0x1a8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ 0x1ac A_DELAY_PS(168) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ 0x1b0 A_DELAY_PS(0) G_DELAY_PS(60) /* CFG_GPMC_A21_IN */ 0x1b4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ 0x1b8 A_DELAY_PS(136) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ 0x1bc A_DELAY_PS(0) G_DELAY_PS(60) /* CFG_GPMC_A22_IN */ 0x1c0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ 0x1c4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ 0x1c8 A_DELAY_PS(514) G_DELAY_PS(360) /* CFG_GPMC_A23_IN */ 0x1d0 A_DELAY_PS(879) G_DELAY_PS(0) /* CFG_GPMC_A23_OUT */ 0x1d4 A_DELAY_PS(187) G_DELAY_PS(120) /* CFG_GPMC_A24_IN */ 0x1d8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ 0x1e0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_IN */ 0x1e4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ 0x1e8 A_DELAY_PS(34) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ 0x1ec A_DELAY_PS(0) G_DELAY_PS(60) /* CFG_GPMC_A26_IN */ 0x1f0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ 0x1f4 A_DELAY_PS(120) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ 0x1f8 A_DELAY_PS(121) G_DELAY_PS(60) /* CFG_GPMC_A27_IN */ 0x1fc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ 0x200 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ 0x360 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_CS1_IN */ 0x364 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ 0x368 A_DELAY_PS(11) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ >; }; /* Corresponds to MMC3_MANUAL1 in datamanual */ mmc3_iodelay_manual1_rev20_conf: mmc3_iodelay_manual1_rev20_conf { pinctrl-pin-array = < 0x678 A_DELAY_PS(0) G_DELAY_PS(386) /* CFG_MMC3_CLK_IN */ 0x680 A_DELAY_PS(605) G_DELAY_PS(0) /* CFG_MMC3_CLK_OUT */ 0x684 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_CMD_IN */ 0x688 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_CMD_OEN */ 0x68c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_CMD_OUT */ 0x690 A_DELAY_PS(171) G_DELAY_PS(0) /* CFG_MMC3_DAT0_IN */ 0x694 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT0_OEN */ 0x698 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT0_OUT */ 0x69c A_DELAY_PS(221) G_DELAY_PS(0) /* CFG_MMC3_DAT1_IN */ 0x6a0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT1_OEN */ 0x6a4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT1_OUT */ 0x6a8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT2_IN */ 0x6ac A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT2_OEN */ 0x6b0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT2_OUT */ 0x6b4 A_DELAY_PS(474) G_DELAY_PS(0) /* CFG_MMC3_DAT3_IN */ 0x6b8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT3_OEN */ 0x6bc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT3_OUT */ >; }; /* Corresponds to MMC3_MANUAL1 in datamanual */ mmc3_iodelay_manual1_rev11_conf: mmc3_iodelay_manual1_rev11_conf { pinctrl-pin-array = < 0x678 A_DELAY_PS(406) G_DELAY_PS(0) /* CFG_MMC3_CLK_IN */ 0x680 A_DELAY_PS(659) G_DELAY_PS(0) /*
CFG_MMC3_CLK_OUT */ 0x684 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_CMD_IN */ 0x688 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_CMD_OEN */ 0x68c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_CMD_OUT */ 0x690 A_DELAY_PS(130) G_DELAY_PS(0) /* CFG_MMC3_DAT0_IN */ 0x694 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT0_OEN */ 0x698 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT0_OUT */ 0x69c A_DELAY_PS(169) G_DELAY_PS(0) /* CFG_MMC3_DAT1_IN */ 0x6a0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT1_OEN */ 0x6a4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT1_OUT */ 0x6a8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT2_IN */ 0x6ac A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT2_OEN */ 0x6b0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT2_OUT */ 0x6b4 A_DELAY_PS(457) G_DELAY_PS(0) /* CFG_MMC3_DAT3_IN */ 0x6b8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT3_OEN */ 0x6bc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT3_OUT */ >; }; /* Corresponds to MMC4_DS_MANUAL1 in datamanual */ mmc4_iodelay_ds_rev11_conf: mmc4_iodelay_ds_rev11_conf { pinctrl-pin-array = < 0x840 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_CTSN_IN */ 0x848 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_CTSN_OUT */ 0x84c A_DELAY_PS(96) G_DELAY_PS(0) /* CFG_UART1_RTSN_IN */ 0x850 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OEN */ 0x854 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OUT */ 0x870 A_DELAY_PS(582) G_DELAY_PS(0) /* CFG_UART2_CTSN_IN */ 0x874 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OEN */ 0x878 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OUT */ 0x87c A_DELAY_PS(391) G_DELAY_PS(0) /* CFG_UART2_RTSN_IN */ 0x880 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OEN */ 0x884 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OUT */ 0x888 A_DELAY_PS(561) G_DELAY_PS(0) /* CFG_UART2_RXD_IN */ 0x88c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OEN */ 0x890 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OUT */ 0x894 A_DELAY_PS(588) G_DELAY_PS(0) /* CFG_UART2_TXD_IN */ 0x898 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OEN */ 0x89c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OUT */ >; }; /* Corresponds to MMC4_DS_MANUAL1 in datamanual */ mmc4_iodelay_ds_rev20_conf: mmc4_iodelay_ds_rev20_conf { pinctrl-pin-array = < 0x840 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_CTSN_IN */ 0x848 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_CTSN_OUT */ 0x84c A_DELAY_PS(307) G_DELAY_PS(0) /* CFG_UART1_RTSN_IN */ 0x850 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OEN */ 0x854 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OUT */ 0x870 A_DELAY_PS(785) G_DELAY_PS(0) /* CFG_UART2_CTSN_IN */ 0x874 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OEN */ 0x878 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OUT */ 0x87c A_DELAY_PS(613) G_DELAY_PS(0) /* CFG_UART2_RTSN_IN */ 0x880 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OEN */ 0x884 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OUT */ 0x888 A_DELAY_PS(683) G_DELAY_PS(0) /* CFG_UART2_RXD_IN */ 0x88c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OEN */ 0x890 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OUT */ 0x894 A_DELAY_PS(835) G_DELAY_PS(0) /* CFG_UART2_TXD_IN */ 0x898 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OEN */ 0x89c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OUT */ >; }; /* Corresponds to MMC4_MANUAL1 in datamanual */ mmc4_iodelay_sdr12_hs_sdr25_rev11_conf: mmc4_iodelay_sdr12_hs_sdr25_rev11_conf { pinctrl-pin-array = < 0x840 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_CTSN_IN */ 0x848 A_DELAY_PS(2651) G_DELAY_PS(0) /* CFG_UART1_CTSN_OUT */ 0x84c A_DELAY_PS(1572) G_DELAY_PS(0) /* CFG_UART1_RTSN_IN */ 0x850 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OEN 
*/ 0x854 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OUT */ 0x870 A_DELAY_PS(1913) G_DELAY_PS(0) /* CFG_UART2_CTSN_IN */ 0x874 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OEN */ 0x878 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OUT */ 0x87c A_DELAY_PS(1721) G_DELAY_PS(0) /* CFG_UART2_RTSN_IN */ 0x880 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OEN */ 0x884 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OUT */ 0x888 A_DELAY_PS(1891) G_DELAY_PS(0) /* CFG_UART2_RXD_IN */ 0x88c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OEN */ 0x890 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OUT */ 0x894 A_DELAY_PS(1919) G_DELAY_PS(0) /* CFG_UART2_TXD_IN */ 0x898 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OEN */ 0x89c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OUT */ >; }; /* Corresponds to MMC4_MANUAL1 in datamanual */ mmc4_iodelay_sdr12_hs_sdr25_rev20_conf: mmc4_iodelay_sdr12_hs_sdr25_rev20_conf { pinctrl-pin-array = < 0x840 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_CTSN_IN */ 0x848 A_DELAY_PS(1147) G_DELAY_PS(0) /* CFG_UART1_CTSN_OUT */ 0x84c A_DELAY_PS(1834) G_DELAY_PS(0) /* CFG_UART1_RTSN_IN */ 0x850 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OEN */ 0x854 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OUT */ 0x870 A_DELAY_PS(2165) G_DELAY_PS(0) /* CFG_UART2_CTSN_IN */ 0x874 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OEN */ 0x878 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OUT */ 0x87c A_DELAY_PS(1929) G_DELAY_PS(64) /* CFG_UART2_RTSN_IN */ 0x880 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OEN */ 0x884 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OUT */ 0x888 A_DELAY_PS(1935) G_DELAY_PS(128) /* CFG_UART2_RXD_IN */ 0x88c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OEN */ 0x890 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OUT */ 0x894 A_DELAY_PS(2172) G_DELAY_PS(44) /* CFG_UART2_TXD_IN */ 0x898 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OEN */ 0x89c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OUT */ >; }; };
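/*
 * Illustrative use of the configurations above. The mmc1_pins_* groups named
 * here are assumptions (they are defined by consuming board files, not by
 * this file); a board pairs a pinmux state with the matching IODELAY
 * recalibration data for each timing mode:
 *
 *	&mmc1 {
 *		pinctrl-names = "default", "ddr50-rev11", "ddr50-rev20";
 *		pinctrl-0 = <&mmc1_pins_default>;
 *		pinctrl-1 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>;
 *		pinctrl-2 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
 *	};
 */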
// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <[email protected]> * Copyright (C) 2018 Lorenzo Bianconi <[email protected]> */ #include <linux/kernel.h> #include <linux/firmware.h> #include <linux/delay.h> #include "mt76x02_mcu.h" int mt76x02_mcu_parse_response(struct mt76_dev *mdev, int cmd, struct sk_buff *skb, int seq) { struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); u32 *rxfce; if (!skb) { dev_err(mdev->dev, "MCU message %02x (seq %d) timed out\n", abs(cmd), seq); dev->mcu_timeout = 1; return -ETIMEDOUT; } rxfce = (u32 *)skb->cb; if (seq != FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce)) return -EAGAIN; return 0; } EXPORT_SYMBOL_GPL(mt76x02_mcu_parse_response); int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, int len, bool wait_resp) { struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); unsigned long expires = jiffies + HZ; struct sk_buff *skb; u32 tx_info; int ret; u8 seq; if (dev->mcu_timeout) return -EIO; skb = mt76_mcu_msg_alloc(mdev, data, len); if (!skb) return -ENOMEM; mutex_lock(&mdev->mcu.mutex); seq = ++mdev->mcu.msg_seq & 0xf; if (!seq) seq = ++mdev->mcu.msg_seq & 0xf; tx_info = MT_MCU_MSG_TYPE_CMD | FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) | FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) | FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | FIELD_PREP(MT_MCU_MSG_LEN, skb->len); ret = mt76_tx_queue_skb_raw(dev, mdev->q_mcu[MT_MCUQ_WM], skb, tx_info); if (ret) goto out; while (wait_resp) { skb = mt76_mcu_get_response(&dev->mt76, expires); ret = mt76x02_mcu_parse_response(mdev, cmd, skb, seq); dev_kfree_skb(skb); if (ret != -EAGAIN) break; } out: mutex_unlock(&mdev->mcu.mutex); return ret; } EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send); int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func, u32 val) { struct { __le32 id; __le32 value; } __packed __aligned(4) msg = { .id = cpu_to_le32(func), .value = cpu_to_le32(val), }; bool wait = false; if (func != Q_SELECT) wait = true; return mt76_mcu_send_msg(&dev->mt76, CMD_FUN_SET_OP, &msg, sizeof(msg), wait); } EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select); int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on) { struct { __le32 mode; __le32 level; } __packed __aligned(4) msg = { .mode = cpu_to_le32(on ? 
RADIO_ON : RADIO_OFF), .level = cpu_to_le32(0), }; return mt76_mcu_send_msg(&dev->mt76, CMD_POWER_SAVING_OP, &msg, sizeof(msg), false); } EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state); int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param) { struct { __le32 id; __le32 value; } __packed __aligned(4) msg = { .id = cpu_to_le32(type), .value = cpu_to_le32(param), }; bool is_mt76x2e = mt76_is_mmio(&dev->mt76) && is_mt76x2(dev); int ret; if (is_mt76x2e) mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0); ret = mt76_mcu_send_msg(&dev->mt76, CMD_CALIBRATION_OP, &msg, sizeof(msg), true); if (ret) return ret; if (is_mt76x2e && WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0, BIT(31), BIT(31), 100))) return -ETIMEDOUT; return 0; } EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate); int mt76x02_mcu_cleanup(struct mt76x02_dev *dev) { struct sk_buff *skb; mt76_wr(dev, MT_MCU_INT_LEVEL, 1); usleep_range(20000, 30000); while ((skb = skb_dequeue(&dev->mt76.mcu.res_q)) != NULL) dev_kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup); void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev, const struct mt76x02_fw_header *h) { u16 bld = le16_to_cpu(h->build_ver); u16 ver = le16_to_cpu(h->fw_ver); snprintf(dev->mt76.hw->wiphy->fw_version, sizeof(dev->mt76.hw->wiphy->fw_version), "%d.%d.%02d-b%x", (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld); } EXPORT_SYMBOL_GPL(mt76x02_set_ethtool_fwver);
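/*
 * Worked example (illustrative values) for the "%d.%d.%02d-b%x" format above:
 * fw_ver = 0x1203 and build_ver = 0x8a51 decode to the nibbles 1, 2 and 3 and
 * yield the string "1.2.03-b8a51".
 */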
// SPDX-License-Identifier: GPL-2.0-or-later /* * Broadcom BCM7038 PWM driver * Author: Florian Fainelli * * Copyright (C) 2015 Broadcom Corporation */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/clk.h> #include <linux/export.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/spinlock.h> #define PWM_CTRL 0x00 #define CTRL_START BIT(0) #define CTRL_OEB BIT(1) #define CTRL_FORCE_HIGH BIT(2) #define CTRL_OPENDRAIN BIT(3) #define CTRL_CHAN_OFFS 4 #define PWM_CTRL2 0x04 #define CTRL2_OUT_SELECT BIT(0) #define PWM_CH_SIZE 0x8 #define PWM_CWORD_MSB(ch) (0x08 + ((ch) * PWM_CH_SIZE)) #define PWM_CWORD_LSB(ch) (0x0c + ((ch) * PWM_CH_SIZE)) /* Number of bits for the CWORD value */ #define CWORD_BIT_SIZE 16 /* * Maximum control word value allowed when variable-frequency PWM is used as a * clock for the constant-frequency PWM. */ #define CONST_VAR_F_MAX 32768 #define CONST_VAR_F_MIN 1 #define PWM_ON(ch) (0x18 + ((ch) * PWM_CH_SIZE)) #define PWM_ON_MIN 1 #define PWM_PERIOD(ch) (0x1c + ((ch) * PWM_CH_SIZE)) #define PWM_PERIOD_MIN 0 #define PWM_ON_PERIOD_MAX 0xff struct brcmstb_pwm { void __iomem *base; struct clk *clk; }; static inline u32 brcmstb_pwm_readl(struct brcmstb_pwm *p, unsigned int offset) { if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) return __raw_readl(p->base + offset); else return readl_relaxed(p->base + offset); } static inline void brcmstb_pwm_writel(struct brcmstb_pwm *p, u32 value, unsigned int offset) { if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) __raw_writel(value, p->base + offset); else writel_relaxed(value, p->base + offset); } static inline struct brcmstb_pwm *to_brcmstb_pwm(struct pwm_chip *chip) { return pwmchip_get_drvdata(chip); } /* * Fv is derived from the variable frequency output. The variable frequency * output is configured using this formula: * * W = cword, if cword < 2 ^ 15 else 16-bit 2's complement of cword * * Fv = W x 2 ^ -16 x 27 MHz (reference clock) * * The period is: (period + 1) / Fv and "on" time is on / (period + 1) * * The PWM core framework specifies that the "duty_ns" parameter is in fact the * "on" time, so this translates directly into our HW programming here.
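 * * For example (illustrative numbers): with the 27 MHz reference and cword = * 0x8000, Fv = 32768 x 2 ^ -16 x 27 MHz = 13.5 MHz. The config routine below * halves cword (keeping it a power of two) until the requested period, counted * in cycles of Fv, fits the 8-bit period register.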
*/ static int brcmstb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, u64 duty_ns, u64 period_ns) { struct brcmstb_pwm *p = to_brcmstb_pwm(chip); unsigned long pc, dc, cword = CONST_VAR_F_MAX; unsigned int channel = pwm->hwpwm; u32 value; /* * If asking for a duty_ns equal to period_ns, we need to subtract 1 * from the period value to make it shorter than the "on" time and * produce a flat 100% duty cycle signal, and max out the "on" time */ if (duty_ns == period_ns) { dc = PWM_ON_PERIOD_MAX; pc = PWM_ON_PERIOD_MAX - 1; goto done; } while (1) { u64 rate; /* * Calculate the base rate from base frequency and current * cword */ rate = (u64)clk_get_rate(p->clk) * (u64)cword; rate >>= CWORD_BIT_SIZE; pc = mul_u64_u64_div_u64(period_ns, rate, NSEC_PER_SEC); dc = mul_u64_u64_div_u64(duty_ns + 1, rate, NSEC_PER_SEC); /* * We can be called with separate duty and period updates, * so do not reject dc == 0 right away */ if (pc == PWM_PERIOD_MIN || (dc < PWM_ON_MIN && duty_ns)) return -EINVAL; /* We converged on a calculation */ if (pc <= PWM_ON_PERIOD_MAX && dc <= PWM_ON_PERIOD_MAX) break; /* * The cword needs to be a power of 2 for the variable * frequency generator to output a 50% duty cycle variable * frequency which is used as input clock to the fixed * frequency generator. */ cword >>= 1; /* * Desired periods are too large, we do not have a divider * for them */ if (cword < CONST_VAR_F_MIN) return -EINVAL; } done: /* * Configure the defined "cword" value to have the variable frequency * generator output a base frequency for the constant frequency * generator to derive from. */ brcmstb_pwm_writel(p, cword >> 8, PWM_CWORD_MSB(channel)); brcmstb_pwm_writel(p, cword & 0xff, PWM_CWORD_LSB(channel)); /* Select constant frequency signal output */ value = brcmstb_pwm_readl(p, PWM_CTRL2); value |= CTRL2_OUT_SELECT << (channel * CTRL_CHAN_OFFS); brcmstb_pwm_writel(p, value, PWM_CTRL2); /* Configure on and period value */ brcmstb_pwm_writel(p, pc, PWM_PERIOD(channel)); brcmstb_pwm_writel(p, dc, PWM_ON(channel)); return 0; } static inline void brcmstb_pwm_enable_set(struct brcmstb_pwm *p, unsigned int channel, bool enable) { unsigned int shift = channel * CTRL_CHAN_OFFS; u32 value; value = brcmstb_pwm_readl(p, PWM_CTRL); if (enable) { value &= ~(CTRL_OEB << shift); value |= (CTRL_START | CTRL_OPENDRAIN) << shift; } else { value &= ~((CTRL_START | CTRL_OPENDRAIN) << shift); value |= CTRL_OEB << shift; } brcmstb_pwm_writel(p, value, PWM_CTRL); } static int brcmstb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { struct brcmstb_pwm *p = to_brcmstb_pwm(chip); int err; if (state->polarity != PWM_POLARITY_NORMAL) return -EINVAL; if (!state->enabled) { if (pwm->state.enabled) brcmstb_pwm_enable_set(p, pwm->hwpwm, false); return 0; } err = brcmstb_pwm_config(chip, pwm, state->duty_cycle, state->period); if (err) return err; if (!pwm->state.enabled) brcmstb_pwm_enable_set(p, pwm->hwpwm, true); return 0; } static const struct pwm_ops brcmstb_pwm_ops = { .apply = brcmstb_pwm_apply, }; static const struct of_device_id brcmstb_pwm_of_match[] = { { .compatible = "brcm,bcm7038-pwm", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, brcmstb_pwm_of_match); static int brcmstb_pwm_probe(struct platform_device *pdev) { struct pwm_chip *chip; struct brcmstb_pwm *p; int ret; chip = devm_pwmchip_alloc(&pdev->dev, 2, sizeof(*p)); if (IS_ERR(chip)) return PTR_ERR(chip); p = to_brcmstb_pwm(chip); p->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(p->clk)) return
dev_err_probe(&pdev->dev, PTR_ERR(p->clk), "failed to obtain clock\n"); platform_set_drvdata(pdev, p); chip->ops = &brcmstb_pwm_ops; p->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(p->base)) return PTR_ERR(p->base); ret = devm_pwmchip_add(&pdev->dev, chip); if (ret) return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n"); return 0; } static int brcmstb_pwm_suspend(struct device *dev) { struct brcmstb_pwm *p = dev_get_drvdata(dev); clk_disable_unprepare(p->clk); return 0; } static int brcmstb_pwm_resume(struct device *dev) { struct brcmstb_pwm *p = dev_get_drvdata(dev); return clk_prepare_enable(p->clk); } static DEFINE_SIMPLE_DEV_PM_OPS(brcmstb_pwm_pm_ops, brcmstb_pwm_suspend, brcmstb_pwm_resume); static struct platform_driver brcmstb_pwm_driver = { .probe = brcmstb_pwm_probe, .driver = { .name = "pwm-brcmstb", .of_match_table = brcmstb_pwm_of_match, .pm = pm_ptr(&brcmstb_pwm_pm_ops), }, }; module_platform_driver(brcmstb_pwm_driver); MODULE_AUTHOR("Florian Fainelli <[email protected]>"); MODULE_DESCRIPTION("Broadcom STB PWM driver"); MODULE_ALIAS("platform:pwm-brcmstb"); MODULE_LICENSE("GPL");
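/*
 * Illustrative device tree consumer of this driver. Only the compatible
 * string comes from the match table above; the unit address, register size
 * and clock phandle are made-up examples:
 *
 *	pwm: pwm@f0408000 {
 *		compatible = "brcm,bcm7038-pwm";
 *		reg = <0xf0408000 0x28>;
 *		#pwm-cells = <2>;
 *		clocks = <&upg_fixed>;
 *	};
 */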
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2024, Linaro Ltd. All rights reserved. */ #include <linux/err.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/usb/pd.h> #include <linux/usb/tcpm.h> #include "qcom_pmic_typec.h" #include "qcom_pmic_typec_pdphy.h" static int qcom_pmic_typec_pdphy_stub_pd_transmit(struct tcpc_dev *tcpc, enum tcpm_transmit_type type, const struct pd_message *msg, unsigned int negotiated_rev) { struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc); struct device *dev = tcpm->dev; dev_dbg(dev, "pdphy_transmit: type=%d\n", type); tcpm_pd_transmit_complete(tcpm->tcpm_port, TCPC_TX_SUCCESS); return 0; } static int qcom_pmic_typec_pdphy_stub_set_pd_rx(struct tcpc_dev *tcpc, bool on) { struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc); struct device *dev = tcpm->dev; dev_dbg(dev, "set_pd_rx: %s\n", on ? "on" : "off"); return 0; } static int qcom_pmic_typec_pdphy_stub_set_roles(struct tcpc_dev *tcpc, bool attached, enum typec_role power_role, enum typec_data_role data_role) { struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc); struct device *dev = tcpm->dev; dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n", data_role, power_role); return 0; } static int qcom_pmic_typec_pdphy_stub_start(struct pmic_typec *tcpm, struct tcpm_port *tcpm_port) { return 0; } static void qcom_pmic_typec_pdphy_stub_stop(struct pmic_typec *tcpm) { } int qcom_pmic_typec_pdphy_stub_probe(struct platform_device *pdev, struct pmic_typec *tcpm) { tcpm->tcpc.set_pd_rx = qcom_pmic_typec_pdphy_stub_set_pd_rx; tcpm->tcpc.set_roles = qcom_pmic_typec_pdphy_stub_set_roles; tcpm->tcpc.pd_transmit = qcom_pmic_typec_pdphy_stub_pd_transmit; tcpm->pdphy_start = qcom_pmic_typec_pdphy_stub_start; tcpm->pdphy_stop = qcom_pmic_typec_pdphy_stub_stop; return 0; }
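/*
 * Note on the stub design: pd_transmit() above completes every message
 * immediately with TCPC_TX_SUCCESS, and set_pd_rx()/set_roles() only log.
 * This lets the TCPM state machine run on PMIC blocks that lack a PD PHY
 * while basic Type-C port management keeps working.
 */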
// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Copyright (c) 2016 Andreas Färber * Copyright (c) 2016 BayLibre, Inc. * Author: Kevin Hilman <[email protected]> */ /dts-v1/; #include "meson-gxbb-p20x.dtsi" #include <dt-bindings/input/input.h> #include <dt-bindings/sound/meson-aiu.h> / { compatible = "amlogic,p200", "amlogic,meson-gxbb"; model = "Amlogic Meson GXBB P200 Development Board"; spdif_dit: audio-codec-0 { #sound-dai-cells = <0>; compatible = "linux,spdif-dit"; sound-name-prefix = "DIT"; }; avdd18_usb_adc: regulator-avdd18-usb-adc { compatible = "regulator-fixed"; regulator-name = "AVDD18_USB_ADC"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; }; keys { compatible = "adc-keys"; io-channels = <&saradc 0>; io-channel-names = "buttons"; keyup-threshold-microvolt = <1800000>; button-home { label = "Home"; linux,code = <KEY_HOME>; press-threshold-microvolt = <900000>; /* 50% */ }; button-esc { label = "Esc"; linux,code = <KEY_ESC>; press-threshold-microvolt = <684000>; /* 38% */ }; button-up { label = "Volume Up"; linux,code = <KEY_VOLUMEUP>; press-threshold-microvolt = <468000>; /* 26% */ }; button-down { label = "Volume Down"; linux,code = <KEY_VOLUMEDOWN>; press-threshold-microvolt = <252000>; /* 14% */ }; button-menu { label = "Menu"; linux,code = <KEY_MENU>; press-threshold-microvolt = <0>; /* 0% */ }; }; sound { compatible = "amlogic,gx-sound-card"; model = "P200"; clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; assigned-clock-parents = <0>, <0>, <0>; assigned-clock-rates = <294912000>, <270950400>, <393216000>; dai-link-0 { sound-dai = <&aiu AIU_CPU CPU_I2S_FIFO>; }; dai-link-1 { sound-dai = <&aiu AIU_CPU CPU_SPDIF_FIFO>; }; dai-link-2 { sound-dai = <&aiu AIU_CPU CPU_I2S_ENCODER>; dai-format = "i2s"; mclk-fs = <256>; codec-0 { sound-dai = <&aiu AIU_HDMI CTRL_I2S>; }; }; dai-link-3 { sound-dai = <&aiu AIU_CPU CPU_SPDIF_ENCODER>; codec-0 { sound-dai = <&spdif_dit>; }; }; dai-link-4 { sound-dai = <&aiu AIU_HDMI CTRL_OUT>; codec-0 { sound-dai = <&hdmi_tx>; }; }; }; }; &aiu { status = "okay"; pinctrl-0 = <&spdif_out_y_pins>; pinctrl-names = "default"; }; &ethmac { status = "okay"; pinctrl-0 = <&eth_rgmii_pins>; pinctrl-names = "default"; phy-handle = <&eth_phy0>; phy-mode = "rgmii"; amlogic,tx-delay-ns = <2>; mdio { compatible = "snps,dwmac-mdio"; #address-cells = <1>; #size-cells = <0>; eth_phy0: ethernet-phy@3 { /* Micrel KSZ9031 (0x00221620) */ reg = <3>; reset-assert-us = <10000>; reset-deassert-us = <30000>; reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; interrupt-parent = <&gpio_intc>; /* MAC_INTR on GPIOZ_15 */ interrupts = <29 IRQ_TYPE_LEVEL_LOW>; }; }; }; &i2c_B { status = "okay"; pinctrl-0 = <&i2c_b_pins>; pinctrl-names = "default"; }; &saradc { status = "okay"; vref-supply = <&avdd18_usb_adc>; };
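/*
 * The adc-keys press thresholds above are fractions of
 * keyup-threshold-microvolt (1800000 uV), matching the percentage noted next
 * to each button, e.g. 38% x 1800000 uV = 684000 uV for the Esc button.
 */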
// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Copyright (c) 2016 Andreas Färber * Copyright (c) 2016 BayLibre, Inc. * Author: Kevin Hilman <[email protected]> */ #include "meson-gxbb.dtsi" / { aliases { serial0 = &uart_AO; ethernet0 = &ethmac; }; chosen { stdout-path = "serial0:115200n8"; }; memory@0 { device_type = "memory"; reg = <0x0 0x0 0x0 0x40000000>; }; usb_pwr: regulator-usb-pwrs { compatible = "regulator-fixed"; regulator-name = "USB_PWR"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; /* signal name in schematic: USB_PWR_EN */ gpio = <&gpio GPIODV_24 GPIO_ACTIVE_HIGH>; enable-active-high; }; vddio_card: gpio-regulator { compatible = "regulator-gpio"; regulator-name = "VDDIO_CARD"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; gpios = <&gpio_ao GPIOAO_5 GPIO_ACTIVE_HIGH>; gpios-states = <1>; /* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */ states = <1800000 0>, <3300000 1>; regulator-settling-time-up-us = <10000>; regulator-settling-time-down-us = <150000>; }; vddio_boot: regulator-vddio-boot { compatible = "regulator-fixed"; regulator-name = "VDDIO_BOOT"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; }; vddao_3v3: regulator-vddao-3v3 { compatible = "regulator-fixed"; regulator-name = "VDDAO_3V3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; }; vcc_3v3: regulator-vcc-3v3 { compatible = "regulator-fixed"; regulator-name = "VCC_3V3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; }; emmc_pwrseq: emmc-pwrseq { compatible = "mmc-pwrseq-emmc"; reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>; }; wifi32k: wifi32k { compatible = "pwm-clock"; #clock-cells = <0>; clock-frequency = <32768>; pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768KHz */ }; sdio_pwrseq: sdio-pwrseq { compatible = "mmc-pwrseq-simple"; reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>; clocks = <&wifi32k>; clock-names = "ext_clock"; }; cvbs_connector: cvbs-connector { compatible = "composite-video-connector"; port { cvbs_connector_in: endpoint { remote-endpoint = <&cvbs_vdac_out>; }; }; }; hdmi-connector { compatible = "hdmi-connector"; type = "a"; port { hdmi_connector_in: endpoint { remote-endpoint = <&hdmi_tx_tmds_out>; }; }; }; }; &cec_AO { status = "okay"; pinctrl-0 = <&ao_cec_pins>; pinctrl-names = "default"; hdmi-phandle = <&hdmi_tx>; }; &cvbs_vdac_port { cvbs_vdac_out: endpoint { remote-endpoint = <&cvbs_connector_in>; }; }; &hdmi_tx { status = "okay"; pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>; pinctrl-names = "default"; }; &hdmi_tx_tmds_port { hdmi_tx_tmds_out: endpoint { remote-endpoint = <&hdmi_connector_in>; }; }; &ir { status = "okay"; pinctrl-0 = <&remote_input_ao_pins>; pinctrl-names = "default"; }; &pwm_ef { status = "okay"; pinctrl-0 = <&pwm_e_pins>; pinctrl-names = "default"; clocks = <&clkc CLKID_FCLK_DIV4>; clock-names = "clkin0"; }; /* Wireless SDIO Module */ &sd_emmc_a { status = "okay"; pinctrl-0 = <&sdio_pins>; pinctrl-1 = <&sdio_clk_gate_pins>; pinctrl-names = "default", "clk-gate"; #address-cells = <1>; #size-cells = <0>; bus-width = <4>; cap-sd-highspeed; max-frequency = <50000000>; non-removable; disable-wp; /* WiFi firmware requires power to be kept while in suspend */ keep-power-in-suspend; mmc-pwrseq = <&sdio_pwrseq>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; brcmf: wifi@1 { reg = <1>; compatible = "brcm,bcm4329-fmac"; }; }; /* SD card */ &sd_emmc_b { status = "okay"; pinctrl-0 = <&sdcard_pins>; pinctrl-1 = 
<&sdcard_clk_gate_pins>; pinctrl-names = "default", "clk-gate"; bus-width = <4>; cap-sd-highspeed; sd-uhs-sdr12; sd-uhs-sdr25; sd-uhs-sdr50; max-frequency = <100000000>; disable-wp; cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_card>; }; /* eMMC */ &sd_emmc_c { status = "okay"; pinctrl-0 = <&emmc_pins>, <&emmc_ds_pins>; pinctrl-1 = <&emmc_clk_gate_pins>; pinctrl-names = "default", "clk-gate"; bus-width = <8>; cap-mmc-highspeed; max-frequency = <200000000>; non-removable; disable-wp; mmc-ddr-1_8v; mmc-hs200-1_8v; mmc-pwrseq = <&emmc_pwrseq>; vmmc-supply = <&vcc_3v3>; vqmmc-supply = <&vddio_boot>; }; /* This UART is brought out to the DB9 connector */ &uart_AO { status = "okay"; pinctrl-0 = <&uart_ao_a_pins>; pinctrl-names = "default"; }; &usb0_phy { status = "okay"; phy-supply = <&usb_pwr>; }; &usb1_phy { status = "okay"; }; &usb0 { status = "okay"; }; &usb1 { status = "okay"; };
// SPDX-License-Identifier: GPL-2.0-only /* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * Authors: Adrian Hunter * Artem Bityutskiy (Битюцкий Артём) */ /* * This file implements the functions that access LEB properties and their * categories. LEBs are categorized based on the needs of UBIFS, and the * categories are stored as either heaps or lists to provide a fast way of * finding a LEB in a particular category. For example, UBIFS may need to find * an empty LEB for the journal, or a very dirty LEB for garbage collection. */ #include "ubifs.h" /** * get_heap_comp_val - get the LEB properties value for heap comparisons. * @lprops: LEB properties * @cat: LEB category */ static int get_heap_comp_val(struct ubifs_lprops *lprops, int cat) { switch (cat) { case LPROPS_FREE: return lprops->free; case LPROPS_DIRTY_IDX: return lprops->free + lprops->dirty; default: return lprops->dirty; } } /** * move_up_lpt_heap - move a new heap entry up as far as possible. * @c: UBIFS file-system description object * @heap: LEB category heap * @lprops: LEB properties to move * @cat: LEB category * * New entries to a heap are added at the bottom and then moved up until the * parent's value is greater. In the case of LPT's category heaps, the value * is either the amount of free space or the amount of dirty space, depending * on the category. */ static void move_up_lpt_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, struct ubifs_lprops *lprops, int cat) { int val1, val2, hpos; hpos = lprops->hpos; if (!hpos) return; /* Already top of the heap */ val1 = get_heap_comp_val(lprops, cat); /* Compare to parent and, if greater, move up the heap */ do { int ppos = (hpos - 1) / 2; val2 = get_heap_comp_val(heap->arr[ppos], cat); if (val2 >= val1) return; /* Greater than parent so move up */ heap->arr[ppos]->hpos = hpos; heap->arr[hpos] = heap->arr[ppos]; heap->arr[ppos] = lprops; lprops->hpos = ppos; hpos = ppos; } while (hpos); } /** * adjust_lpt_heap - move a changed heap entry up or down the heap. * @c: UBIFS file-system description object * @heap: LEB category heap * @lprops: LEB properties to move * @hpos: heap position of @lprops * @cat: LEB category * * Changed entries in a heap are moved up or down until the parent's value is * greater. In the case of LPT's category heaps, the value is either the amount * of free space or the amount of dirty space, depending on the category. 
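 * * The heap lives in a plain array, so for a 0-based position hpos the parent * sits at (hpos - 1) / 2; e.g. an entry at hpos 5 is compared against its * parent at hpos 2 and swapped while its value is greater.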
*/ static void adjust_lpt_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, struct ubifs_lprops *lprops, int hpos, int cat) { int val1, val2, val3, cpos; val1 = get_heap_comp_val(lprops, cat); /* Compare to parent and, if greater than parent, move up the heap */ if (hpos) { int ppos = (hpos - 1) / 2; val2 = get_heap_comp_val(heap->arr[ppos], cat); if (val1 > val2) { /* Greater than parent so move up */ while (1) { heap->arr[ppos]->hpos = hpos; heap->arr[hpos] = heap->arr[ppos]; heap->arr[ppos] = lprops; lprops->hpos = ppos; hpos = ppos; if (!hpos) return; ppos = (hpos - 1) / 2; val2 = get_heap_comp_val(heap->arr[ppos], cat); if (val1 <= val2) return; /* Still greater than parent so keep going */ } } } /* Not greater than parent, so compare to children */ while (1) { /* Compare to left child */ cpos = hpos * 2 + 1; if (cpos >= heap->cnt) return; val2 = get_heap_comp_val(heap->arr[cpos], cat); if (val1 < val2) { /* Less than left child, so promote biggest child */ if (cpos + 1 < heap->cnt) { val3 = get_heap_comp_val(heap->arr[cpos + 1], cat); if (val3 > val2) cpos += 1; /* Right child is bigger */ } heap->arr[cpos]->hpos = hpos; heap->arr[hpos] = heap->arr[cpos]; heap->arr[cpos] = lprops; lprops->hpos = cpos; hpos = cpos; continue; } /* Compare to right child */ cpos += 1; if (cpos >= heap->cnt) return; val3 = get_heap_comp_val(heap->arr[cpos], cat); if (val1 < val3) { /* Less than right child, so promote right child */ heap->arr[cpos]->hpos = hpos; heap->arr[hpos] = heap->arr[cpos]; heap->arr[cpos] = lprops; lprops->hpos = cpos; hpos = cpos; continue; } return; } } /** * add_to_lpt_heap - add LEB properties to a LEB category heap. * @c: UBIFS file-system description object * @lprops: LEB properties to add * @cat: LEB category * * This function returns %1 if @lprops is added to the heap for LEB category * @cat, otherwise %0 is returned because the heap is full. */ static int add_to_lpt_heap(struct ubifs_info *c, struct ubifs_lprops *lprops, int cat) { struct ubifs_lpt_heap *heap = &c->lpt_heap[cat - 1]; if (heap->cnt >= heap->max_cnt) { const int b = LPT_HEAP_SZ / 2 - 1; int cpos, val1, val2; /* Compare to some other LEB on the bottom of heap */ /* Pick a position kind of randomly */ cpos = (((size_t)lprops >> 4) & b) + b; ubifs_assert(c, cpos >= b); ubifs_assert(c, cpos < LPT_HEAP_SZ); ubifs_assert(c, cpos < heap->cnt); val1 = get_heap_comp_val(lprops, cat); val2 = get_heap_comp_val(heap->arr[cpos], cat); if (val1 > val2) { struct ubifs_lprops *lp; lp = heap->arr[cpos]; lp->flags &= ~LPROPS_CAT_MASK; lp->flags |= LPROPS_UNCAT; list_add(&lp->list, &c->uncat_list); lprops->hpos = cpos; heap->arr[cpos] = lprops; move_up_lpt_heap(c, heap, lprops, cat); dbg_check_heap(c, heap, cat, lprops->hpos); return 1; /* Added to heap */ } dbg_check_heap(c, heap, cat, -1); return 0; /* Not added to heap */ } else { lprops->hpos = heap->cnt++; heap->arr[lprops->hpos] = lprops; move_up_lpt_heap(c, heap, lprops, cat); dbg_check_heap(c, heap, cat, lprops->hpos); return 1; /* Added to heap */ } } /** * remove_from_lpt_heap - remove LEB properties from a LEB category heap. 
* @c: UBIFS file-system description object * @lprops: LEB properties to remove * @cat: LEB category */ static void remove_from_lpt_heap(struct ubifs_info *c, struct ubifs_lprops *lprops, int cat) { struct ubifs_lpt_heap *heap; int hpos = lprops->hpos; heap = &c->lpt_heap[cat - 1]; ubifs_assert(c, hpos >= 0 && hpos < heap->cnt); ubifs_assert(c, heap->arr[hpos] == lprops); heap->cnt -= 1; if (hpos < heap->cnt) { heap->arr[hpos] = heap->arr[heap->cnt]; heap->arr[hpos]->hpos = hpos; adjust_lpt_heap(c, heap, heap->arr[hpos], hpos, cat); } dbg_check_heap(c, heap, cat, -1); } /** * lpt_heap_replace - replace lprops in a category heap. * @c: UBIFS file-system description object * @new_lprops: LEB properties with which to replace * @cat: LEB category * * During commit it is sometimes necessary to copy a pnode (see dirty_cow_pnode) * and the lprops that the pnode contains. When that happens, references in * the category heaps to those lprops must be updated to point to the new * lprops. This function does that. */ static void lpt_heap_replace(struct ubifs_info *c, struct ubifs_lprops *new_lprops, int cat) { struct ubifs_lpt_heap *heap; int hpos = new_lprops->hpos; heap = &c->lpt_heap[cat - 1]; heap->arr[hpos] = new_lprops; } /** * ubifs_add_to_cat - add LEB properties to a category list or heap. * @c: UBIFS file-system description object * @lprops: LEB properties to add * @cat: LEB category to which to add * * LEB properties are categorized to enable fast find operations. */ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, int cat) { switch (cat) { case LPROPS_DIRTY: case LPROPS_DIRTY_IDX: case LPROPS_FREE: if (add_to_lpt_heap(c, lprops, cat)) break; /* No more room on heap so make it un-categorized */ cat = LPROPS_UNCAT; fallthrough; case LPROPS_UNCAT: list_add(&lprops->list, &c->uncat_list); break; case LPROPS_EMPTY: list_add(&lprops->list, &c->empty_list); break; case LPROPS_FREEABLE: list_add(&lprops->list, &c->freeable_list); c->freeable_cnt += 1; break; case LPROPS_FRDI_IDX: list_add(&lprops->list, &c->frdi_idx_list); break; default: ubifs_assert(c, 0); } lprops->flags &= ~LPROPS_CAT_MASK; lprops->flags |= cat; c->in_a_category_cnt += 1; ubifs_assert(c, c->in_a_category_cnt <= c->main_lebs); } /** * ubifs_remove_from_cat - remove LEB properties from a category list or heap. * @c: UBIFS file-system description object * @lprops: LEB properties to remove * @cat: LEB category from which to remove * * LEB properties are categorized to enable fast find operations. */ static void ubifs_remove_from_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, int cat) { switch (cat) { case LPROPS_DIRTY: case LPROPS_DIRTY_IDX: case LPROPS_FREE: remove_from_lpt_heap(c, lprops, cat); break; case LPROPS_FREEABLE: c->freeable_cnt -= 1; ubifs_assert(c, c->freeable_cnt >= 0); fallthrough; case LPROPS_UNCAT: case LPROPS_EMPTY: case LPROPS_FRDI_IDX: ubifs_assert(c, !list_empty(&lprops->list)); list_del(&lprops->list); break; default: ubifs_assert(c, 0); } c->in_a_category_cnt -= 1; ubifs_assert(c, c->in_a_category_cnt >= 0); } /** * ubifs_replace_cat - replace lprops in a category list or heap. * @c: UBIFS file-system description object * @old_lprops: LEB properties to replace * @new_lprops: LEB properties with which to replace * * During commit it is sometimes necessary to copy a pnode (see dirty_cow_pnode) * and the lprops that the pnode contains. When that happens, references in * category lists and heaps must be replaced. This function does that. 
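 * * Note that for the heap categories the replacement is a constant-time * pointer swap (see lpt_heap_replace() above): the copied lprops carries the * same free and dirty values as the original, so the heap ordering is * unaffected.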
*/ void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops, struct ubifs_lprops *new_lprops) { int cat; cat = new_lprops->flags & LPROPS_CAT_MASK; switch (cat) { case LPROPS_DIRTY: case LPROPS_DIRTY_IDX: case LPROPS_FREE: lpt_heap_replace(c, new_lprops, cat); break; case LPROPS_UNCAT: case LPROPS_EMPTY: case LPROPS_FREEABLE: case LPROPS_FRDI_IDX: list_replace(&old_lprops->list, &new_lprops->list); break; default: ubifs_assert(c, 0); } } /** * ubifs_ensure_cat - ensure LEB properties are categorized. * @c: UBIFS file-system description object * @lprops: LEB properties * * An LEB may have fallen off the bottom of a heap, and ended up as * un-categorized even though it has enough space for us now. If that is the * case this function will put the LEB back onto a heap. */ void ubifs_ensure_cat(struct ubifs_info *c, struct ubifs_lprops *lprops) { int cat = lprops->flags & LPROPS_CAT_MASK; if (cat != LPROPS_UNCAT) return; cat = ubifs_categorize_lprops(c, lprops); if (cat == LPROPS_UNCAT) return; ubifs_remove_from_cat(c, lprops, LPROPS_UNCAT); ubifs_add_to_cat(c, lprops, cat); } /** * ubifs_categorize_lprops - categorize LEB properties. * @c: UBIFS file-system description object * @lprops: LEB properties to categorize * * LEB properties are categorized to enable fast find operations. This function * returns the LEB category to which the LEB properties belong. Note however * that if the LEB category is stored as a heap and the heap is full, the * LEB properties may have their category changed to %LPROPS_UNCAT. */ int ubifs_categorize_lprops(const struct ubifs_info *c, const struct ubifs_lprops *lprops) { if (lprops->flags & LPROPS_TAKEN) return LPROPS_UNCAT; if (lprops->free == c->leb_size) { ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); return LPROPS_EMPTY; } if (lprops->free + lprops->dirty == c->leb_size) { if (lprops->flags & LPROPS_INDEX) return LPROPS_FRDI_IDX; else return LPROPS_FREEABLE; } if (lprops->flags & LPROPS_INDEX) { if (lprops->dirty + lprops->free >= c->min_idx_node_sz) return LPROPS_DIRTY_IDX; } else { if (lprops->dirty >= c->dead_wm && lprops->dirty > lprops->free) return LPROPS_DIRTY; if (lprops->free > 0) return LPROPS_FREE; } return LPROPS_UNCAT; } /** * change_category - change LEB properties category. * @c: UBIFS file-system description object * @lprops: LEB properties to re-categorize * * LEB properties are categorized to enable fast find operations. When the LEB * properties change they must be re-categorized. */ static void change_category(struct ubifs_info *c, struct ubifs_lprops *lprops) { int old_cat = lprops->flags & LPROPS_CAT_MASK; int new_cat = ubifs_categorize_lprops(c, lprops); if (old_cat == new_cat) { struct ubifs_lpt_heap *heap; /* lprops on a heap now must be moved up or down */ if (new_cat < 1 || new_cat > LPROPS_HEAP_CNT) return; /* Not on a heap */ heap = &c->lpt_heap[new_cat - 1]; adjust_lpt_heap(c, heap, lprops, lprops->hpos, new_cat); } else { ubifs_remove_from_cat(c, lprops, old_cat); ubifs_add_to_cat(c, lprops, new_cat); } } /** * ubifs_calc_dark - calculate LEB dark space size. * @c: the UBIFS file-system description object * @spc: amount of free and dirty space in the LEB * * This function calculates and returns the amount of dark space in an LEB * which has @spc bytes of free and dirty space. * * UBIFS tries to account for the space which might not be usable, and this * space is called "dark space". For example, if an LEB has only %512 free * bytes, it is dark space, because it cannot fit a large data node.
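 * * For example (illustrative numbers): with @c->dark_wm at 2048, an LEB with * @spc = 512 counts entirely as dark space, while an LEB with @spc = 8192 * contributes only the 2048-byte watermark.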
*/ int ubifs_calc_dark(const struct ubifs_info *c, int spc) { ubifs_assert(c, !(spc & 7)); if (spc < c->dark_wm) return spc; /* * If we have slightly more space than the dark space watermark, we can * safely assume that we'll be able to write a node of the * smallest size there. */ if (spc - c->dark_wm < MIN_WRITE_SZ) return spc - MIN_WRITE_SZ; return c->dark_wm; } /** * is_lprops_dirty - determine if LEB properties are dirty. * @c: the UBIFS file-system description object * @lprops: LEB properties to test */ static int is_lprops_dirty(struct ubifs_info *c, struct ubifs_lprops *lprops) { struct ubifs_pnode *pnode; int pos; pos = (lprops->lnum - c->main_first) & (UBIFS_LPT_FANOUT - 1); pnode = (struct ubifs_pnode *)container_of(lprops - pos, struct ubifs_pnode, lprops[0]); return !test_bit(COW_CNODE, &pnode->flags) && test_bit(DIRTY_CNODE, &pnode->flags); } /** * ubifs_change_lp - change LEB properties. * @c: the UBIFS file-system description object * @lp: LEB properties to change * @free: new free space amount * @dirty: new dirty space amount * @flags: new flags * @idx_gc_cnt: change to the count of @idx_gc list * * This function changes LEB properties (@free, @dirty or @flags). However, the * property which has the %LPROPS_NC value is not changed. Returns a pointer to * the updated LEB properties on success and a negative error code on failure. * * Note, the LEB properties may have had to be copied (due to COW) and * consequently the pointer returned may not be the same as the pointer * passed. */ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, const struct ubifs_lprops *lp, int free, int dirty, int flags, int idx_gc_cnt) { /* * This is the only function that is allowed to change lprops, so we * discard the "const" qualifier. */ struct ubifs_lprops *lprops = (struct ubifs_lprops *)lp; dbg_lp("LEB %d, free %d, dirty %d, flags %d", lprops->lnum, free, dirty, flags); ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); ubifs_assert(c, c->lst.empty_lebs >= 0 && c->lst.empty_lebs <= c->main_lebs); ubifs_assert(c, c->freeable_cnt >= 0); ubifs_assert(c, c->freeable_cnt <= c->main_lebs); ubifs_assert(c, c->lst.taken_empty_lebs >= 0); ubifs_assert(c, c->lst.taken_empty_lebs <= c->lst.empty_lebs); ubifs_assert(c, !(c->lst.total_free & 7) && !(c->lst.total_dirty & 7)); ubifs_assert(c, !(c->lst.total_dead & 7) && !(c->lst.total_dark & 7)); ubifs_assert(c, !(c->lst.total_used & 7)); ubifs_assert(c, free == LPROPS_NC || free >= 0); ubifs_assert(c, dirty == LPROPS_NC || dirty >= 0); if (!is_lprops_dirty(c, lprops)) { lprops = ubifs_lpt_lookup_dirty(c, lprops->lnum); if (IS_ERR(lprops)) return lprops; } else ubifs_assert(c, lprops == ubifs_lpt_lookup_dirty(c, lprops->lnum)); ubifs_assert(c, !(lprops->free & 7) && !(lprops->dirty & 7)); spin_lock(&c->space_lock); if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size) c->lst.taken_empty_lebs -= 1; if (!(lprops->flags & LPROPS_INDEX)) { int old_spc; old_spc = lprops->free + lprops->dirty; if (old_spc < c->dead_wm) c->lst.total_dead -= old_spc; else c->lst.total_dark -= ubifs_calc_dark(c, old_spc); c->lst.total_used -= c->leb_size - old_spc; } if (free != LPROPS_NC) { free = ALIGN(free, 8); c->lst.total_free += free - lprops->free; /* Increase or decrease empty LEBs counter if needed */ if (free == c->leb_size) { if (lprops->free != c->leb_size) c->lst.empty_lebs += 1; } else if (lprops->free == c->leb_size) c->lst.empty_lebs -= 1; lprops->free = free; } if (dirty != LPROPS_NC) { dirty = ALIGN(dirty, 8); c->lst.total_dirty +=
dirty - lprops->dirty; lprops->dirty = dirty; } if (flags != LPROPS_NC) { /* Take care of the indexing LEBs counter if needed */ if ((lprops->flags & LPROPS_INDEX)) { if (!(flags & LPROPS_INDEX)) c->lst.idx_lebs -= 1; } else if (flags & LPROPS_INDEX) c->lst.idx_lebs += 1; lprops->flags = flags; } if (!(lprops->flags & LPROPS_INDEX)) { int new_spc; new_spc = lprops->free + lprops->dirty; if (new_spc < c->dead_wm) c->lst.total_dead += new_spc; else c->lst.total_dark += ubifs_calc_dark(c, new_spc); c->lst.total_used += c->leb_size - new_spc; } if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size) c->lst.taken_empty_lebs += 1; change_category(c, lprops); c->idx_gc_cnt += idx_gc_cnt; spin_unlock(&c->space_lock); return lprops; } /** * ubifs_get_lp_stats - get lprops statistics. * @c: UBIFS file-system description object * @lst: return statistics */ void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst) { spin_lock(&c->space_lock); memcpy(lst, &c->lst, sizeof(struct ubifs_lp_stats)); spin_unlock(&c->space_lock); } /** * ubifs_change_one_lp - change LEB properties. * @c: the UBIFS file-system description object * @lnum: LEB to change properties for * @free: amount of free space * @dirty: amount of dirty space * @flags_set: flags to set * @flags_clean: flags to clean * @idx_gc_cnt: change to the count of idx_gc list * * This function changes properties of LEB @lnum. It is a helper wrapper over * 'ubifs_change_lp()' which hides lprops get/release. The arguments are the * same as in the case of 'ubifs_change_lp()'. Returns zero in case of success and * a negative error code in case of failure. */ int ubifs_change_one_lp(struct ubifs_info *c, int lnum, int free, int dirty, int flags_set, int flags_clean, int idx_gc_cnt) { int err = 0, flags; const struct ubifs_lprops *lp; ubifs_get_lprops(c); lp = ubifs_lpt_lookup_dirty(c, lnum); if (IS_ERR(lp)) { err = PTR_ERR(lp); goto out; } flags = (lp->flags | flags_set) & ~flags_clean; lp = ubifs_change_lp(c, lp, free, dirty, flags, idx_gc_cnt); if (IS_ERR(lp)) err = PTR_ERR(lp); out: ubifs_release_lprops(c); if (err) ubifs_err(c, "cannot change properties of LEB %d, error %d", lnum, err); return err; } /** * ubifs_update_one_lp - update LEB properties. * @c: the UBIFS file-system description object * @lnum: LEB to change properties for * @free: amount of free space * @dirty: amount of dirty space to add * @flags_set: flags to set * @flags_clean: flags to clean * * This function is the same as 'ubifs_change_one_lp()' but @dirty is added to * the current dirty space rather than replacing it. */ int ubifs_update_one_lp(struct ubifs_info *c, int lnum, int free, int dirty, int flags_set, int flags_clean) { int err = 0, flags; const struct ubifs_lprops *lp; ubifs_get_lprops(c); lp = ubifs_lpt_lookup_dirty(c, lnum); if (IS_ERR(lp)) { err = PTR_ERR(lp); goto out; } flags = (lp->flags | flags_set) & ~flags_clean; lp = ubifs_change_lp(c, lp, free, lp->dirty + dirty, flags, 0); if (IS_ERR(lp)) err = PTR_ERR(lp); out: ubifs_release_lprops(c); if (err) ubifs_err(c, "cannot update properties of LEB %d, error %d", lnum, err); return err; } /** * ubifs_read_one_lp - read LEB properties. * @c: the UBIFS file-system description object * @lnum: LEB to read properties for * @lp: where to store read properties * * This helper function reads properties of a LEB @lnum and stores them in @lp. * Returns zero in case of success and a negative error code in case of * failure.
*/ int ubifs_read_one_lp(struct ubifs_info *c, int lnum, struct ubifs_lprops *lp) { int err = 0; const struct ubifs_lprops *lpp; ubifs_get_lprops(c); lpp = ubifs_lpt_lookup(c, lnum); if (IS_ERR(lpp)) { err = PTR_ERR(lpp); ubifs_err(c, "cannot read properties of LEB %d, error %d", lnum, err); goto out; } memcpy(lp, lpp, sizeof(struct ubifs_lprops)); out: ubifs_release_lprops(c); return err; } /** * ubifs_fast_find_free - try to find a LEB with free space quickly. * @c: the UBIFS file-system description object * * This function returns LEB properties for a LEB with free space or %NULL if * the function is unable to find a LEB quickly. */ const struct ubifs_lprops *ubifs_fast_find_free(struct ubifs_info *c) { struct ubifs_lprops *lprops; struct ubifs_lpt_heap *heap; ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); heap = &c->lpt_heap[LPROPS_FREE - 1]; if (heap->cnt == 0) return NULL; lprops = heap->arr[0]; ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); return lprops; } /** * ubifs_fast_find_empty - try to find an empty LEB quickly. * @c: the UBIFS file-system description object * * This function returns LEB properties for an empty LEB or %NULL if the * function is unable to find an empty LEB quickly. */ const struct ubifs_lprops *ubifs_fast_find_empty(struct ubifs_info *c) { struct ubifs_lprops *lprops; ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); if (list_empty(&c->empty_list)) return NULL; lprops = list_entry(c->empty_list.next, struct ubifs_lprops, list); ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); ubifs_assert(c, lprops->free == c->leb_size); return lprops; } /** * ubifs_fast_find_freeable - try to find a freeable LEB quickly. * @c: the UBIFS file-system description object * * This function returns LEB properties for a freeable LEB or %NULL if the * function is unable to find a freeable LEB quickly. */ const struct ubifs_lprops *ubifs_fast_find_freeable(struct ubifs_info *c) { struct ubifs_lprops *lprops; ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); if (list_empty(&c->freeable_list)) return NULL; lprops = list_entry(c->freeable_list.next, struct ubifs_lprops, list); ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size); ubifs_assert(c, c->freeable_cnt > 0); return lprops; } /** * ubifs_fast_find_frdi_idx - try to find a freeable index LEB quickly. * @c: the UBIFS file-system description object * * This function returns LEB properties for a freeable index LEB or %NULL if the * function is unable to find a freeable index LEB quickly. */ const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c) { struct ubifs_lprops *lprops; ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); if (list_empty(&c->frdi_idx_list)) return NULL; lprops = list_entry(c->frdi_idx_list.next, struct ubifs_lprops, list); ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); ubifs_assert(c, (lprops->flags & LPROPS_INDEX)); ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size); return lprops; } /* * Everything below is related to debugging. */ /** * dbg_check_cats - check category heaps and lists. * @c: UBIFS file-system description object * * This function returns %0 on success and a negative error code on failure. 
*/ int dbg_check_cats(struct ubifs_info *c) { struct ubifs_lprops *lprops; struct list_head *pos; int i, cat; if (!dbg_is_chk_gen(c) && !dbg_is_chk_lprops(c)) return 0; list_for_each_entry(lprops, &c->empty_list, list) { if (lprops->free != c->leb_size) { ubifs_err(c, "non-empty LEB %d on empty list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { ubifs_err(c, "taken LEB %d on empty list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } } i = 0; list_for_each_entry(lprops, &c->freeable_list, list) { if (lprops->free + lprops->dirty != c->leb_size) { ubifs_err(c, "non-freeable LEB %d on freeable list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { ubifs_err(c, "taken LEB %d on freeable list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } i += 1; } if (i != c->freeable_cnt) { ubifs_err(c, "freeable list count %d expected %d", i, c->freeable_cnt); return -EINVAL; } i = 0; list_for_each(pos, &c->idx_gc) i += 1; if (i != c->idx_gc_cnt) { ubifs_err(c, "idx_gc list count %d expected %d", i, c->idx_gc_cnt); return -EINVAL; } list_for_each_entry(lprops, &c->frdi_idx_list, list) { if (lprops->free + lprops->dirty != c->leb_size) { ubifs_err(c, "non-freeable LEB %d on frdi_idx list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { ubifs_err(c, "taken LEB %d on frdi_idx list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } if (!(lprops->flags & LPROPS_INDEX)) { ubifs_err(c, "non-index LEB %d on frdi_idx list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } } for (cat = 1; cat <= LPROPS_HEAP_CNT; cat++) { struct ubifs_lpt_heap *heap = &c->lpt_heap[cat - 1]; for (i = 0; i < heap->cnt; i++) { lprops = heap->arr[i]; if (!lprops) { ubifs_err(c, "null ptr in LPT heap cat %d", cat); return -EINVAL; } if (lprops->hpos != i) { ubifs_err(c, "bad ptr in LPT heap cat %d", cat); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { ubifs_err(c, "taken LEB in LPT heap cat %d", cat); return -EINVAL; } } } return 0; } void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, int add_pos) { int i = 0, j, err = 0; if (!dbg_is_chk_gen(c) && !dbg_is_chk_lprops(c)) return; for (i = 0; i < heap->cnt; i++) { struct ubifs_lprops *lprops = heap->arr[i]; struct ubifs_lprops *lp; if (i != add_pos) if ((lprops->flags & LPROPS_CAT_MASK) != cat) { err = 1; goto out; } if (lprops->hpos != i) { err = 2; goto out; } lp = ubifs_lpt_lookup(c, lprops->lnum); if (IS_ERR(lp)) { err = 3; goto out; } if (lprops != lp) { ubifs_err(c, "lprops %zx lp %zx lprops->lnum %d lp->lnum %d", (size_t)lprops, (size_t)lp, lprops->lnum, lp->lnum); err = 4; goto out; } for (j = 0; j < i; j++) { lp = heap->arr[j]; if (lp == lprops) { err = 5; goto out; } if (lp->lnum == lprops->lnum) { err = 6; goto out; } } } out: if (err) { ubifs_err(c, "failed cat %d hpos %d err %d", cat, i, err); dump_stack(); ubifs_dump_heap(c, heap, cat); } } /** * scan_check_cb - scan callback. 
* @c: the UBIFS file-system description object * @lp: LEB properties to scan * @in_tree: whether the LEB properties are in main memory * @arg: lprops statistics to update * * This function returns a code that indicates whether the scan should continue * (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree * in main memory (%LPT_SCAN_ADD), or whether the scan should stop * (%LPT_SCAN_STOP). */ static int scan_check_cb(struct ubifs_info *c, const struct ubifs_lprops *lp, int in_tree, void *arg) { struct ubifs_lp_stats *lst = arg; struct ubifs_scan_leb *sleb; struct ubifs_scan_node *snod; int cat, lnum = lp->lnum, is_idx = 0, used = 0, free, dirty, ret; void *buf = NULL; cat = lp->flags & LPROPS_CAT_MASK; if (cat != LPROPS_UNCAT) { cat = ubifs_categorize_lprops(c, lp); if (cat != (lp->flags & LPROPS_CAT_MASK)) { ubifs_err(c, "bad LEB category %d expected %d", (lp->flags & LPROPS_CAT_MASK), cat); return -EINVAL; } } /* Check lp is on its category list (if it has one) */ if (in_tree) { struct list_head *list = NULL; switch (cat) { case LPROPS_EMPTY: list = &c->empty_list; break; case LPROPS_FREEABLE: list = &c->freeable_list; break; case LPROPS_FRDI_IDX: list = &c->frdi_idx_list; break; case LPROPS_UNCAT: list = &c->uncat_list; break; } if (list) { struct ubifs_lprops *lprops; int found = 0; list_for_each_entry(lprops, list, list) { if (lprops == lp) { found = 1; break; } } if (!found) { ubifs_err(c, "bad LPT list (category %d)", cat); return -EINVAL; } } } /* Check lp is on its category heap (if it has one) */ if (in_tree && cat > 0 && cat <= LPROPS_HEAP_CNT) { struct ubifs_lpt_heap *heap = &c->lpt_heap[cat - 1]; if ((lp->hpos != -1 && heap->arr[lp->hpos]->lnum != lnum) || lp != heap->arr[lp->hpos]) { ubifs_err(c, "bad LPT heap (category %d)", cat); return -EINVAL; } } /* * After an unclean unmount, empty and freeable LEBs * may contain garbage - do not scan them. */ if (lp->free == c->leb_size) { lst->empty_lebs += 1; lst->total_free += c->leb_size; lst->total_dark += ubifs_calc_dark(c, c->leb_size); return LPT_SCAN_CONTINUE; } if (lp->free + lp->dirty == c->leb_size && !(lp->flags & LPROPS_INDEX)) { lst->total_free += lp->free; lst->total_dirty += lp->dirty; lst->total_dark += ubifs_calc_dark(c, c->leb_size); return LPT_SCAN_CONTINUE; } buf = __vmalloc(c->leb_size, GFP_NOFS); if (!buf) return -ENOMEM; sleb = ubifs_scan(c, lnum, 0, buf, 0); if (IS_ERR(sleb)) { ret = PTR_ERR(sleb); if (ret == -EUCLEAN) { ubifs_dump_lprops(c); ubifs_dump_budg(c, &c->bi); } goto out; } is_idx = -1; list_for_each_entry(snod, &sleb->nodes, list) { int found, level = 0; cond_resched(); if (is_idx == -1) is_idx = (snod->type == UBIFS_IDX_NODE) ? 
1 : 0; if (is_idx && snod->type != UBIFS_IDX_NODE) { ubifs_err(c, "indexing node in data LEB %d:%d", lnum, snod->offs); goto out_destroy; } if (snod->type == UBIFS_IDX_NODE) { struct ubifs_idx_node *idx = snod->node; key_read(c, ubifs_idx_key(c, idx), &snod->key); level = le16_to_cpu(idx->level); } found = ubifs_tnc_has_node(c, &snod->key, level, lnum, snod->offs, is_idx); if (found) { if (found < 0) goto out_destroy; used += ALIGN(snod->len, 8); } } free = c->leb_size - sleb->endpt; dirty = sleb->endpt - used; if (free > c->leb_size || free < 0 || dirty > c->leb_size || dirty < 0) { ubifs_err(c, "bad calculated accounting for LEB %d: free %d, dirty %d", lnum, free, dirty); goto out_destroy; } if (lp->free + lp->dirty == c->leb_size && free + dirty == c->leb_size) if ((is_idx && !(lp->flags & LPROPS_INDEX)) || (!is_idx && free == c->leb_size) || lp->free == c->leb_size) { /* * Empty or freeable LEBs could contain index * nodes from an uncompleted commit due to an * unclean unmount. Or they could be empty for * the same reason. Or it may simply not have been * unmapped. */ free = lp->free; dirty = lp->dirty; is_idx = 0; } if (is_idx && lp->free + lp->dirty == free + dirty && lnum != c->ihead_lnum) { /* * After an unclean unmount, an index LEB could have a different * amount of free space than the value recorded by lprops. That * is because the in-the-gaps method may use free space or * create free space (as a side-effect of using ubi_leb_change * and not writing the whole LEB). The incorrect free space * value is not a problem because the index is only ever * allocated empty LEBs, so there will never be an attempt to * write to the free space at the end of an index LEB - except * by the in-the-gaps method for which it is not a problem. */ free = lp->free; dirty = lp->dirty; } if (lp->free != free || lp->dirty != dirty) goto out_print; if (is_idx && !(lp->flags & LPROPS_INDEX)) { if (free == c->leb_size) /* Free but not unmapped LEB, it's fine */ is_idx = 0; else { ubifs_err(c, "indexing node without indexing flag"); goto out_print; } } if (!is_idx && (lp->flags & LPROPS_INDEX)) { ubifs_err(c, "data node with indexing flag"); goto out_print; } if (free == c->leb_size) lst->empty_lebs += 1; if (is_idx) lst->idx_lebs += 1; if (!(lp->flags & LPROPS_INDEX)) lst->total_used += c->leb_size - free - dirty; lst->total_free += free; lst->total_dirty += dirty; if (!(lp->flags & LPROPS_INDEX)) { int spc = free + dirty; if (spc < c->dead_wm) lst->total_dead += spc; else lst->total_dark += ubifs_calc_dark(c, spc); } ubifs_scan_destroy(sleb); vfree(buf); return LPT_SCAN_CONTINUE; out_print: ubifs_err(c, "bad accounting of LEB %d: free %d, dirty %d flags %#x, should be free %d, dirty %d", lnum, lp->free, lp->dirty, lp->flags, free, dirty); ubifs_dump_leb(c, lnum); out_destroy: ubifs_scan_destroy(sleb); ret = -EINVAL; out: vfree(buf); return ret; } /** * dbg_check_lprops - check all LEB properties. * @c: UBIFS file-system description object * * This function checks all LEB properties and makes sure they are all correct. * It returns zero if everything is fine, %-EINVAL if there is an inconsistency * and other negative error codes in case of other errors. This function is * called while the file system is locked (because of commit start), so no * additional locking is required. Note that locking the LPT mutex would cause * a circular lock dependency with the TNC mutex. 
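 *
 * Also note that the check reads back every non-empty LEB from the media
 * (after syncing the write-buffers), so it is expensive and only runs when
 * lprops debugging checks are enabled.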
*/ int dbg_check_lprops(struct ubifs_info *c) { int i, err; struct ubifs_lp_stats lst; if (!dbg_is_chk_lprops(c)) return 0; /* * As we are going to scan the media, the write buffers have to be * synchronized. */ for (i = 0; i < c->jhead_cnt; i++) { err = ubifs_wbuf_sync(&c->jheads[i].wbuf); if (err) return err; } memset(&lst, 0, sizeof(struct ubifs_lp_stats)); err = ubifs_lpt_scan_nolock(c, c->main_first, c->leb_cnt - 1, scan_check_cb, &lst); if (err && err != -ENOSPC) goto out; if (lst.empty_lebs != c->lst.empty_lebs || lst.idx_lebs != c->lst.idx_lebs || lst.total_free != c->lst.total_free || lst.total_dirty != c->lst.total_dirty || lst.total_used != c->lst.total_used) { ubifs_err(c, "bad overall accounting"); ubifs_err(c, "calculated: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld", lst.empty_lebs, lst.idx_lebs, lst.total_free, lst.total_dirty, lst.total_used); ubifs_err(c, "read from lprops: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld", c->lst.empty_lebs, c->lst.idx_lebs, c->lst.total_free, c->lst.total_dirty, c->lst.total_used); err = -EINVAL; goto out; } if (lst.total_dead != c->lst.total_dead || lst.total_dark != c->lst.total_dark) { ubifs_err(c, "bad dead/dark space accounting"); ubifs_err(c, "calculated: total_dead %lld, total_dark %lld", lst.total_dead, lst.total_dark); ubifs_err(c, "read from lprops: total_dead %lld, total_dark %lld", c->lst.total_dead, c->lst.total_dark); err = -EINVAL; goto out; } err = dbg_check_cats(c); out: return err; }
// SPDX-License-Identifier: GPL-2.0+ /* * An I2C driver for the Intersil ISL 12026 * * Copyright (c) 2018 Cavium, Inc. */ #include <linux/bcd.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/nvmem-provider.h> #include <linux/of.h> #include <linux/rtc.h> #include <linux/slab.h> /* register offsets */ #define ISL12026_REG_PWR 0x14 # define ISL12026_REG_PWR_BSW BIT(6) # define ISL12026_REG_PWR_SBIB BIT(7) #define ISL12026_REG_SC 0x30 #define ISL12026_REG_HR 0x32 # define ISL12026_REG_HR_MIL BIT(7) /* military or 24 hour time */ #define ISL12026_REG_SR 0x3f # define ISL12026_REG_SR_RTCF BIT(0) # define ISL12026_REG_SR_WEL BIT(1) # define ISL12026_REG_SR_RWEL BIT(2) # define ISL12026_REG_SR_MBZ BIT(3) # define ISL12026_REG_SR_OSCF BIT(4) /* The EEPROM array responds at i2c address 0x57 */ #define ISL12026_EEPROM_ADDR 0x57 #define ISL12026_PAGESIZE 16 #define ISL12026_NVMEM_WRITE_TIME 20 struct isl12026 { struct rtc_device *rtc; struct i2c_client *nvm_client; }; static int isl12026_read_reg(struct i2c_client *client, int reg) { u8 addr[] = {0, reg}; u8 val; int ret; struct i2c_msg msgs[] = { { .addr = client->addr, .flags = 0, .len = sizeof(addr), .buf = addr }, { .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = &val } }; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) { dev_err(&client->dev, "read reg error, ret=%d\n", ret); ret = ret < 0 ? ret : -EIO; } else { ret = val; } return ret; } static int isl12026_arm_write(struct i2c_client *client) { int ret; u8 op[3]; struct i2c_msg msg = { .addr = client->addr, .flags = 0, .len = 1, .buf = op }; /* Set SR.WEL */ op[0] = 0; op[1] = ISL12026_REG_SR; op[2] = ISL12026_REG_SR_WEL; msg.len = 3; ret = i2c_transfer(client->adapter, &msg, 1); if (ret != 1) { dev_err(&client->dev, "write error SR.WEL, ret=%d\n", ret); ret = ret < 0 ? ret : -EIO; goto out; } /* Set SR.WEL and SR.RWEL */ op[2] = ISL12026_REG_SR_WEL | ISL12026_REG_SR_RWEL; msg.len = 3; ret = i2c_transfer(client->adapter, &msg, 1); if (ret != 1) { dev_err(&client->dev, "write error SR.WEL|SR.RWEL, ret=%d\n", ret); ret = ret < 0 ? ret : -EIO; goto out; } else { ret = 0; } out: return ret; } static int isl12026_disarm_write(struct i2c_client *client) { int ret; u8 op[3] = {0, ISL12026_REG_SR, 0}; struct i2c_msg msg = { .addr = client->addr, .flags = 0, .len = sizeof(op), .buf = op }; ret = i2c_transfer(client->adapter, &msg, 1); if (ret != 1) { dev_err(&client->dev, "write error SR, ret=%d\n", ret); ret = ret < 0 ? ret : -EIO; } else { ret = 0; } return ret; } static int isl12026_write_reg(struct i2c_client *client, int reg, u8 val) { int ret; u8 op[3] = {0, reg, val}; struct i2c_msg msg = { .addr = client->addr, .flags = 0, .len = sizeof(op), .buf = op }; ret = isl12026_arm_write(client); if (ret) return ret; ret = i2c_transfer(client->adapter, &msg, 1); if (ret != 1) { dev_err(&client->dev, "write error CCR, ret=%d\n", ret); ret = ret < 0 ? 
ret : -EIO; goto out; } msleep(ISL12026_NVMEM_WRITE_TIME); ret = isl12026_disarm_write(client); out: return ret; } static int isl12026_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct i2c_client *client = to_i2c_client(dev); int ret; u8 op[10]; struct i2c_msg msg = { .addr = client->addr, .flags = 0, .len = sizeof(op), .buf = op }; ret = isl12026_arm_write(client); if (ret) return ret; /* Set the CCR registers */ op[0] = 0; op[1] = ISL12026_REG_SC; op[2] = bin2bcd(tm->tm_sec); /* SC */ op[3] = bin2bcd(tm->tm_min); /* MN */ op[4] = bin2bcd(tm->tm_hour) | ISL12026_REG_HR_MIL; /* HR */ op[5] = bin2bcd(tm->tm_mday); /* DT */ op[6] = bin2bcd(tm->tm_mon + 1); /* MO */ op[7] = bin2bcd(tm->tm_year % 100); /* YR */ op[8] = bin2bcd(tm->tm_wday & 7); /* DW */ op[9] = bin2bcd(tm->tm_year >= 100 ? 20 : 19); /* Y2K */ ret = i2c_transfer(client->adapter, &msg, 1); if (ret != 1) { dev_err(&client->dev, "write error CCR, ret=%d\n", ret); ret = ret < 0 ? ret : -EIO; goto out; } ret = isl12026_disarm_write(client); out: return ret; } static int isl12026_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct i2c_client *client = to_i2c_client(dev); u8 ccr[8]; u8 addr[2]; u8 sr; int ret; struct i2c_msg msgs[] = { { .addr = client->addr, .flags = 0, .len = sizeof(addr), .buf = addr }, { .addr = client->addr, .flags = I2C_M_RD, } }; /* First, read SR */ addr[0] = 0; addr[1] = ISL12026_REG_SR; msgs[1].len = 1; msgs[1].buf = &sr; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) { dev_err(&client->dev, "read error, ret=%d\n", ret); ret = ret < 0 ? ret : -EIO; goto out; } if (sr & ISL12026_REG_SR_RTCF) dev_warn(&client->dev, "Real-Time Clock Failure on read\n"); if (sr & ISL12026_REG_SR_OSCF) dev_warn(&client->dev, "Oscillator Failure on read\n"); /* Second, CCR regs */ addr[0] = 0; addr[1] = ISL12026_REG_SC; msgs[1].len = sizeof(ccr); msgs[1].buf = ccr; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) { dev_err(&client->dev, "read error, ret=%d\n", ret); ret = ret < 0 ? ret : -EIO; goto out; } tm->tm_sec = bcd2bin(ccr[0] & 0x7F); tm->tm_min = bcd2bin(ccr[1] & 0x7F); if (ccr[2] & ISL12026_REG_HR_MIL) tm->tm_hour = bcd2bin(ccr[2] & 0x3F); else tm->tm_hour = bcd2bin(ccr[2] & 0x1F) + ((ccr[2] & 0x20) ? 12 : 0); tm->tm_mday = bcd2bin(ccr[3] & 0x3F); tm->tm_mon = bcd2bin(ccr[4] & 0x1F) - 1; tm->tm_year = bcd2bin(ccr[5]); if (bcd2bin(ccr[7]) == 20) tm->tm_year += 100; tm->tm_wday = ccr[6] & 0x07; ret = 0; out: return ret; } static const struct rtc_class_ops isl12026_rtc_ops = { .read_time = isl12026_rtc_read_time, .set_time = isl12026_rtc_set_time, }; static int isl12026_nvm_read(void *p, unsigned int offset, void *val, size_t bytes) { struct isl12026 *priv = p; int ret; u8 addr[2]; struct i2c_msg msgs[] = { { .addr = priv->nvm_client->addr, .flags = 0, .len = sizeof(addr), .buf = addr }, { .addr = priv->nvm_client->addr, .flags = I2C_M_RD, .buf = val } }; /* * offset and bytes checked and limited by nvmem core, so * proceed without further checks. */ ret = mutex_lock_interruptible(&priv->rtc->ops_lock); if (ret) return ret; /* 2 bytes of address, most significant first */ addr[0] = offset >> 8; addr[1] = offset; msgs[1].len = bytes; ret = i2c_transfer(priv->nvm_client->adapter, msgs, ARRAY_SIZE(msgs)); mutex_unlock(&priv->rtc->ops_lock); if (ret != ARRAY_SIZE(msgs)) { dev_err(&priv->nvm_client->dev, "nvmem read error, ret=%d\n", ret); return ret < 0 ? 
ret : -EIO; } return 0; } static int isl12026_nvm_write(void *p, unsigned int offset, void *val, size_t bytes) { struct isl12026 *priv = p; int ret; u8 *v = val; size_t chunk_size, num_written; u8 payload[ISL12026_PAGESIZE + 2]; /* page + 2 address bytes */ struct i2c_msg msgs[] = { { .addr = priv->nvm_client->addr, .flags = 0, .buf = payload } }; /* * offset and bytes checked and limited by nvmem core, so * proceed without further checks. */ ret = mutex_lock_interruptible(&priv->rtc->ops_lock); if (ret) return ret; num_written = 0; while (bytes) { chunk_size = round_down(offset, ISL12026_PAGESIZE) + ISL12026_PAGESIZE - offset; chunk_size = min(bytes, chunk_size); /* * 2 bytes of address, most significant first, followed * by page data bytes */ memcpy(payload + 2, v + num_written, chunk_size); payload[0] = offset >> 8; payload[1] = offset; msgs[0].len = chunk_size + 2; ret = i2c_transfer(priv->nvm_client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) { dev_err(&priv->nvm_client->dev, "nvmem write error, ret=%d\n", ret); ret = ret < 0 ? ret : -EIO; break; } ret = 0; bytes -= chunk_size; offset += chunk_size; num_written += chunk_size; msleep(ISL12026_NVMEM_WRITE_TIME); } mutex_unlock(&priv->rtc->ops_lock); return ret; } static void isl12026_force_power_modes(struct i2c_client *client) { int ret; int pwr, requested_pwr; u32 bsw_val, sbib_val; bool set_bsw, set_sbib; /* * If we can read the of_property, set the specified value. * If there is an error reading the of_property (likely * because it does not exist), keep the current value. */ ret = of_property_read_u32(client->dev.of_node, "isil,pwr-bsw", &bsw_val); set_bsw = (ret == 0); ret = of_property_read_u32(client->dev.of_node, "isil,pwr-sbib", &sbib_val); set_sbib = (ret == 0); /* Check if PWR.BSW and/or PWR.SBIB need specified values */ if (!set_bsw && !set_sbib) return; pwr = isl12026_read_reg(client, ISL12026_REG_PWR); if (pwr < 0) { dev_warn(&client->dev, "Error: Failed to read PWR %d\n", pwr); return; } requested_pwr = pwr; if (set_bsw) { if (bsw_val) requested_pwr |= ISL12026_REG_PWR_BSW; else requested_pwr &= ~ISL12026_REG_PWR_BSW; } /* else keep current BSW */ if (set_sbib) { if (sbib_val) requested_pwr |= ISL12026_REG_PWR_SBIB; else requested_pwr &= ~ISL12026_REG_PWR_SBIB; } /* else keep current SBIB */ if (pwr >= 0 && pwr != requested_pwr) { dev_dbg(&client->dev, "PWR: %02x\n", pwr); dev_dbg(&client->dev, "Updating PWR to: %02x\n", requested_pwr); isl12026_write_reg(client, ISL12026_REG_PWR, requested_pwr); } } static int isl12026_probe(struct i2c_client *client) { struct isl12026 *priv; int ret; struct nvmem_config nvm_cfg = { .name = "isl12026-", .base_dev = &client->dev, .stride = 1, .word_size = 1, .size = 512, .reg_read = isl12026_nvm_read, .reg_write = isl12026_nvm_write, }; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; i2c_set_clientdata(client, priv); isl12026_force_power_modes(client); priv->nvm_client = i2c_new_dummy_device(client->adapter, ISL12026_EEPROM_ADDR); if (IS_ERR(priv->nvm_client)) return PTR_ERR(priv->nvm_client); priv->rtc = devm_rtc_allocate_device(&client->dev); ret = PTR_ERR_OR_ZERO(priv->rtc); if (ret) return ret; priv->rtc->ops = &isl12026_rtc_ops; nvm_cfg.priv = priv; ret = devm_rtc_nvmem_register(priv->rtc, &nvm_cfg); if (ret) return ret; return devm_rtc_register_device(priv->rtc); } static void isl12026_remove(struct i2c_client *client) { struct isl12026 *priv = 
i2c_get_clientdata(client); i2c_unregister_device(priv->nvm_client); } static const struct of_device_id isl12026_dt_match[] = { { .compatible = "isil,isl12026" }, { } }; MODULE_DEVICE_TABLE(of, isl12026_dt_match); static struct i2c_driver isl12026_driver = { .driver = { .name = "rtc-isl12026", .of_match_table = isl12026_dt_match, }, .probe = isl12026_probe, .remove = isl12026_remove, }; module_i2c_driver(isl12026_driver); MODULE_DESCRIPTION("ISL 12026 RTC driver"); MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2020 - Google LLC * Author: David Brazdil <[email protected]> */ #include <asm/kvm_asm.h> #include <asm/kvm_hyp.h> #include <asm/kvm_mmu.h> #include <linux/arm-smccc.h> #include <linux/kvm_host.h> #include <uapi/linux/psci.h> #include <nvhe/memory.h> #include <nvhe/trap_handler.h> void kvm_hyp_cpu_entry(unsigned long r0); void kvm_hyp_cpu_resume(unsigned long r0); void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt); /* Config options set by the host. */ struct kvm_host_psci_config __ro_after_init kvm_host_psci_config; #define INVALID_CPU_ID UINT_MAX struct psci_boot_args { atomic_t lock; unsigned long pc; unsigned long r0; }; #define PSCI_BOOT_ARGS_UNLOCKED 0 #define PSCI_BOOT_ARGS_LOCKED 1 #define PSCI_BOOT_ARGS_INIT \ ((struct psci_boot_args){ \ .lock = ATOMIC_INIT(PSCI_BOOT_ARGS_UNLOCKED), \ }) static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT; static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT; #define is_psci_0_1(what, func_id) \ (kvm_host_psci_config.psci_0_1_ ## what ## _implemented && \ (func_id) == kvm_host_psci_config.function_ids_0_1.what) static bool is_psci_0_1_call(u64 func_id) { return (is_psci_0_1(cpu_suspend, func_id) || is_psci_0_1(cpu_on, func_id) || is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id)); } static bool is_psci_0_2_call(u64 func_id) { /* SMCCC reserves IDs 0x00-1F with the given 32/64-bit base for PSCI. */ return (PSCI_0_2_FN(0) <= func_id && func_id <= PSCI_0_2_FN(31)) || (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31)); } static unsigned long psci_call(unsigned long fn, unsigned long arg0, unsigned long arg1, unsigned long arg2) { struct arm_smccc_res res; arm_smccc_1_1_smc(fn, arg0, arg1, arg2, &res); return res.a0; } static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt) { return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1), cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3)); } static unsigned int find_cpu_id(u64 mpidr) { unsigned int i; /* Reject invalid MPIDRs */ if (mpidr & ~MPIDR_HWID_BITMASK) return INVALID_CPU_ID; for (i = 0; i < NR_CPUS; i++) { if (cpu_logical_map(i) == mpidr) return i; } return INVALID_CPU_ID; } static __always_inline bool try_acquire_boot_args(struct psci_boot_args *args) { return atomic_cmpxchg_acquire(&args->lock, PSCI_BOOT_ARGS_UNLOCKED, PSCI_BOOT_ARGS_LOCKED) == PSCI_BOOT_ARGS_UNLOCKED; } static __always_inline void release_boot_args(struct psci_boot_args *args) { atomic_set_release(&args->lock, PSCI_BOOT_ARGS_UNLOCKED); } static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt) { DECLARE_REG(u64, mpidr, host_ctxt, 1); DECLARE_REG(unsigned long, pc, host_ctxt, 2); DECLARE_REG(unsigned long, r0, host_ctxt, 3); unsigned int cpu_id; struct psci_boot_args *boot_args; struct kvm_nvhe_init_params *init_params; int ret; /* * Find the logical CPU ID for the given MPIDR. The search set is * the set of CPUs that were online at the point of KVM initialization. * Booting other CPUs is rejected because their cpufeatures were not * checked against the finalized capabilities. This could be relaxed * by doing the feature checks in hyp. */ cpu_id = find_cpu_id(mpidr); if (cpu_id == INVALID_CPU_ID) return PSCI_RET_INVALID_PARAMS; boot_args = per_cpu_ptr(&cpu_on_args, cpu_id); init_params = per_cpu_ptr(&kvm_init_params, cpu_id); /* Check if the target CPU is already being booted. 
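	 * The boot args lock is taken with a cmpxchg, so a racing CPU_ON
	 * call for the same target fails with PSCI_RET_ALREADY_ON instead
	 * of overwriting the first caller's entry point and context.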
*/ if (!try_acquire_boot_args(boot_args)) return PSCI_RET_ALREADY_ON; boot_args->pc = pc; boot_args->r0 = r0; wmb(); ret = psci_call(func_id, mpidr, __hyp_pa(&kvm_hyp_cpu_entry), __hyp_pa(init_params)); /* If successful, the lock will be released by the target CPU. */ if (ret != PSCI_RET_SUCCESS) release_boot_args(boot_args); return ret; } static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) { DECLARE_REG(u64, power_state, host_ctxt, 1); DECLARE_REG(unsigned long, pc, host_ctxt, 2); DECLARE_REG(unsigned long, r0, host_ctxt, 3); struct psci_boot_args *boot_args; struct kvm_nvhe_init_params *init_params; boot_args = this_cpu_ptr(&suspend_args); init_params = this_cpu_ptr(&kvm_init_params); /* * No need to acquire a lock before writing to boot_args because a core * can only suspend itself. Racy CPU_ON calls use a separate struct. */ boot_args->pc = pc; boot_args->r0 = r0; /* * Will either return if shallow sleep state, or wake up into the entry * point if it is a deep sleep state. */ return psci_call(func_id, power_state, __hyp_pa(&kvm_hyp_cpu_resume), __hyp_pa(init_params)); } static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) { DECLARE_REG(unsigned long, pc, host_ctxt, 1); DECLARE_REG(unsigned long, r0, host_ctxt, 2); struct psci_boot_args *boot_args; struct kvm_nvhe_init_params *init_params; boot_args = this_cpu_ptr(&suspend_args); init_params = this_cpu_ptr(&kvm_init_params); /* * No need to acquire a lock before writing to boot_args because a core * can only suspend itself. Racy CPU_ON calls use a separate struct. */ boot_args->pc = pc; boot_args->r0 = r0; /* Will only return on error. */ return psci_call(func_id, __hyp_pa(&kvm_hyp_cpu_resume), __hyp_pa(init_params), 0); } asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on) { struct psci_boot_args *boot_args; struct kvm_cpu_context *host_ctxt; host_ctxt = host_data_ptr(host_ctxt); if (is_cpu_on) boot_args = this_cpu_ptr(&cpu_on_args); else boot_args = this_cpu_ptr(&suspend_args); cpu_reg(host_ctxt, 0) = boot_args->r0; write_sysreg_el2(boot_args->pc, SYS_ELR); if (is_cpu_on) release_boot_args(boot_args); __host_enter(host_ctxt); } static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt) { if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id)) return psci_forward(host_ctxt); if (is_psci_0_1(cpu_on, func_id)) return psci_cpu_on(func_id, host_ctxt); if (is_psci_0_1(cpu_suspend, func_id)) return psci_cpu_suspend(func_id, host_ctxt); return PSCI_RET_NOT_SUPPORTED; } static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt) { switch (func_id) { case PSCI_0_2_FN_PSCI_VERSION: case PSCI_0_2_FN_CPU_OFF: case PSCI_0_2_FN64_AFFINITY_INFO: case PSCI_0_2_FN64_MIGRATE: case PSCI_0_2_FN_MIGRATE_INFO_TYPE: case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU: return psci_forward(host_ctxt); /* * SYSTEM_OFF/RESET should not return according to the spec. * Allow it so as to stay robust to broken firmware. 
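	 * If the call does come back, the firmware's return value is simply
	 * handed to the host like any other PSCI result.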
*/ case PSCI_0_2_FN_SYSTEM_OFF: case PSCI_0_2_FN_SYSTEM_RESET: return psci_forward(host_ctxt); case PSCI_0_2_FN64_CPU_SUSPEND: return psci_cpu_suspend(func_id, host_ctxt); case PSCI_0_2_FN64_CPU_ON: return psci_cpu_on(func_id, host_ctxt); default: return PSCI_RET_NOT_SUPPORTED; } } static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt) { switch (func_id) { case PSCI_1_0_FN_PSCI_FEATURES: case PSCI_1_0_FN_SET_SUSPEND_MODE: case PSCI_1_1_FN64_SYSTEM_RESET2: case PSCI_1_3_FN_SYSTEM_OFF2: case PSCI_1_3_FN64_SYSTEM_OFF2: return psci_forward(host_ctxt); case PSCI_1_0_FN64_SYSTEM_SUSPEND: return psci_system_suspend(func_id, host_ctxt); default: return psci_0_2_handler(func_id, host_ctxt); } } bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id) { unsigned long ret; switch (kvm_host_psci_config.version) { case PSCI_VERSION(0, 1): if (!is_psci_0_1_call(func_id)) return false; ret = psci_0_1_handler(func_id, host_ctxt); break; case PSCI_VERSION(0, 2): if (!is_psci_0_2_call(func_id)) return false; ret = psci_0_2_handler(func_id, host_ctxt); break; default: if (!is_psci_0_2_call(func_id)) return false; ret = psci_1_0_handler(func_id, host_ctxt); break; } cpu_reg(host_ctxt, 0) = ret; cpu_reg(host_ctxt, 1) = 0; cpu_reg(host_ctxt, 2) = 0; cpu_reg(host_ctxt, 3) = 0; return true; }
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Compatibility interface for userspace libc header coordination:
 *
 * Define compatibility macros that are used to control the inclusion or
 * exclusion of UAPI structures and definitions in coordination with another
 * userspace C library.
 *
 * This header is intended to solve the problem of UAPI definitions that
 * conflict with userspace definitions. If a UAPI header has such conflicting
 * definitions then the solution is as follows:
 *
 * * Synchronize the UAPI header and the libc headers so either one can be
 *   used and such that the ABI is preserved. If this is not possible then
 *   no simple compatibility interface exists (you need to write translating
 *   wrappers and rename things) and you can't use this interface.
 *
 * Then follow this process:
 *
 * (a) Include libc-compat.h in the UAPI header.
 *     e.g. #include <linux/libc-compat.h>
 *     This include must be as early as possible.
 *
 * (b) In libc-compat.h add enough code to detect that the conflicting
 *     userspace libc header has been included first.
 *
 * (c) If the userspace libc header has been included first define a set of
 *     guard macros of the form __UAPI_DEF_FOO and set their values to 1, else
 *     set their values to 0.
 *
 * (d) Back in the UAPI header with the conflicting definitions, guard the
 *     definitions with:
 *     #if __UAPI_DEF_FOO
 *     ...
 *     #endif
 *
 * This fixes the situation where the linux headers are included *after* the
 * libc headers. To fix the problem with the inclusion in the other order the
 * userspace libc headers must be fixed like this:
 *
 * * For all definitions that conflict with kernel definitions wrap those
 *   defines in the following:
 *   #if !__UAPI_DEF_FOO
 *   ...
 *   #endif
 *
 * This prevents the redefinition of a construct already defined by the kernel.
 */
#ifndef _UAPI_LIBC_COMPAT_H
#define _UAPI_LIBC_COMPAT_H

/* We have included glibc headers... */
#if defined(__GLIBC__)

/* Coordinate with glibc net/if.h header. */
#if defined(_NET_IF_H) && defined(__USE_MISC)

/* GLIBC headers included first so don't define anything
 * that would already be defined. */
#define __UAPI_DEF_IF_IFCONF 0
#define __UAPI_DEF_IF_IFMAP 0
#define __UAPI_DEF_IF_IFNAMSIZ 0
#define __UAPI_DEF_IF_IFREQ 0
/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */

#else /* _NET_IF_H */

/* Linux headers included first, and we must define everything
 * we need. The expectation is that glibc will check the
 * __UAPI_DEF_* defines and adjust appropriately. */
#define __UAPI_DEF_IF_IFCONF 1
#define __UAPI_DEF_IF_IFMAP 1
#define __UAPI_DEF_IF_IFNAMSIZ 1
#define __UAPI_DEF_IF_IFREQ 1
/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1

#endif /* _NET_IF_H */

/* Coordinate with glibc netinet/in.h header. */
#if defined(_NETINET_IN_H)

/* GLIBC headers included first so don't define anything
 * that would already be defined.
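 * glibc's netinet/in.h already provides these structures and constants,
 * so the conflicting UAPI copies must be suppressed here.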
*/ #define __UAPI_DEF_IN_ADDR 0 #define __UAPI_DEF_IN_IPPROTO 0 #define __UAPI_DEF_IN_PKTINFO 0 #define __UAPI_DEF_IP_MREQ 0 #define __UAPI_DEF_SOCKADDR_IN 0 #define __UAPI_DEF_IN_CLASS 0 #define __UAPI_DEF_IN6_ADDR 0 /* The exception is the in6_addr macros which must be defined * if the glibc code didn't define them. This guard matches * the guard in glibc/inet/netinet/in.h which defines the * additional in6_addr macros e.g. s6_addr16, and s6_addr32. */ #if defined(__USE_MISC) || defined (__USE_GNU) #define __UAPI_DEF_IN6_ADDR_ALT 0 #else #define __UAPI_DEF_IN6_ADDR_ALT 1 #endif #define __UAPI_DEF_SOCKADDR_IN6 0 #define __UAPI_DEF_IPV6_MREQ 0 #define __UAPI_DEF_IPPROTO_V6 0 #define __UAPI_DEF_IPV6_OPTIONS 0 #define __UAPI_DEF_IN6_PKTINFO 0 #define __UAPI_DEF_IP6_MTUINFO 0 #else /* Linux headers included first, and we must define everything * we need. The expectation is that glibc will check the * __UAPI_DEF_* defines and adjust appropriately. */ #define __UAPI_DEF_IN_ADDR 1 #define __UAPI_DEF_IN_IPPROTO 1 #define __UAPI_DEF_IN_PKTINFO 1 #define __UAPI_DEF_IP_MREQ 1 #define __UAPI_DEF_SOCKADDR_IN 1 #define __UAPI_DEF_IN_CLASS 1 #define __UAPI_DEF_IN6_ADDR 1 /* We unconditionally define the in6_addr macros and glibc must * coordinate. */ #define __UAPI_DEF_IN6_ADDR_ALT 1 #define __UAPI_DEF_SOCKADDR_IN6 1 #define __UAPI_DEF_IPV6_MREQ 1 #define __UAPI_DEF_IPPROTO_V6 1 #define __UAPI_DEF_IPV6_OPTIONS 1 #define __UAPI_DEF_IN6_PKTINFO 1 #define __UAPI_DEF_IP6_MTUINFO 1 #endif /* _NETINET_IN_H */ /* Definitions for xattr.h */ #if defined(_SYS_XATTR_H) #define __UAPI_DEF_XATTR 0 #else #define __UAPI_DEF_XATTR 1 #endif /* If we did not see any headers from any supported C libraries, * or we are being included in the kernel, then define everything * that we need. Check for previous __UAPI_* definitions to give * unsupported C libraries a way to opt out of any kernel definition. 
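 * Such a libc can simply pre-define the relevant __UAPI_DEF_* macro to 0
 * before including any kernel header to suppress the kernel copy.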
*/ #else /* !defined(__GLIBC__) */ /* Definitions for if.h */ #ifndef __UAPI_DEF_IF_IFCONF #define __UAPI_DEF_IF_IFCONF 1 #endif #ifndef __UAPI_DEF_IF_IFMAP #define __UAPI_DEF_IF_IFMAP 1 #endif #ifndef __UAPI_DEF_IF_IFNAMSIZ #define __UAPI_DEF_IF_IFNAMSIZ 1 #endif #ifndef __UAPI_DEF_IF_IFREQ #define __UAPI_DEF_IF_IFREQ 1 #endif /* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */ #ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1 #endif /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */ #ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1 #endif /* Definitions for in.h */ #ifndef __UAPI_DEF_IN_ADDR #define __UAPI_DEF_IN_ADDR 1 #endif #ifndef __UAPI_DEF_IN_IPPROTO #define __UAPI_DEF_IN_IPPROTO 1 #endif #ifndef __UAPI_DEF_IN_PKTINFO #define __UAPI_DEF_IN_PKTINFO 1 #endif #ifndef __UAPI_DEF_IP_MREQ #define __UAPI_DEF_IP_MREQ 1 #endif #ifndef __UAPI_DEF_SOCKADDR_IN #define __UAPI_DEF_SOCKADDR_IN 1 #endif #ifndef __UAPI_DEF_IN_CLASS #define __UAPI_DEF_IN_CLASS 1 #endif /* Definitions for in6.h */ #ifndef __UAPI_DEF_IN6_ADDR #define __UAPI_DEF_IN6_ADDR 1 #endif #ifndef __UAPI_DEF_IN6_ADDR_ALT #define __UAPI_DEF_IN6_ADDR_ALT 1 #endif #ifndef __UAPI_DEF_SOCKADDR_IN6 #define __UAPI_DEF_SOCKADDR_IN6 1 #endif #ifndef __UAPI_DEF_IPV6_MREQ #define __UAPI_DEF_IPV6_MREQ 1 #endif #ifndef __UAPI_DEF_IPPROTO_V6 #define __UAPI_DEF_IPPROTO_V6 1 #endif #ifndef __UAPI_DEF_IPV6_OPTIONS #define __UAPI_DEF_IPV6_OPTIONS 1 #endif #ifndef __UAPI_DEF_IN6_PKTINFO #define __UAPI_DEF_IN6_PKTINFO 1 #endif #ifndef __UAPI_DEF_IP6_MTUINFO #define __UAPI_DEF_IP6_MTUINFO 1 #endif /* Definitions for xattr.h */ #ifndef __UAPI_DEF_XATTR #define __UAPI_DEF_XATTR 1 #endif #endif /* __GLIBC__ */ #endif /* _UAPI_LIBC_COMPAT_H */
// SPDX-License-Identifier: GPL-2.0-only /* * Generic HDLC support routines for Linux * Point-to-point protocol support * * Copyright (C) 1999 - 2008 Krzysztof Halasa <[email protected]> */ #include <linux/errno.h> #include <linux/hdlc.h> #include <linux/if_arp.h> #include <linux/inetdevice.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pkt_sched.h> #include <linux/poll.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #define DEBUG_CP 0 /* also bytes# to dump */ #define DEBUG_STATE 0 #define DEBUG_HARD_HEADER 0 #define HDLC_ADDR_ALLSTATIONS 0xFF #define HDLC_CTRL_UI 0x03 #define PID_LCP 0xC021 #define PID_IP 0x0021 #define PID_IPCP 0x8021 #define PID_IPV6 0x0057 #define PID_IPV6CP 0x8057 enum {IDX_LCP = 0, IDX_IPCP, IDX_IPV6CP, IDX_COUNT}; enum {CP_CONF_REQ = 1, CP_CONF_ACK, CP_CONF_NAK, CP_CONF_REJ, CP_TERM_REQ, CP_TERM_ACK, CP_CODE_REJ, LCP_PROTO_REJ, LCP_ECHO_REQ, LCP_ECHO_REPLY, LCP_DISC_REQ, CP_CODES}; #if DEBUG_CP static const char *const code_names[CP_CODES] = { "0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq", "TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard" }; static char debug_buffer[64 + 3 * DEBUG_CP]; #endif enum {LCP_OPTION_MRU = 1, LCP_OPTION_ACCM, LCP_OPTION_MAGIC = 5}; struct hdlc_header { u8 address; u8 control; __be16 protocol; }; struct cp_header { u8 code; u8 id; __be16 len; }; struct proto { struct net_device *dev; struct timer_list timer; unsigned long timeout; u16 pid; /* protocol ID */ u8 state; u8 cr_id; /* ID of last Configuration-Request */ u8 restart_counter; }; struct ppp { struct proto protos[IDX_COUNT]; spinlock_t lock; unsigned long last_pong; unsigned int req_timeout, cr_retries, term_retries; unsigned int keepalive_interval, keepalive_timeout; u8 seq; /* local sequence number for requests */ u8 echo_id; /* ID of last Echo-Request (LCP) */ }; enum {CLOSED = 0, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED, STATES, STATE_MASK = 0xF}; enum {START = 0, STOP, TO_GOOD, TO_BAD, RCR_GOOD, RCR_BAD, RCA, RCN, RTR, RTA, RUC, RXJ_GOOD, RXJ_BAD, EVENTS}; enum {INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100, SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000}; #if DEBUG_STATE static const char *const state_names[STATES] = { "Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent", "Opened" }; static const char *const event_names[EVENTS] = { "Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN", "RTR", "RTA", "RUC", "RXJ+", "RXJ-" }; #endif static struct sk_buff_head tx_queue; /* used when holding the spin lock */ static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs); static inline struct ppp *get_ppp(struct net_device *dev) { return (struct ppp *)dev_to_hdlc(dev)->state; } static inline struct proto *get_proto(struct net_device *dev, u16 pid) { struct ppp *ppp = get_ppp(dev); switch (pid) { case PID_LCP: return &ppp->protos[IDX_LCP]; case PID_IPCP: return &ppp->protos[IDX_IPCP]; case PID_IPV6CP: return &ppp->protos[IDX_IPV6CP]; default: return NULL; } } static inline const char *proto_name(u16 pid) { switch (pid) { case PID_LCP: return "LCP"; case PID_IPCP: return "IPCP"; case PID_IPV6CP: return "IPV6CP"; default: return NULL; } } static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev) { struct hdlc_header *data = (struct hdlc_header *)skb->data; if (skb->len < sizeof(struct hdlc_header)) return htons(ETH_P_HDLC); if (data->address != HDLC_ADDR_ALLSTATIONS || data->control != HDLC_CTRL_UI) 
return htons(ETH_P_HDLC); switch (data->protocol) { case cpu_to_be16(PID_IP): skb_pull(skb, sizeof(struct hdlc_header)); return htons(ETH_P_IP); case cpu_to_be16(PID_IPV6): skb_pull(skb, sizeof(struct hdlc_header)); return htons(ETH_P_IPV6); default: return htons(ETH_P_HDLC); } } static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev, u16 type, const void *daddr, const void *saddr, unsigned int len) { struct hdlc_header *data; #if DEBUG_HARD_HEADER printk(KERN_DEBUG "%s: ppp_hard_header() called\n", dev->name); #endif skb_push(skb, sizeof(struct hdlc_header)); data = (struct hdlc_header *)skb->data; data->address = HDLC_ADDR_ALLSTATIONS; data->control = HDLC_CTRL_UI; switch (type) { case ETH_P_IP: data->protocol = htons(PID_IP); break; case ETH_P_IPV6: data->protocol = htons(PID_IPV6); break; case PID_LCP: case PID_IPCP: case PID_IPV6CP: data->protocol = htons(type); break; default: /* unknown protocol */ data->protocol = 0; } return sizeof(struct hdlc_header); } static void ppp_tx_flush(void) { struct sk_buff *skb; while ((skb = skb_dequeue(&tx_queue)) != NULL) dev_queue_xmit(skb); } static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code, u8 id, unsigned int len, const void *data) { struct sk_buff *skb; struct cp_header *cp; unsigned int magic_len = 0; static u32 magic; #if DEBUG_CP int i; char *ptr; #endif if (pid == PID_LCP && (code == LCP_ECHO_REQ || code == LCP_ECHO_REPLY)) magic_len = sizeof(magic); skb = dev_alloc_skb(sizeof(struct hdlc_header) + sizeof(struct cp_header) + magic_len + len); if (!skb) return; skb_reserve(skb, sizeof(struct hdlc_header)); cp = skb_put(skb, sizeof(struct cp_header)); cp->code = code; cp->id = id; cp->len = htons(sizeof(struct cp_header) + magic_len + len); if (magic_len) skb_put_data(skb, &magic, magic_len); if (len) skb_put_data(skb, data, len); #if DEBUG_CP BUG_ON(code >= CP_CODES); ptr = debug_buffer; *ptr = '\x0'; for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) { sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]); ptr += strlen(ptr); } printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name, proto_name(pid), code_names[code], id, debug_buffer); #endif ppp_hard_header(skb, dev, pid, NULL, NULL, 0); skb->priority = TC_PRIO_CONTROL; skb->dev = dev; skb->protocol = htons(ETH_P_HDLC); skb_reset_network_header(skb); skb_queue_tail(&tx_queue, skb); } /* State transition table (compare STD-51) Events Actions TO+ = Timeout with counter > 0 irc = Initialize-Restart-Count TO- = Timeout with counter expired zrc = Zero-Restart-Count RCR+ = Receive-Configure-Request (Good) scr = Send-Configure-Request RCR- = Receive-Configure-Request (Bad) RCA = Receive-Configure-Ack sca = Send-Configure-Ack RCN = Receive-Configure-Nak/Rej scn = Send-Configure-Nak/Rej RTR = Receive-Terminate-Request str = Send-Terminate-Request RTA = Receive-Terminate-Ack sta = Send-Terminate-Ack RUC = Receive-Unknown-Code scj = Send-Code-Reject RXJ+ = Receive-Code-Reject (permitted) or Receive-Protocol-Reject RXJ- = Receive-Code-Reject (catastrophic) or Receive-Protocol-Reject */ static int cp_table[EVENTS][STATES] = { /* CLOSED STOPPED STOPPING REQ_SENT ACK_RECV ACK_SENT OPENED 0 1 2 3 4 5 6 */ {IRC|SCR|3, INV , INV , INV , INV , INV , INV }, /* START */ { INV , 0 , 0 , 0 , 0 , 0 , 0 }, /* STOP */ { INV , INV ,STR|2, SCR|3 ,SCR|3, SCR|5 , INV }, /* TO+ */ { INV , INV , 1 , 1 , 1 , 1 , INV }, /* TO- */ { STA|0 ,IRC|SCR|SCA|5, 2 , SCA|5 ,SCA|6, SCA|5 ,SCR|SCA|5}, /* RCR+ */ { STA|0 ,IRC|SCR|SCN|3, 2 , SCN|3 ,SCN|4, SCN|3 ,SCR|SCN|3}, /* 
RCR- */ { STA|0 , STA|1 , 2 , IRC|4 ,SCR|3, 6 , SCR|3 }, /* RCA */ { STA|0 , STA|1 , 2 ,IRC|SCR|3,SCR|3,IRC|SCR|5, SCR|3 }, /* RCN */ { STA|0 , STA|1 ,STA|2, STA|3 ,STA|3, STA|3 ,ZRC|STA|2}, /* RTR */ { 0 , 1 , 1 , 3 , 3 , 5 , SCR|3 }, /* RTA */ { SCJ|0 , SCJ|1 ,SCJ|2, SCJ|3 ,SCJ|4, SCJ|5 , SCJ|6 }, /* RUC */ { 0 , 1 , 2 , 3 , 3 , 5 , 6 }, /* RXJ+ */ { 0 , 1 , 1 , 1 , 1 , 1 ,IRC|STR|2}, /* RXJ- */ }; /* SCA: RCR+ must supply id, len and data SCN: RCR- must supply code, id, len and data STA: RTR must supply id SCJ: RUC must supply CP packet len and data */ static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code, u8 id, unsigned int len, const void *data) { int old_state, action; struct ppp *ppp = get_ppp(dev); struct proto *proto = get_proto(dev, pid); old_state = proto->state; BUG_ON(old_state >= STATES); BUG_ON(event >= EVENTS); #if DEBUG_STATE printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name, proto_name(pid), event_names[event], state_names[proto->state]); #endif action = cp_table[event][old_state]; proto->state = action & STATE_MASK; if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */ mod_timer(&proto->timer, proto->timeout = jiffies + ppp->req_timeout * HZ); if (action & ZRC) proto->restart_counter = 0; if (action & IRC) proto->restart_counter = (proto->state == STOPPING) ? ppp->term_retries : ppp->cr_retries; if (action & SCR) /* send Configure-Request */ ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq, 0, NULL); if (action & SCA) /* send Configure-Ack */ ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data); if (action & SCN) /* send Configure-Nak/Reject */ ppp_tx_cp(dev, pid, code, id, len, data); if (action & STR) /* send Terminate-Request */ ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL); if (action & STA) /* send Terminate-Ack */ ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL); if (action & SCJ) /* send Code-Reject */ ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data); if (old_state != OPENED && proto->state == OPENED) { netdev_info(dev, "%s up\n", proto_name(pid)); if (pid == PID_LCP) { netif_dormant_off(dev); ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL); ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL); ppp->last_pong = jiffies; mod_timer(&proto->timer, proto->timeout = jiffies + ppp->keepalive_interval * HZ); } } if (old_state == OPENED && proto->state != OPENED) { netdev_info(dev, "%s down\n", proto_name(pid)); if (pid == PID_LCP) { netif_dormant_on(dev); ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL); ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL); } } if (old_state != CLOSED && proto->state == CLOSED) del_timer(&proto->timer); #if DEBUG_STATE printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name, proto_name(pid), event_names[event], state_names[proto->state]); #endif } static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, unsigned int req_len, const u8 *data) { static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 }; const u8 *opt; u8 *out; unsigned int len = req_len, nak_len = 0, rej_len = 0; out = kmalloc(len, GFP_ATOMIC); if (!out) { dev->stats.rx_dropped++; return; /* out of memory, ignore CR packet */ } for (opt = data; len; len -= opt[1], opt += opt[1]) { if (len < 2 || opt[1] < 2 || len < opt[1]) goto err_out; if (pid == PID_LCP) switch (opt[0]) { case LCP_OPTION_MRU: continue; /* MRU always OK and > 1500 bytes? 
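			   In practice any MRU option the peer proposes is
			   accepted unchanged (neither NAKed nor rejected).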
*/ case LCP_OPTION_ACCM: /* async control character map */ if (opt[1] < sizeof(valid_accm)) goto err_out; if (!memcmp(opt, valid_accm, sizeof(valid_accm))) continue; if (!rej_len) { /* NAK it */ memcpy(out + nak_len, valid_accm, sizeof(valid_accm)); nak_len += sizeof(valid_accm); continue; } break; case LCP_OPTION_MAGIC: if (len < 6) goto err_out; if (opt[1] != 6 || (!opt[2] && !opt[3] && !opt[4] && !opt[5])) break; /* reject invalid magic number */ continue; } /* reject this option */ memcpy(out + rej_len, opt, opt[1]); rej_len += opt[1]; } if (rej_len) ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_REJ, id, rej_len, out); else if (nak_len) ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out); else ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data); kfree(out); return; err_out: dev->stats.rx_errors++; kfree(out); } static int ppp_rx(struct sk_buff *skb) { struct hdlc_header *hdr = (struct hdlc_header *)skb->data; struct net_device *dev = skb->dev; struct ppp *ppp = get_ppp(dev); struct proto *proto; struct cp_header *cp; unsigned long flags; unsigned int len; u16 pid; #if DEBUG_CP int i; char *ptr; #endif spin_lock_irqsave(&ppp->lock, flags); /* Check HDLC header */ if (skb->len < sizeof(struct hdlc_header)) goto rx_error; cp = skb_pull(skb, sizeof(struct hdlc_header)); if (hdr->address != HDLC_ADDR_ALLSTATIONS || hdr->control != HDLC_CTRL_UI) goto rx_error; pid = ntohs(hdr->protocol); proto = get_proto(dev, pid); if (!proto) { if (ppp->protos[IDX_LCP].state == OPENED) ppp_tx_cp(dev, PID_LCP, LCP_PROTO_REJ, ++ppp->seq, skb->len + 2, &hdr->protocol); goto rx_error; } len = ntohs(cp->len); if (len < sizeof(struct cp_header) /* no complete CP header? */ || skb->len < len /* truncated packet? */) goto rx_error; skb_pull(skb, sizeof(struct cp_header)); len -= sizeof(struct cp_header); /* HDLC and CP headers stripped from skb */ #if DEBUG_CP if (cp->code < CP_CODES) sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code], cp->id); else sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id); ptr = debug_buffer + strlen(debug_buffer); for (i = 0; i < min_t(unsigned int, len, DEBUG_CP); i++) { sprintf(ptr, " %02X", skb->data[i]); ptr += strlen(ptr); } printk(KERN_DEBUG "%s: RX %s %s\n", dev->name, proto_name(pid), debug_buffer); #endif /* LCP only */ if (pid == PID_LCP) switch (cp->code) { case LCP_PROTO_REJ: pid = ntohs(*(__be16 *)skb->data); if (pid == PID_LCP || pid == PID_IPCP || pid == PID_IPV6CP) ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL); goto out; case LCP_ECHO_REQ: /* send Echo-Reply */ if (len >= 4 && proto->state == OPENED) ppp_tx_cp(dev, PID_LCP, LCP_ECHO_REPLY, cp->id, len - 4, skb->data + 4); goto out; case LCP_ECHO_REPLY: if (cp->id == ppp->echo_id) ppp->last_pong = jiffies; goto out; case LCP_DISC_REQ: /* discard */ goto out; } /* LCP, IPCP and IPV6CP */ switch (cp->code) { case CP_CONF_REQ: ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data); break; case CP_CONF_ACK: if (cp->id == proto->cr_id) ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL); break; case CP_CONF_REJ: case CP_CONF_NAK: if (cp->id == proto->cr_id) ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL); break; case CP_TERM_REQ: ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL); break; case CP_TERM_ACK: ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL); break; case CP_CODE_REJ: ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL); break; default: len += sizeof(struct cp_header); if (len > dev->mtu) len = dev->mtu; ppp_cp_event(dev, pid, RUC, 0, 0, len, cp); break; } goto out; rx_error: dev->stats.rx_errors++; out: 
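	/*
	 * All control frames terminate here: the skb is freed, and any
	 * replies queued while the lock was held are transmitted after
	 * the lock is dropped.
	 */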
spin_unlock_irqrestore(&ppp->lock, flags); dev_kfree_skb_any(skb); ppp_tx_flush(); return NET_RX_DROP; } static void ppp_timer(struct timer_list *t) { struct proto *proto = from_timer(proto, t, timer); struct ppp *ppp = get_ppp(proto->dev); unsigned long flags; spin_lock_irqsave(&ppp->lock, flags); /* mod_timer could be called after we entered this function but * before we got the lock. */ if (timer_pending(&proto->timer)) { spin_unlock_irqrestore(&ppp->lock, flags); return; } switch (proto->state) { case STOPPING: case REQ_SENT: case ACK_RECV: case ACK_SENT: if (proto->restart_counter) { ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, 0, NULL); proto->restart_counter--; } else if (netif_carrier_ok(proto->dev)) ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, 0, NULL); else ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, 0, NULL); break; case OPENED: if (proto->pid != PID_LCP) break; if (time_after(jiffies, ppp->last_pong + ppp->keepalive_timeout * HZ)) { netdev_info(proto->dev, "Link down\n"); ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL); ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL); } else { /* send keep-alive packet */ ppp->echo_id = ++ppp->seq; ppp_tx_cp(proto->dev, PID_LCP, LCP_ECHO_REQ, ppp->echo_id, 0, NULL); proto->timer.expires = jiffies + ppp->keepalive_interval * HZ; add_timer(&proto->timer); } break; } spin_unlock_irqrestore(&ppp->lock, flags); ppp_tx_flush(); } static void ppp_start(struct net_device *dev) { struct ppp *ppp = get_ppp(dev); int i; for (i = 0; i < IDX_COUNT; i++) { struct proto *proto = &ppp->protos[i]; proto->dev = dev; timer_setup(&proto->timer, ppp_timer, 0); proto->state = CLOSED; } ppp->protos[IDX_LCP].pid = PID_LCP; ppp->protos[IDX_IPCP].pid = PID_IPCP; ppp->protos[IDX_IPV6CP].pid = PID_IPV6CP; ppp_cp_event(dev, PID_LCP, START, 0, 0, 0, NULL); } static void ppp_stop(struct net_device *dev) { ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL); } static void ppp_close(struct net_device *dev) { ppp_tx_flush(); } static struct hdlc_proto proto = { .start = ppp_start, .stop = ppp_stop, .close = ppp_close, .type_trans = ppp_type_trans, .ioctl = ppp_ioctl, .netif_rx = ppp_rx, .module = THIS_MODULE, }; static const struct header_ops ppp_header_ops = { .create = ppp_hard_header, }; static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs) { hdlc_device *hdlc = dev_to_hdlc(dev); struct ppp *ppp; int result; switch (ifs->type) { case IF_GET_PROTO: if (dev_to_hdlc(dev)->proto != &proto) return -EINVAL; ifs->type = IF_PROTO_PPP; return 0; /* return protocol only, no settable parameters */ case IF_PROTO_PPP: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (dev->flags & IFF_UP) return -EBUSY; /* no settable parameters */ result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT); if (result) return result; result = attach_hdlc_protocol(dev, &proto, sizeof(struct ppp)); if (result) return result; ppp = get_ppp(dev); spin_lock_init(&ppp->lock); ppp->req_timeout = 2; ppp->cr_retries = 10; ppp->term_retries = 2; ppp->keepalive_interval = 10; ppp->keepalive_timeout = 60; dev->hard_header_len = sizeof(struct hdlc_header); dev->header_ops = &ppp_header_ops; dev->type = ARPHRD_PPP; call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); netif_dormant_on(dev); return 0; } return -EINVAL; } static int __init hdlc_ppp_init(void) { skb_queue_head_init(&tx_queue); register_hdlc_protocol(&proto); return 0; } static void __exit hdlc_ppp_exit(void) { unregister_hdlc_protocol(&proto); } module_init(hdlc_ppp_init); module_exit(hdlc_ppp_exit); 
MODULE_AUTHOR("Krzysztof Halasa <[email protected]>"); MODULE_DESCRIPTION("PPP protocol support for generic HDLC"); MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _AS5011_H #define _AS5011_H /* * Copyright (c) 2010, 2011 Fabien Marteau <[email protected]> */ struct as5011_platform_data { unsigned int axis_irq; /* irq number */ unsigned long axis_irqflags; char xp, xn; /* threshold for x axis */ char yp, yn; /* threshold for y axis */ }; #endif /* _AS5011_H */
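
/*
 * Illustrative usage sketch (not part of the mainline header): a board file
 * might pass this platform data to the as5011 driver roughly as follows;
 * the IRQ number and threshold values below are made up for the example.
 *
 *	static struct as5011_platform_data board_joystick_pdata = {
 *		.axis_irq	= 42,
 *		.axis_irqflags	= IRQF_TRIGGER_LOW,
 *		.xp = 26,
 *		.xn = -26,
 *		.yp = 26,
 *		.yn = -26,
 *	};
 */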
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_VMXFEATURES_H
#define _ASM_X86_VMXFEATURES_H

/*
 * Defines VMX CPU feature bits
 */
#define NVMXINTS			5 /* N 32-bit words worth of info */

/*
 * Note: If the comment begins with a quoted string, that string is used
 * in /proc/cpuinfo instead of the macro name. Otherwise, this feature bit
 * is not displayed in /proc/cpuinfo at all.
 */

/* Pin-Based VM-Execution Controls, EPT/VPID, APIC and VM-Functions, word 0 */
#define VMX_FEATURE_INTR_EXITING	( 0*32+  0) /* VM-Exit on vectored interrupts */
#define VMX_FEATURE_NMI_EXITING		( 0*32+  3) /* VM-Exit on NMIs */
#define VMX_FEATURE_VIRTUAL_NMIS	( 0*32+  5) /* "vnmi" NMI virtualization */
#define VMX_FEATURE_PREEMPTION_TIMER	( 0*32+  6) /* "preemption_timer" VMX Preemption Timer */
#define VMX_FEATURE_POSTED_INTR		( 0*32+  7) /* "posted_intr" Posted Interrupts */

/* EPT/VPID features, scattered to bits 16-23 */
#define VMX_FEATURE_INVVPID		( 0*32+ 16) /* "invvpid" INVVPID is supported */
#define VMX_FEATURE_EPT_EXECUTE_ONLY	( 0*32+ 17) /* "ept_x_only" EPT entries can be execute only */
#define VMX_FEATURE_EPT_AD		( 0*32+ 18) /* "ept_ad" EPT Accessed/Dirty bits */
#define VMX_FEATURE_EPT_1GB		( 0*32+ 19) /* "ept_1gb" 1GB EPT pages */
#define VMX_FEATURE_EPT_5LEVEL		( 0*32+ 20) /* "ept_5level" 5-level EPT paging */

/* Aggregated APIC features 24-27 */
#define VMX_FEATURE_FLEXPRIORITY	( 0*32+ 24) /* "flexpriority" TPR shadow + virt APIC */
#define VMX_FEATURE_APICV		( 0*32+ 25) /* "apicv" TPR shadow + APIC reg virt + virt intr delivery + posted interrupts */

/* VM-Functions, shifted to bits 28-31 */
#define VMX_FEATURE_EPTP_SWITCHING	( 0*32+ 28) /* "eptp_switching" EPTP switching (in guest) */

/* Primary Processor-Based VM-Execution Controls, word 1 */
#define VMX_FEATURE_INTR_WINDOW_EXITING	( 1*32+  2) /* VM-Exit if INTRs are unblocked in guest */
#define VMX_FEATURE_USE_TSC_OFFSETTING	( 1*32+  3) /* "tsc_offset" Offset hardware TSC when read in guest */
#define VMX_FEATURE_HLT_EXITING		( 1*32+  7) /* VM-Exit on HLT */
#define VMX_FEATURE_INVLPG_EXITING	( 1*32+  9) /* VM-Exit on INVLPG */
#define VMX_FEATURE_MWAIT_EXITING	( 1*32+ 10) /* VM-Exit on MWAIT */
#define VMX_FEATURE_RDPMC_EXITING	( 1*32+ 11) /* VM-Exit on RDPMC */
#define VMX_FEATURE_RDTSC_EXITING	( 1*32+ 12) /* VM-Exit on RDTSC */
#define VMX_FEATURE_CR3_LOAD_EXITING	( 1*32+ 15) /* VM-Exit on writes to CR3 */
#define VMX_FEATURE_CR3_STORE_EXITING	( 1*32+ 16) /* VM-Exit on reads from CR3 */
#define VMX_FEATURE_TERTIARY_CONTROLS	( 1*32+ 17) /* Enable Tertiary VM-Execution Controls */
#define VMX_FEATURE_CR8_LOAD_EXITING	( 1*32+ 19) /* VM-Exit on writes to CR8 */
#define VMX_FEATURE_CR8_STORE_EXITING	( 1*32+ 20) /* VM-Exit on reads from CR8 */
#define VMX_FEATURE_VIRTUAL_TPR		( 1*32+ 21) /* "vtpr" TPR virtualization, a.k.a. TPR shadow */
#define VMX_FEATURE_NMI_WINDOW_EXITING	( 1*32+ 22) /* VM-Exit if NMIs are unblocked in guest */
#define VMX_FEATURE_MOV_DR_EXITING	( 1*32+ 23) /* VM-Exit on accesses to debug registers */
#define VMX_FEATURE_UNCOND_IO_EXITING	( 1*32+ 24) /* VM-Exit on *all* IN{S} and OUT{S} */
#define VMX_FEATURE_USE_IO_BITMAPS	( 1*32+ 25) /* VM-Exit based on I/O port */
#define VMX_FEATURE_MONITOR_TRAP_FLAG	( 1*32+ 27) /* "mtf" VMX single-step VM-Exits */
#define VMX_FEATURE_USE_MSR_BITMAPS	( 1*32+ 28) /* VM-Exit based on MSR index */
#define VMX_FEATURE_MONITOR_EXITING	( 1*32+ 29) /* VM-Exit on MONITOR (MWAIT's accomplice) */
#define VMX_FEATURE_PAUSE_EXITING	( 1*32+ 30) /* VM-Exit on PAUSE (unconditionally) */
#define VMX_FEATURE_SEC_CONTROLS	( 1*32+ 31) /* Enable Secondary VM-Execution Controls */

/* Secondary Processor-Based VM-Execution Controls, word 2 */
#define VMX_FEATURE_VIRT_APIC_ACCESSES	( 2*32+  0) /* "vapic" Virtualize memory mapped APIC accesses */
#define VMX_FEATURE_EPT			( 2*32+  1) /* "ept" Extended Page Tables, a.k.a. Two-Dimensional Paging */
#define VMX_FEATURE_DESC_EXITING	( 2*32+  2) /* VM-Exit on {S,L}*DT instructions */
#define VMX_FEATURE_RDTSCP		( 2*32+  3) /* Enable RDTSCP in guest */
#define VMX_FEATURE_VIRTUAL_X2APIC	( 2*32+  4) /* Virtualize X2APIC for the guest */
#define VMX_FEATURE_VPID		( 2*32+  5) /* "vpid" Virtual Processor ID (TLB ASID modifier) */
#define VMX_FEATURE_WBINVD_EXITING	( 2*32+  6) /* VM-Exit on WBINVD */
#define VMX_FEATURE_UNRESTRICTED_GUEST	( 2*32+  7) /* "unrestricted_guest" Allow Big Real Mode and other "invalid" states */
#define VMX_FEATURE_APIC_REGISTER_VIRT	( 2*32+  8) /* "vapic_reg" Hardware emulation of reads to the virtual-APIC */
#define VMX_FEATURE_VIRT_INTR_DELIVERY	( 2*32+  9) /* "vid" Evaluation and delivery of pending virtual interrupts */
#define VMX_FEATURE_PAUSE_LOOP_EXITING	( 2*32+ 10) /* "ple" Conditionally VM-Exit on PAUSE at CPL0 */
#define VMX_FEATURE_RDRAND_EXITING	( 2*32+ 11) /* VM-Exit on RDRAND */
#define VMX_FEATURE_INVPCID		( 2*32+ 12) /* Enable INVPCID in guest */
#define VMX_FEATURE_VMFUNC		( 2*32+ 13) /* Enable VM-Functions (leaf dependent) */
#define VMX_FEATURE_SHADOW_VMCS		( 2*32+ 14) /* "shadow_vmcs" VMREAD/VMWRITE in guest can access shadow VMCS */
#define VMX_FEATURE_ENCLS_EXITING	( 2*32+ 15) /* VM-Exit on ENCLS (leaf dependent) */
#define VMX_FEATURE_RDSEED_EXITING	( 2*32+ 16) /* VM-Exit on RDSEED */
#define VMX_FEATURE_PAGE_MOD_LOGGING	( 2*32+ 17) /* "pml" Log dirty pages into buffer */
#define VMX_FEATURE_EPT_VIOLATION_VE	( 2*32+ 18) /* "ept_violation_ve" Conditionally reflect EPT violations as #VE exceptions */
#define VMX_FEATURE_PT_CONCEAL_VMX	( 2*32+ 19) /* Suppress VMX indicators in Processor Trace */
#define VMX_FEATURE_XSAVES		( 2*32+ 20) /* Enable XSAVES and XRSTORS in guest */
#define VMX_FEATURE_MODE_BASED_EPT_EXEC	( 2*32+ 22) /* "ept_mode_based_exec" Enable separate EPT EXEC bits for supervisor vs. user */
#define VMX_FEATURE_PT_USE_GPA		( 2*32+ 24) /* Processor Trace logs GPAs */
#define VMX_FEATURE_TSC_SCALING		( 2*32+ 25) /* "tsc_scaling" Scale hardware TSC when read in guest */
#define VMX_FEATURE_USR_WAIT_PAUSE	( 2*32+ 26) /* "usr_wait_pause" Enable TPAUSE, UMONITOR, UMWAIT in guest */
#define VMX_FEATURE_ENCLV_EXITING	( 2*32+ 28) /* VM-Exit on ENCLV (leaf dependent) */
#define VMX_FEATURE_BUS_LOCK_DETECTION	( 2*32+ 30) /* VM-Exit when bus lock caused */
#define VMX_FEATURE_NOTIFY_VM_EXITING	( 2*32+ 31) /* "notify_vm_exiting" VM-Exit when no event windows after notify window */

/* Tertiary Processor-Based VM-Execution Controls, word 3 */
#define VMX_FEATURE_IPI_VIRT		( 3*32+  4) /* "ipi_virt" Enable IPI virtualization */

#endif /* _ASM_X86_VMXFEATURES_H */
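Reader's note: each VMX_FEATURE_* value above is a plain (word * 32 + bit) index into NVMXINTS 32-bit capability words. A minimal sketch of how such an index decomposes, assuming a hypothetical caps[NVMXINTS] array filled from the VMX capability MSRs (the helper name is illustrative, not a kernel API):

/* Illustration only: decompose a VMX_FEATURE_* index into word and bit. */
static inline int has_vmx_feature(const u32 caps[NVMXINTS], unsigned int feature)
{
	/* word = feature / 32, bit = feature % 32 */
	return (caps[feature / 32] >> (feature % 32)) & 1;
}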
// SPDX-License-Identifier: GPL-2.0-only
/*
 * at91sam9g25.dtsi - Device Tree Include file for AT91SAM9G25 SoC
 *
 * Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <[email protected]>
 */

#include "at91sam9x5.dtsi"
#include "at91sam9x5_isi.dtsi"
#include "at91sam9x5_usart3.dtsi"
#include "at91sam9x5_macb0.dtsi"

/ {
	model = "Atmel AT91SAM9G25 SoC";
	compatible = "atmel,at91sam9g25", "atmel,at91sam9x5";

	ahb {
		apb {
			pinctrl@fffff400 {
				atmel,mux-mask = <
				      /*    A          B          C     */
					0xffffffff 0xffe0399f 0xc000001c	/* pioA */
					0x0007ffff 0x00047e3f 0x00000000	/* pioB */
					0x80000000 0x07c0ffff 0xb83fffff	/* pioC */
					0x003fffff 0x003f8000 0x00000000	/* pioD */
				      >;
			};

			pmc: clock-controller@fffffc00 {
				compatible = "atmel,at91sam9g25-pmc", "atmel,at91sam9x5-pmc", "syscon";
			};
		};
	};
};
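Reader's note: the atmel,mux-mask table above is read as one row per PIO bank and one column per peripheral function (A/B/C); a set bit N is taken here to mean pin N of that bank may be muxed to that function. A small sketch under that assumption (helper name hypothetical):

/* Sketch, assuming bit N of mask[bank][func] gates pin N for that function. */
static bool at91_pin_has_func(const u32 mask[][3], unsigned int bank,
			      unsigned int pin, unsigned int func)
{
	return (mask[bank][func] >> pin) & 1;
}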
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/clk-provider.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <dt-bindings/clock/qcom,sa8775p-dispcc.h> #include "clk-alpha-pll.h" #include "clk-branch.h" #include "clk-pll.h" #include "clk-rcg.h" #include "clk-regmap.h" #include "clk-regmap-divider.h" #include "clk-regmap-mux.h" #include "common.h" #include "gdsc.h" #include "reset.h" enum { DT_IFACE, DT_BI_TCXO, DT_BI_TCXO_AO, DT_SLEEP_CLK, DT_DP0_PHY_PLL_LINK_CLK, DT_DP0_PHY_PLL_VCO_DIV_CLK, DT_DP1_PHY_PLL_LINK_CLK, DT_DP1_PHY_PLL_VCO_DIV_CLK, DT_DSI0_PHY_PLL_OUT_BYTECLK, DT_DSI0_PHY_PLL_OUT_DSICLK, DT_DSI1_PHY_PLL_OUT_BYTECLK, DT_DSI1_PHY_PLL_OUT_DSICLK, }; enum { P_BI_TCXO, P_DP0_PHY_PLL_LINK_CLK, P_DP0_PHY_PLL_VCO_DIV_CLK, P_DP1_PHY_PLL_LINK_CLK, P_DP1_PHY_PLL_VCO_DIV_CLK, P_DSI0_PHY_PLL_OUT_BYTECLK, P_DSI0_PHY_PLL_OUT_DSICLK, P_DSI1_PHY_PLL_OUT_BYTECLK, P_DSI1_PHY_PLL_OUT_DSICLK, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, P_MDSS_1_DISP_CC_PLL1_OUT_EVEN, P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, P_SLEEP_CLK, }; static const struct pll_vco lucid_evo_vco[] = { { 249600000, 2020000000, 0 }, }; static const struct alpha_pll_config mdss_1_disp_cc_pll0_config = { .l = 0x3a, .alpha = 0x9800, .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00182261, .config_ctl_hi1_val = 0x32aa299c, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00400805, }; static struct clk_alpha_pll mdss_1_disp_cc_pll0 = { .offset = 0x0, .vco_table = lucid_evo_vco, .num_vco = ARRAY_SIZE(lucid_evo_vco), .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], .clkr = { .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_pll0", .parent_data = &(const struct clk_parent_data) { .index = DT_BI_TCXO, }, .num_parents = 1, .ops = &clk_alpha_pll_lucid_evo_ops, }, }, }; static const struct alpha_pll_config mdss_1_disp_cc_pll1_config = { .l = 0x1f, .alpha = 0x4000, .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00182261, .config_ctl_hi1_val = 0x32aa299c, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00400805, }; static struct clk_alpha_pll mdss_1_disp_cc_pll1 = { .offset = 0x1000, .vco_table = lucid_evo_vco, .num_vco = ARRAY_SIZE(lucid_evo_vco), .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], .clkr = { .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_pll1", .parent_data = &(const struct clk_parent_data) { .index = DT_BI_TCXO, }, .num_parents = 1, .ops = &clk_alpha_pll_lucid_evo_ops, }, }, }; static const struct parent_map disp_cc_1_parent_map_0[] = { { P_BI_TCXO, 0 }, { P_DP0_PHY_PLL_LINK_CLK, 1 }, { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 }, { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, }; static const struct clk_parent_data disp_cc_1_parent_data_0[] = { { .index = DT_BI_TCXO }, { .index = DT_DP0_PHY_PLL_LINK_CLK }, { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK }, { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK }, }; static const struct parent_map disp_cc_1_parent_map_1[] = { { P_BI_TCXO, 0 }, { P_DSI0_PHY_PLL_OUT_DSICLK, 1 }, { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, { P_DSI1_PHY_PLL_OUT_DSICLK, 3 }, { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, }; static const struct clk_parent_data disp_cc_1_parent_data_1[] = { { .index = DT_BI_TCXO }, { .index = DT_DSI0_PHY_PLL_OUT_DSICLK }, { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK }, { .index = DT_DSI1_PHY_PLL_OUT_DSICLK }, { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK }, }; static const struct 
parent_map disp_cc_1_parent_map_2[] = { { P_BI_TCXO, 0 }, }; static const struct clk_parent_data disp_cc_1_parent_data_2[] = { { .index = DT_BI_TCXO }, }; static const struct clk_parent_data disp_cc_1_parent_data_2_ao[] = { { .index = DT_BI_TCXO_AO }, }; static const struct parent_map disp_cc_1_parent_map_3[] = { { P_BI_TCXO, 0 }, { P_DP0_PHY_PLL_LINK_CLK, 1 }, { P_DP1_PHY_PLL_LINK_CLK, 2 }, }; static const struct clk_parent_data disp_cc_1_parent_data_3[] = { { .index = DT_BI_TCXO }, { .index = DT_DP0_PHY_PLL_LINK_CLK }, { .index = DT_DP1_PHY_PLL_LINK_CLK }, }; static const struct parent_map disp_cc_1_parent_map_4[] = { { P_BI_TCXO, 0 }, { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, }; static const struct clk_parent_data disp_cc_1_parent_data_4[] = { { .index = DT_BI_TCXO }, { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK }, { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK }, }; static const struct parent_map disp_cc_1_parent_map_5[] = { { P_BI_TCXO, 0 }, { P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 4 }, { P_MDSS_1_DISP_CC_PLL1_OUT_EVEN, 6 }, }; static const struct clk_parent_data disp_cc_1_parent_data_5[] = { { .index = DT_BI_TCXO }, { .hw = &mdss_1_disp_cc_pll1.clkr.hw }, { .hw = &mdss_1_disp_cc_pll1.clkr.hw }, }; static const struct parent_map disp_cc_1_parent_map_6[] = { { P_BI_TCXO, 0 }, { P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 1 }, { P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 4 }, { P_MDSS_1_DISP_CC_PLL1_OUT_EVEN, 6 }, }; static const struct clk_parent_data disp_cc_1_parent_data_6[] = { { .index = DT_BI_TCXO }, { .hw = &mdss_1_disp_cc_pll0.clkr.hw }, { .hw = &mdss_1_disp_cc_pll1.clkr.hw }, { .hw = &mdss_1_disp_cc_pll1.clkr.hw }, }; static const struct parent_map disp_cc_1_parent_map_7[] = { { P_SLEEP_CLK, 0 }, }; static const struct clk_parent_data disp_cc_1_parent_data_7_ao[] = { { .index = DT_SLEEP_CLK }, }; static const struct freq_tbl ftbl_mdss_1_disp_cc_mdss_ahb_clk_src[] = { F(37500000, P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0), F(75000000, P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0), { } }; static struct clk_rcg2 mdss_1_disp_cc_mdss_ahb_clk_src = { .cmd_rcgr = 0x824c, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_5, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_ahb_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_ahb_clk_src", .parent_data = disp_cc_1_parent_data_5, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_5), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, }; static const struct freq_tbl ftbl_mdss_1_disp_cc_mdss_byte0_clk_src[] = { F(19200000, P_BI_TCXO, 1, 0, 0), { } }; static struct clk_rcg2 mdss_1_disp_cc_mdss_byte0_clk_src = { .cmd_rcgr = 0x80ec, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_1, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_byte0_clk_src", .parent_data = disp_cc_1_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_byte1_clk_src = { .cmd_rcgr = 0x8108, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_1, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_byte1_clk_src", .parent_data = disp_cc_1_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_aux_clk_src = { .cmd_rcgr = 0x81b8, 
.mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_2, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_aux_clk_src", .parent_data = disp_cc_1_parent_data_2, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_crypto_clk_src = { .cmd_rcgr = 0x8170, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_3, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_crypto_clk_src", .parent_data = disp_cc_1_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_link_clk_src = { .cmd_rcgr = 0x8154, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_3, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_link_clk_src", .parent_data = disp_cc_1_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src = { .cmd_rcgr = 0x8188, .mnd_width = 16, .hid_width = 5, .parent_map = disp_cc_1_parent_map_0, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src", .parent_data = disp_cc_1_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src = { .cmd_rcgr = 0x81a0, .mnd_width = 16, .hid_width = 5, .parent_map = disp_cc_1_parent_map_0, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src", .parent_data = disp_cc_1_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src = { .cmd_rcgr = 0x826c, .mnd_width = 16, .hid_width = 5, .parent_map = disp_cc_1_parent_map_0, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src", .parent_data = disp_cc_1_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src = { .cmd_rcgr = 0x8284, .mnd_width = 16, .hid_width = 5, .parent_map = disp_cc_1_parent_map_0, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src", .parent_data = disp_cc_1_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_aux_clk_src = { .cmd_rcgr = 0x8234, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_2, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx1_aux_clk_src", .parent_data = disp_cc_1_parent_data_2, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2), .flags = 
CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_crypto_clk_src = { .cmd_rcgr = 0x821c, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_3, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx1_crypto_clk_src", .parent_data = disp_cc_1_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_link_clk_src = { .cmd_rcgr = 0x8200, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_3, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx1_link_clk_src", .parent_data = disp_cc_1_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src = { .cmd_rcgr = 0x81d0, .mnd_width = 16, .hid_width = 5, .parent_map = disp_cc_1_parent_map_0, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src", .parent_data = disp_cc_1_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src = { .cmd_rcgr = 0x81e8, .mnd_width = 16, .hid_width = 5, .parent_map = disp_cc_1_parent_map_0, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src", .parent_data = disp_cc_1_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_esc0_clk_src = { .cmd_rcgr = 0x8124, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_4, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_esc0_clk_src", .parent_data = disp_cc_1_parent_data_4, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_4), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_esc1_clk_src = { .cmd_rcgr = 0x813c, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_4, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_esc1_clk_src", .parent_data = disp_cc_1_parent_data_4, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_4), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, }; static const struct freq_tbl ftbl_mdss_1_disp_cc_mdss_mdp_clk_src[] = { F(375000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(500000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(575000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(650000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), { } }; static struct clk_rcg2 mdss_1_disp_cc_mdss_mdp_clk_src = { .cmd_rcgr = 0x80bc, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_6, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_mdp_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_mdp_clk_src", .parent_data = disp_cc_1_parent_data_6, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_6), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, }; static struct 
clk_rcg2 mdss_1_disp_cc_mdss_pclk0_clk_src = { .cmd_rcgr = 0x808c, .mnd_width = 8, .hid_width = 5, .parent_map = disp_cc_1_parent_map_1, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_pclk0_clk_src", .parent_data = disp_cc_1_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1), .flags = CLK_SET_RATE_PARENT, .ops = &clk_pixel_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_pclk1_clk_src = { .cmd_rcgr = 0x80a4, .mnd_width = 8, .hid_width = 5, .parent_map = disp_cc_1_parent_map_1, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_pclk1_clk_src", .parent_data = disp_cc_1_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1), .flags = CLK_SET_RATE_PARENT, .ops = &clk_pixel_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_mdss_vsync_clk_src = { .cmd_rcgr = 0x80d4, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_2, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_vsync_clk_src", .parent_data = disp_cc_1_parent_data_2, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, }; static const struct freq_tbl ftbl_mdss_1_disp_cc_sleep_clk_src[] = { F(32000, P_SLEEP_CLK, 1, 0, 0), { } }; static struct clk_rcg2 mdss_1_disp_cc_sleep_clk_src = { .cmd_rcgr = 0xc058, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_7, .freq_tbl = ftbl_mdss_1_disp_cc_sleep_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_sleep_clk_src", .parent_data = disp_cc_1_parent_data_7_ao, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_7_ao), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, }; static struct clk_rcg2 mdss_1_disp_cc_xo_clk_src = { .cmd_rcgr = 0xc03c, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_1_parent_map_2, .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_xo_clk_src", .parent_data = disp_cc_1_parent_data_2_ao, .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2_ao), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, }; static struct clk_regmap_div mdss_1_disp_cc_mdss_byte0_div_clk_src = { .reg = 0x8104, .shift = 0, .width = 4, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_byte0_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_byte0_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_regmap_div_ops, }, }; static struct clk_regmap_div mdss_1_disp_cc_mdss_byte1_div_clk_src = { .reg = 0x8120, .shift = 0, .width = 4, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_byte1_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_byte1_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_regmap_div_ops, }, }; static struct clk_regmap_div mdss_1_disp_cc_mdss_dptx0_link_div_clk_src = { .reg = 0x816c, .shift = 0, .width = 4, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_link_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx0_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_regmap_div_ro_ops, }, }; static struct clk_regmap_div mdss_1_disp_cc_mdss_dptx1_link_div_clk_src = { 
.reg = 0x8218, .shift = 0, .width = 4, .clkr.hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx1_link_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx1_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_regmap_div_ro_ops, }, }; static struct clk_branch mdss_1_disp_cc_mdss_ahb1_clk = { .halt_reg = 0x8088, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8088, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_ahb1_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_ahb_clk = { .halt_reg = 0x8084, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8084, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_ahb_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_byte0_clk = { .halt_reg = 0x8034, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8034, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_byte0_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_byte0_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_byte0_intf_clk = { .halt_reg = 0x8038, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8038, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_byte0_intf_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_byte0_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_byte1_clk = { .halt_reg = 0x803c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x803c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_byte1_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_byte1_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_byte1_intf_clk = { .halt_reg = 0x8040, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8040, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_byte1_intf_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_byte1_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx0_aux_clk = { .halt_reg = 0x805c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x805c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_aux_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx0_aux_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx0_crypto_clk = { .halt_reg = 0x8058, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8058, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_crypto_clk", .parent_hws = (const struct clk_hw*[]) { 
&mdss_1_disp_cc_mdss_dptx0_crypto_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx0_link_clk = { .halt_reg = 0x804c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x804c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_link_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx0_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx0_link_intf_clk = { .halt_reg = 0x8050, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8050, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel0_clk = { .halt_reg = 0x8060, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8060, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_pixel0_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel1_clk = { .halt_reg = 0x8064, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8064, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_pixel1_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel2_clk = { .halt_reg = 0x8264, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8264, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_pixel2_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel3_clk = { .halt_reg = 0x8268, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8268, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_pixel3_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx0_usb_router_link_intf_clk = { .halt_reg = 0x8054, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8054, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx0_usb_router_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx1_aux_clk = { .halt_reg = 0x8080, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8080, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx1_aux_clk", .parent_hws = (const struct clk_hw*[]) { 
&mdss_1_disp_cc_mdss_dptx1_aux_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx1_crypto_clk = { .halt_reg = 0x807c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x807c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx1_crypto_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx1_crypto_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx1_link_clk = { .halt_reg = 0x8070, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8070, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx1_link_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx1_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx1_link_intf_clk = { .halt_reg = 0x8074, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8074, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx1_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx1_pixel0_clk = { .halt_reg = 0x8068, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8068, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx1_pixel0_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx1_pixel1_clk = { .halt_reg = 0x806c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x806c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx1_pixel1_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_dptx1_usb_router_link_intf_clk = { .halt_reg = 0x8078, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8078, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_dptx1_usb_router_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_esc0_clk = { .halt_reg = 0x8044, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8044, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_esc0_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_esc0_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_esc1_clk = { .halt_reg = 0x8048, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8048, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_esc1_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_esc1_clk_src.clkr.hw, }, .num_parents = 1, .flags = 
CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_mdp1_clk = { .halt_reg = 0x8014, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8014, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_mdp1_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_mdp_clk = { .halt_reg = 0x800c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x800c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_mdp_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_mdp_lut1_clk = { .halt_reg = 0x8024, .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0x8024, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_mdp_lut1_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_mdp_lut_clk = { .halt_reg = 0x801c, .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0x801c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_mdp_lut_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_non_gdsc_ahb_clk = { .halt_reg = 0xa004, .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0xa004, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_non_gdsc_ahb_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_pclk0_clk = { .halt_reg = 0x8004, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8004, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_pclk0_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_pclk0_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_pclk1_clk = { .halt_reg = 0x8008, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8008, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_pclk1_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_pclk1_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_pll_lock_monitor_clk = { .halt_reg = 0xe000, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0xe000, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_pll_lock_monitor_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_xo_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_rscc_ahb_clk = { .halt_reg = 0xa00c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 
0xa00c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_rscc_ahb_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_rscc_vsync_clk = { .halt_reg = 0xa008, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0xa008, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_rscc_vsync_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_vsync_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_vsync1_clk = { .halt_reg = 0x8030, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8030, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_vsync1_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_vsync_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_mdss_vsync_clk = { .halt_reg = 0x802c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x802c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_mdss_vsync_clk", .parent_hws = (const struct clk_hw*[]) { &mdss_1_disp_cc_mdss_vsync_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_1_disp_cc_sm_obs_clk = { .halt_reg = 0x11014, .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x11014, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "mdss_1_disp_cc_sm_obs_clk", .ops = &clk_branch2_ops, }, }, }; static struct gdsc mdss_1_disp_cc_mdss_core_gdsc = { .gdscr = 0x9000, .en_rest_wait_val = 0x2, .en_few_wait_val = 0x2, .clk_dis_wait_val = 0xf, .pd = { .name = "mdss_1_disp_cc_mdss_core_gdsc", }, .pwrsts = PWRSTS_OFF_ON, .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL, }; static struct gdsc mdss_1_disp_cc_mdss_core_int2_gdsc = { .gdscr = 0xd000, .en_rest_wait_val = 0x2, .en_few_wait_val = 0x2, .clk_dis_wait_val = 0xf, .pd = { .name = "mdss_1_disp_cc_mdss_core_int2_gdsc", }, .pwrsts = PWRSTS_OFF_ON, .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL, }; static struct clk_regmap *disp_cc_1_sa8775p_clocks[] = { [MDSS_DISP_CC_MDSS_AHB1_CLK] = &mdss_1_disp_cc_mdss_ahb1_clk.clkr, [MDSS_DISP_CC_MDSS_AHB_CLK] = &mdss_1_disp_cc_mdss_ahb_clk.clkr, [MDSS_DISP_CC_MDSS_AHB_CLK_SRC] = &mdss_1_disp_cc_mdss_ahb_clk_src.clkr, [MDSS_DISP_CC_MDSS_BYTE0_CLK] = &mdss_1_disp_cc_mdss_byte0_clk.clkr, [MDSS_DISP_CC_MDSS_BYTE0_CLK_SRC] = &mdss_1_disp_cc_mdss_byte0_clk_src.clkr, [MDSS_DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &mdss_1_disp_cc_mdss_byte0_div_clk_src.clkr, [MDSS_DISP_CC_MDSS_BYTE0_INTF_CLK] = &mdss_1_disp_cc_mdss_byte0_intf_clk.clkr, [MDSS_DISP_CC_MDSS_BYTE1_CLK] = &mdss_1_disp_cc_mdss_byte1_clk.clkr, [MDSS_DISP_CC_MDSS_BYTE1_CLK_SRC] = &mdss_1_disp_cc_mdss_byte1_clk_src.clkr, [MDSS_DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &mdss_1_disp_cc_mdss_byte1_div_clk_src.clkr, [MDSS_DISP_CC_MDSS_BYTE1_INTF_CLK] = &mdss_1_disp_cc_mdss_byte1_intf_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK] = &mdss_1_disp_cc_mdss_dptx0_aux_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_aux_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &mdss_1_disp_cc_mdss_dptx0_crypto_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK_SRC] 
= &mdss_1_disp_cc_mdss_dptx0_crypto_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK] = &mdss_1_disp_cc_mdss_dptx0_link_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_link_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_link_div_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &mdss_1_disp_cc_mdss_dptx0_link_intf_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel0_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel1_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel2_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel3_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] = &mdss_1_disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK] = &mdss_1_disp_cc_mdss_dptx1_aux_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_aux_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &mdss_1_disp_cc_mdss_dptx1_crypto_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_crypto_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK] = &mdss_1_disp_cc_mdss_dptx1_link_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_link_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_link_div_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &mdss_1_disp_cc_mdss_dptx1_link_intf_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &mdss_1_disp_cc_mdss_dptx1_pixel0_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &mdss_1_disp_cc_mdss_dptx1_pixel1_clk.clkr, [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src.clkr, [MDSS_DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] = &mdss_1_disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr, [MDSS_DISP_CC_MDSS_ESC0_CLK] = &mdss_1_disp_cc_mdss_esc0_clk.clkr, [MDSS_DISP_CC_MDSS_ESC0_CLK_SRC] = &mdss_1_disp_cc_mdss_esc0_clk_src.clkr, [MDSS_DISP_CC_MDSS_ESC1_CLK] = &mdss_1_disp_cc_mdss_esc1_clk.clkr, [MDSS_DISP_CC_MDSS_ESC1_CLK_SRC] = &mdss_1_disp_cc_mdss_esc1_clk_src.clkr, [MDSS_DISP_CC_MDSS_MDP1_CLK] = &mdss_1_disp_cc_mdss_mdp1_clk.clkr, [MDSS_DISP_CC_MDSS_MDP_CLK] = &mdss_1_disp_cc_mdss_mdp_clk.clkr, [MDSS_DISP_CC_MDSS_MDP_CLK_SRC] = &mdss_1_disp_cc_mdss_mdp_clk_src.clkr, [MDSS_DISP_CC_MDSS_MDP_LUT1_CLK] = &mdss_1_disp_cc_mdss_mdp_lut1_clk.clkr, [MDSS_DISP_CC_MDSS_MDP_LUT_CLK] = &mdss_1_disp_cc_mdss_mdp_lut_clk.clkr, [MDSS_DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &mdss_1_disp_cc_mdss_non_gdsc_ahb_clk.clkr, [MDSS_DISP_CC_MDSS_PCLK0_CLK] = &mdss_1_disp_cc_mdss_pclk0_clk.clkr, [MDSS_DISP_CC_MDSS_PCLK0_CLK_SRC] = &mdss_1_disp_cc_mdss_pclk0_clk_src.clkr, [MDSS_DISP_CC_MDSS_PCLK1_CLK] = &mdss_1_disp_cc_mdss_pclk1_clk.clkr, [MDSS_DISP_CC_MDSS_PCLK1_CLK_SRC] = &mdss_1_disp_cc_mdss_pclk1_clk_src.clkr, [MDSS_DISP_CC_MDSS_PLL_LOCK_MONITOR_CLK] = &mdss_1_disp_cc_mdss_pll_lock_monitor_clk.clkr, [MDSS_DISP_CC_MDSS_RSCC_AHB_CLK] = &mdss_1_disp_cc_mdss_rscc_ahb_clk.clkr, 
[MDSS_DISP_CC_MDSS_RSCC_VSYNC_CLK] = &mdss_1_disp_cc_mdss_rscc_vsync_clk.clkr, [MDSS_DISP_CC_MDSS_VSYNC1_CLK] = &mdss_1_disp_cc_mdss_vsync1_clk.clkr, [MDSS_DISP_CC_MDSS_VSYNC_CLK] = &mdss_1_disp_cc_mdss_vsync_clk.clkr, [MDSS_DISP_CC_MDSS_VSYNC_CLK_SRC] = &mdss_1_disp_cc_mdss_vsync_clk_src.clkr, [MDSS_DISP_CC_PLL0] = &mdss_1_disp_cc_pll0.clkr, [MDSS_DISP_CC_PLL1] = &mdss_1_disp_cc_pll1.clkr, [MDSS_DISP_CC_SLEEP_CLK_SRC] = &mdss_1_disp_cc_sleep_clk_src.clkr, [MDSS_DISP_CC_SM_OBS_CLK] = &mdss_1_disp_cc_sm_obs_clk.clkr, [MDSS_DISP_CC_XO_CLK_SRC] = &mdss_1_disp_cc_xo_clk_src.clkr, }; static struct gdsc *disp_cc_1_sa8775p_gdscs[] = { [MDSS_DISP_CC_MDSS_CORE_GDSC] = &mdss_1_disp_cc_mdss_core_gdsc, [MDSS_DISP_CC_MDSS_CORE_INT2_GDSC] = &mdss_1_disp_cc_mdss_core_int2_gdsc, }; static const struct qcom_reset_map disp_cc_1_sa8775p_resets[] = { [MDSS_DISP_CC_MDSS_CORE_BCR] = { 0x8000 }, [MDSS_DISP_CC_MDSS_RSCC_BCR] = { 0xa000 }, }; static const struct regmap_config disp_cc_1_sa8775p_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, .max_register = 0x12414, .fast_io = true, }; static struct qcom_cc_desc disp_cc_1_sa8775p_desc = { .config = &disp_cc_1_sa8775p_regmap_config, .clks = disp_cc_1_sa8775p_clocks, .num_clks = ARRAY_SIZE(disp_cc_1_sa8775p_clocks), .resets = disp_cc_1_sa8775p_resets, .num_resets = ARRAY_SIZE(disp_cc_1_sa8775p_resets), .gdscs = disp_cc_1_sa8775p_gdscs, .num_gdscs = ARRAY_SIZE(disp_cc_1_sa8775p_gdscs), }; static const struct of_device_id disp_cc_1_sa8775p_match_table[] = { { .compatible = "qcom,sa8775p-dispcc1" }, { } }; MODULE_DEVICE_TABLE(of, disp_cc_1_sa8775p_match_table); static int disp_cc_1_sa8775p_probe(struct platform_device *pdev) { struct regmap *regmap; int ret; ret = devm_pm_runtime_enable(&pdev->dev); if (ret) return ret; ret = pm_runtime_resume_and_get(&pdev->dev); if (ret) return ret; regmap = qcom_cc_map(pdev, &disp_cc_1_sa8775p_desc); if (IS_ERR(regmap)) { pm_runtime_put(&pdev->dev); return PTR_ERR(regmap); } clk_lucid_evo_pll_configure(&mdss_1_disp_cc_pll0, regmap, &mdss_1_disp_cc_pll0_config); clk_lucid_evo_pll_configure(&mdss_1_disp_cc_pll1, regmap, &mdss_1_disp_cc_pll1_config); /* Keep some clocks always enabled */ qcom_branch_set_clk_en(regmap, 0xc070); /* MDSS_1_DISP_CC_SLEEP_CLK */ qcom_branch_set_clk_en(regmap, 0xc054); /* MDSS_1_DISP_CC_XO_CLK */ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_1_sa8775p_desc, regmap); pm_runtime_put(&pdev->dev); return ret; } static struct platform_driver disp_cc_1_sa8775p_driver = { .probe = disp_cc_1_sa8775p_probe, .driver = { .name = "dispcc1-sa8775p", .of_match_table = disp_cc_1_sa8775p_match_table, }, }; module_platform_driver(disp_cc_1_sa8775p_driver); MODULE_DESCRIPTION("QTI DISPCC1 SA8775P Driver"); MODULE_LICENSE("GPL");
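Reader's note: each F(rate, src, div, m, n) row in the frequency tables above pairs a target rate with a parent source, a pre-divider and an optional M/N stage. A rough model of the resulting RCG output rate, ignoring the half-integer divider encoding used in the actual register layout (function name illustrative):

/* Rough model: rate = (parent / div) * m / n, the M/N stage being optional. */
static unsigned long rcg_model_rate(unsigned long parent, unsigned int div,
				    unsigned int m, unsigned int n)
{
	unsigned long rate = parent / div;	/* pre-divider */

	return n ? rate * m / n : rate;		/* optional fractional stage */
}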
// SPDX-License-Identifier: GPL-2.0

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/inetdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>

#include <net/netfilter/nf_nat_masquerade.h>

struct masq_dev_work {
	struct work_struct work;
	struct net *net;
	netns_tracker ns_tracker;
	union nf_inet_addr addr;
	int ifindex;
	int (*iter)(struct nf_conn *i, void *data);
};

#define MAX_MASQ_WORKER_COUNT	16

static DEFINE_MUTEX(masq_mutex);
static unsigned int masq_refcnt __read_mostly;
static atomic_t masq_worker_count __read_mostly;

unsigned int
nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
		       const struct nf_nat_range2 *range,
		       const struct net_device *out)
{
	struct nf_conn *ct;
	struct nf_conn_nat *nat;
	enum ip_conntrack_info ctinfo;
	struct nf_nat_range2 newrange;
	const struct rtable *rt;
	__be32 newsrc, nh;

	WARN_ON(hooknum != NF_INET_POST_ROUTING);

	ct = nf_ct_get(skb, &ctinfo);

	WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
			 ctinfo == IP_CT_RELATED_REPLY)));

	/* Source address is 0.0.0.0 - locally generated packet that is
	 * probably not supposed to be masqueraded.
	 */
	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0)
		return NF_ACCEPT;

	rt = skb_rtable(skb);
	nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
	newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE);
	if (!newsrc) {
		pr_info("%s ate my IP address\n", out->name);
		return NF_DROP;
	}

	nat = nf_ct_nat_ext_add(ct);
	if (nat)
		nat->masq_index = out->ifindex;

	/* Transfer from original range. */
	memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
	memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
	newrange.flags       = range->flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr.ip = newsrc;
	newrange.max_addr.ip = newsrc;
	newrange.min_proto   = range->min_proto;
	newrange.max_proto   = range->max_proto;

	/* Hand modified range to generic setup. */
	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);

static void iterate_cleanup_work(struct work_struct *work)
{
	struct nf_ct_iter_data iter_data = {};
	struct masq_dev_work *w;

	w = container_of(work, struct masq_dev_work, work);

	iter_data.net = w->net;
	iter_data.data = (void *)w;
	nf_ct_iterate_cleanup_net(w->iter, &iter_data);

	put_net_track(w->net, &w->ns_tracker);
	kfree(w);
	atomic_dec(&masq_worker_count);
	module_put(THIS_MODULE);
}

/* Iterate conntrack table in the background and remove conntrack entries
 * that use the device/address being removed.
 *
 * In case too many work items have been queued already or memory allocation
 * fails iteration is skipped, conntrack entries will time out eventually.
 */
static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,
				 int ifindex,
				 int (*iter)(struct nf_conn *i, void *data),
				 gfp_t gfp_flags)
{
	struct masq_dev_work *w;

	if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT)
		return;

	net = maybe_get_net(net);
	if (!net)
		return;

	if (!try_module_get(THIS_MODULE))
		goto err_module;

	w = kzalloc(sizeof(*w), gfp_flags);
	if (w) {
		/* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */
		atomic_inc(&masq_worker_count);

		INIT_WORK(&w->work, iterate_cleanup_work);
		w->ifindex = ifindex;
		w->net = net;
		netns_tracker_alloc(net, &w->ns_tracker, gfp_flags);
		w->iter = iter;
		if (addr)
			w->addr = *addr;
		schedule_work(&w->work);
		return;
	}

	module_put(THIS_MODULE);
err_module:
	put_net(net);
}

static int device_cmp(struct nf_conn *i, void *arg)
{
	const struct nf_conn_nat *nat = nfct_nat(i);
	const struct masq_dev_work *w = arg;

	if (!nat)
		return 0;
	return nat->masq_index == w->ifindex;
}

static int masq_device_event(struct notifier_block *this,
			     unsigned long event,
			     void *ptr)
{
	const struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (event == NETDEV_DOWN) {
		/* Device was downed. Search entire table for
		 * conntracks which were associated with that device,
		 * and forget them.
		 */
		nf_nat_masq_schedule(net, NULL, dev->ifindex,
				     device_cmp, GFP_KERNEL);
	}

	return NOTIFY_DONE;
}

static int inet_cmp(struct nf_conn *ct, void *ptr)
{
	struct nf_conntrack_tuple *tuple;
	struct masq_dev_work *w = ptr;

	if (!device_cmp(ct, ptr))
		return 0;

	tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;

	return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);
}

static int masq_inet_event(struct notifier_block *this,
			   unsigned long event,
			   void *ptr)
{
	const struct in_ifaddr *ifa = ptr;
	const struct in_device *idev;
	const struct net_device *dev;
	union nf_inet_addr addr;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	/* The masq_dev_notifier will catch the case of the device going
	 * down. So if the inetdev is dead and being destroyed we have
	 * no work to do. Otherwise this is an individual address removal
	 * and we have to perform the flush.
	 */
	idev = ifa->ifa_dev;
	if (idev->dead)
		return NOTIFY_DONE;

	memset(&addr, 0, sizeof(addr));

	addr.ip = ifa->ifa_address;

	dev = idev->dev;
	nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex,
			     inet_cmp, GFP_KERNEL);

	return NOTIFY_DONE;
}

static struct notifier_block masq_dev_notifier = {
	.notifier_call	= masq_device_event,
};

static struct notifier_block masq_inet_notifier = {
	.notifier_call	= masq_inet_event,
};

#if IS_ENABLED(CONFIG_IPV6)
static int
nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
		       const struct in6_addr *daddr, unsigned int srcprefs,
		       struct in6_addr *saddr)
{
#ifdef CONFIG_IPV6_MODULE
	const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();

	if (!v6_ops)
		return -EHOSTUNREACH;

	return v6_ops->dev_get_saddr(net, dev, daddr, srcprefs, saddr);
#else
	return ipv6_dev_get_saddr(net, dev, daddr, srcprefs, saddr);
#endif
}

unsigned int
nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
		       const struct net_device *out)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	struct in6_addr src;
	struct nf_conn *ct;
	struct nf_nat_range2 newrange;

	ct = nf_ct_get(skb, &ctinfo);
	WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
			 ctinfo == IP_CT_RELATED_REPLY)));

	if (nat_ipv6_dev_get_saddr(nf_ct_net(ct), out,
				   &ipv6_hdr(skb)->daddr, 0, &src) < 0)
		return NF_DROP;

	nat = nf_ct_nat_ext_add(ct);
	if (nat)
		nat->masq_index = out->ifindex;

	newrange.flags		= range->flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr.in6	= src;
	newrange.max_addr.in6	= src;
	newrange.min_proto	= range->min_proto;
	newrange.max_proto	= range->max_proto;

	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);

/* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
 *
 * Defer it to the system workqueue.
 *
 * As we can have 'a lot' of inet_events (depending on amount of ipv6
 * addresses being deleted), we also need to limit work item queue.
 */
static int masq_inet6_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = ptr;
	const struct net_device *dev;
	union nf_inet_addr addr;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	dev = ifa->idev->dev;

	memset(&addr, 0, sizeof(addr));

	addr.in6 = ifa->addr;

	nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,
			     GFP_ATOMIC);

	return NOTIFY_DONE;
}

static struct notifier_block masq_inet6_notifier = {
	.notifier_call	= masq_inet6_event,
};

static int nf_nat_masquerade_ipv6_register_notifier(void)
{
	return register_inet6addr_notifier(&masq_inet6_notifier);
}
#else
static inline int nf_nat_masquerade_ipv6_register_notifier(void)
{
	return 0;
}
#endif

int nf_nat_masquerade_inet_register_notifiers(void)
{
	int ret = 0;

	mutex_lock(&masq_mutex);
	if (WARN_ON_ONCE(masq_refcnt == UINT_MAX)) {
		ret = -EOVERFLOW;
		goto out_unlock;
	}

	/* check if the notifier was already set */
	if (++masq_refcnt > 1)
		goto out_unlock;

	/* Register for device down reports */
	ret = register_netdevice_notifier(&masq_dev_notifier);
	if (ret)
		goto err_dec;
	/* Register IP address change reports */
	ret = register_inetaddr_notifier(&masq_inet_notifier);
	if (ret)
		goto err_unregister;

	ret = nf_nat_masquerade_ipv6_register_notifier();
	if (ret)
		goto err_unreg_inet;

	mutex_unlock(&masq_mutex);
	return ret;
err_unreg_inet:
	unregister_inetaddr_notifier(&masq_inet_notifier);
err_unregister:
	unregister_netdevice_notifier(&masq_dev_notifier);
err_dec:
	masq_refcnt--;
out_unlock:
	mutex_unlock(&masq_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_inet_register_notifiers);

void nf_nat_masquerade_inet_unregister_notifiers(void)
{
	mutex_lock(&masq_mutex);
	/* check if the notifiers still have clients */
	if (--masq_refcnt > 0)
		goto out_unlock;

	unregister_netdevice_notifier(&masq_dev_notifier);
	unregister_inetaddr_notifier(&masq_inet_notifier);
#if IS_ENABLED(CONFIG_IPV6)
	unregister_inet6addr_notifier(&masq_inet6_notifier);
#endif
out_unlock:
	mutex_unlock(&masq_mutex);
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_inet_unregister_notifiers);
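Reader's note: a masquerade target module consumes these helpers by taking a notifier reference once at init and dropping it at exit, with the per-family masquerade functions called from packet context. A minimal sketch mirroring how the xt_MASQUERADE target does this (module and function names hypothetical):

/* Sketch of a consumer module for the helpers exported above. */
static int __init my_masq_init(void)
{
	/* One shared refcount arms the netdev/inetaddr/inet6addr notifiers. */
	return nf_nat_masquerade_inet_register_notifiers();
}

static void __exit my_masq_exit(void)
{
	nf_nat_masquerade_inet_unregister_notifiers();
}

module_init(my_masq_init);
module_exit(my_masq_exit);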
/* SPDX-License-Identifier: GPL-2.0 */
/* internal file - do not include directly */

#ifdef CONFIG_NET
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter, struct __sk_buff, struct sk_buff)
BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act, struct __sk_buff, struct sk_buff)
BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act, struct __sk_buff, struct sk_buff)
BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp, struct xdp_md, struct xdp_buff)
#ifdef CONFIG_CGROUP_BPF
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb, struct __sk_buff, struct sk_buff)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock, struct bpf_sock, struct sock)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr, struct bpf_sock_addr, struct bpf_sock_addr_kern)
#endif
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in, struct __sk_buff, struct sk_buff)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out, struct __sk_buff, struct sk_buff)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit, struct __sk_buff, struct sk_buff)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local, struct __sk_buff, struct sk_buff)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops, struct bpf_sock_ops, struct bpf_sock_ops_kern)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb, struct __sk_buff, struct sk_buff)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg, struct sk_msg_md, struct sk_msg)
BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector, struct __sk_buff, struct bpf_flow_dissector)
#endif
#ifdef CONFIG_BPF_EVENTS
BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe, bpf_user_pt_regs_t, struct pt_regs)
BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint, __u64, u64)
BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event, struct bpf_perf_event_data, struct bpf_perf_event_data_kern)
BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint, struct bpf_raw_tracepoint_args, u64)
BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable, struct bpf_raw_tracepoint_args, u64)
BPF_PROG_TYPE(BPF_PROG_TYPE_TRACING, tracing, void *, void *)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev, struct bpf_cgroup_dev_ctx, struct bpf_cgroup_dev_ctx)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl, struct bpf_sysctl, struct bpf_sysctl_kern)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt, struct bpf_sockopt, struct bpf_sockopt_kern)
#endif
#ifdef CONFIG_BPF_LIRC_MODE2
BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2, __u32, u32)
#endif
#ifdef CONFIG_INET
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport, struct sk_reuseport_md, struct sk_reuseport_kern)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_LOOKUP, sk_lookup, struct bpf_sk_lookup, struct bpf_sk_lookup_kern)
#endif
#if defined(CONFIG_BPF_JIT)
BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops, void *, void *)
BPF_PROG_TYPE(BPF_PROG_TYPE_EXT, bpf_extension, void *, void *)
#ifdef CONFIG_BPF_LSM
BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm, void *, void *)
#endif /* CONFIG_BPF_LSM */
#endif
BPF_PROG_TYPE(BPF_PROG_TYPE_SYSCALL, bpf_syscall, void *, void *)
#ifdef CONFIG_NETFILTER_BPF_LINK
BPF_PROG_TYPE(BPF_PROG_TYPE_NETFILTER, netfilter, struct bpf_nf_ctx, struct bpf_nf_ctx)
#endif

BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
#ifdef CONFIG_CGROUPS
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_CGRP_STORAGE, cgrp_storage_map_ops)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, cgroup_storage_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops)
#ifdef CONFIG_PERF_EVENTS
BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
#ifdef CONFIG_BPF_LSM
BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops)
#ifdef CONFIG_NET
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#if defined(CONFIG_XDP_SOCKETS)
BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
#endif
#ifdef CONFIG_INET
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
#endif
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
#if defined(CONFIG_BPF_JIT)
BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_USER_RINGBUF, user_ringbuf_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_ARENA, arena_map_ops)

BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint)
BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
#ifdef CONFIG_CGROUP_BPF
BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup)
#endif
BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
#ifdef CONFIG_NET
BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
BPF_LINK_TYPE(BPF_LINK_TYPE_NETFILTER, netfilter)
BPF_LINK_TYPE(BPF_LINK_TYPE_TCX, tcx)
BPF_LINK_TYPE(BPF_LINK_TYPE_NETKIT, netkit)
BPF_LINK_TYPE(BPF_LINK_TYPE_SOCKMAP, sockmap)
#endif
#ifdef CONFIG_PERF_EVENTS
BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
#endif
BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi)
BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops)
BPF_LINK_TYPE(BPF_LINK_TYPE_UPROBE_MULTI, uprobe_multi)
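Reader's note: this header is an X-macro list. A consumer defines BPF_PROG_TYPE, BPF_MAP_TYPE and BPF_LINK_TYPE to expand each entry, includes the file, then undefines the macros; include/linux/bpf.h builds its ops tables this way. A sketch that derives a name table for the configured program types (the table name is illustrative):

/* Expand only program-type entries into a designated-initializer table. */
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = #_name,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
static const char * const bpf_prog_type_names[] = {
#include <linux/bpf_types.h>
};
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE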
// SPDX-License-Identifier: GPL-2.0-or-later /* * Freescale LINFlexD UART serial port driver * * Copyright 2012-2016 Freescale Semiconductor, Inc. * Copyright 2017-2019 NXP */ #include <linux/console.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/serial_core.h> #include <linux/slab.h> #include <linux/tty_flip.h> #include <linux/delay.h> /* All registers are 32-bit width */ #define LINCR1 0x0000 /* LIN control register */ #define LINIER 0x0004 /* LIN interrupt enable register */ #define LINSR 0x0008 /* LIN status register */ #define LINESR 0x000C /* LIN error status register */ #define UARTCR 0x0010 /* UART mode control register */ #define UARTSR 0x0014 /* UART mode status register */ #define LINTCSR 0x0018 /* LIN timeout control status register */ #define LINOCR 0x001C /* LIN output compare register */ #define LINTOCR 0x0020 /* LIN timeout control register */ #define LINFBRR 0x0024 /* LIN fractional baud rate register */ #define LINIBRR 0x0028 /* LIN integer baud rate register */ #define LINCFR 0x002C /* LIN checksum field register */ #define LINCR2 0x0030 /* LIN control register 2 */ #define BIDR 0x0034 /* Buffer identifier register */ #define BDRL 0x0038 /* Buffer data register least significant */ #define BDRM 0x003C /* Buffer data register most significant */ #define IFER 0x0040 /* Identifier filter enable register */ #define IFMI 0x0044 /* Identifier filter match index */ #define IFMR 0x0048 /* Identifier filter mode register */ #define GCR 0x004C /* Global control register */ #define UARTPTO 0x0050 /* UART preset timeout register */ #define UARTCTO 0x0054 /* UART current timeout register */ /* * Register field definitions */ #define LINFLEXD_LINCR1_INIT BIT(0) #define LINFLEXD_LINCR1_MME BIT(4) #define LINFLEXD_LINCR1_BF BIT(7) #define LINFLEXD_LINSR_LINS_INITMODE BIT(12) #define LINFLEXD_LINSR_LINS_MASK (0xF << 12) #define LINFLEXD_LINIER_SZIE BIT(15) #define LINFLEXD_LINIER_OCIE BIT(14) #define LINFLEXD_LINIER_BEIE BIT(13) #define LINFLEXD_LINIER_CEIE BIT(12) #define LINFLEXD_LINIER_HEIE BIT(11) #define LINFLEXD_LINIER_FEIE BIT(8) #define LINFLEXD_LINIER_BOIE BIT(7) #define LINFLEXD_LINIER_LSIE BIT(6) #define LINFLEXD_LINIER_WUIE BIT(5) #define LINFLEXD_LINIER_DBFIE BIT(4) #define LINFLEXD_LINIER_DBEIETOIE BIT(3) #define LINFLEXD_LINIER_DRIE BIT(2) #define LINFLEXD_LINIER_DTIE BIT(1) #define LINFLEXD_LINIER_HRIE BIT(0) #define LINFLEXD_UARTCR_OSR_MASK (0xF << 24) #define LINFLEXD_UARTCR_OSR(uartcr) (((uartcr) \ & LINFLEXD_UARTCR_OSR_MASK) >> 24) #define LINFLEXD_UARTCR_ROSE BIT(23) #define LINFLEXD_UARTCR_RFBM BIT(9) #define LINFLEXD_UARTCR_TFBM BIT(8) #define LINFLEXD_UARTCR_WL1 BIT(7) #define LINFLEXD_UARTCR_PC1 BIT(6) #define LINFLEXD_UARTCR_RXEN BIT(5) #define LINFLEXD_UARTCR_TXEN BIT(4) #define LINFLEXD_UARTCR_PC0 BIT(3) #define LINFLEXD_UARTCR_PCE BIT(2) #define LINFLEXD_UARTCR_WL0 BIT(1) #define LINFLEXD_UARTCR_UART BIT(0) #define LINFLEXD_UARTSR_SZF BIT(15) #define LINFLEXD_UARTSR_OCF BIT(14) #define LINFLEXD_UARTSR_PE3 BIT(13) #define LINFLEXD_UARTSR_PE2 BIT(12) #define LINFLEXD_UARTSR_PE1 BIT(11) #define LINFLEXD_UARTSR_PE0 BIT(10) #define LINFLEXD_UARTSR_RMB BIT(9) #define LINFLEXD_UARTSR_FEF BIT(8) #define LINFLEXD_UARTSR_BOF BIT(7) #define LINFLEXD_UARTSR_RPS BIT(6) #define LINFLEXD_UARTSR_WUF BIT(5) #define LINFLEXD_UARTSR_4 BIT(4) #define LINFLEXD_UARTSR_TO BIT(3) #define LINFLEXD_UARTSR_DRFRFE BIT(2) #define LINFLEXD_UARTSR_DTFTFF BIT(1) #define LINFLEXD_UARTSR_NF 
BIT(0) #define LINFLEXD_UARTSR_PE (LINFLEXD_UARTSR_PE0 |\ LINFLEXD_UARTSR_PE1 |\ LINFLEXD_UARTSR_PE2 |\ LINFLEXD_UARTSR_PE3) #define LINFLEX_LDIV_MULTIPLIER (16) #define DRIVER_NAME "fsl-linflexuart" #define DEV_NAME "ttyLF" #define UART_NR 4 #define EARLYCON_BUFFER_INITIAL_CAP 8 #define PREINIT_DELAY 2000 /* us */ static const struct of_device_id linflex_dt_ids[] = { { .compatible = "fsl,s32v234-linflexuart", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, linflex_dt_ids); #ifdef CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE static struct uart_port *earlycon_port; static bool linflex_earlycon_same_instance; static DEFINE_SPINLOCK(init_lock); static bool during_init; static struct { char *content; unsigned int len, cap; } earlycon_buf; #endif static void linflex_stop_tx(struct uart_port *port) { unsigned long ier; ier = readl(port->membase + LINIER); ier &= ~(LINFLEXD_LINIER_DTIE); writel(ier, port->membase + LINIER); } static void linflex_stop_rx(struct uart_port *port) { unsigned long ier; ier = readl(port->membase + LINIER); writel(ier & ~LINFLEXD_LINIER_DRIE, port->membase + LINIER); } static void linflex_put_char(struct uart_port *sport, unsigned char c) { unsigned long status; writeb(c, sport->membase + BDRL); /* Waiting for data transmission completed. */ while (((status = readl(sport->membase + UARTSR)) & LINFLEXD_UARTSR_DTFTFF) != LINFLEXD_UARTSR_DTFTFF) ; writel(status | LINFLEXD_UARTSR_DTFTFF, sport->membase + UARTSR); } static inline void linflex_transmit_buffer(struct uart_port *sport) { struct tty_port *tport = &sport->state->port; unsigned char c; while (uart_fifo_get(sport, &c)) { linflex_put_char(sport, c); sport->icount.tx++; } if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(sport); if (kfifo_is_empty(&tport->xmit_fifo)) linflex_stop_tx(sport); } static void linflex_start_tx(struct uart_port *port) { unsigned long ier; linflex_transmit_buffer(port); ier = readl(port->membase + LINIER); writel(ier | LINFLEXD_LINIER_DTIE, port->membase + LINIER); } static irqreturn_t linflex_txint(int irq, void *dev_id) { struct uart_port *sport = dev_id; struct tty_port *tport = &sport->state->port; unsigned long flags; uart_port_lock_irqsave(sport, &flags); if (sport->x_char) { linflex_put_char(sport, sport->x_char); goto out; } if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(sport)) { linflex_stop_tx(sport); goto out; } linflex_transmit_buffer(sport); out: uart_port_unlock_irqrestore(sport, flags); return IRQ_HANDLED; } static irqreturn_t linflex_rxint(int irq, void *dev_id) { struct uart_port *sport = dev_id; unsigned int flg; struct tty_port *port = &sport->state->port; unsigned long flags, status; unsigned char rx; bool brk; uart_port_lock_irqsave(sport, &flags); status = readl(sport->membase + UARTSR); while (status & LINFLEXD_UARTSR_RMB) { rx = readb(sport->membase + BDRM); brk = false; flg = TTY_NORMAL; sport->icount.rx++; if (status & (LINFLEXD_UARTSR_BOF | LINFLEXD_UARTSR_FEF | LINFLEXD_UARTSR_PE)) { if (status & LINFLEXD_UARTSR_BOF) sport->icount.overrun++; if (status & LINFLEXD_UARTSR_FEF) { if (!rx) { brk = true; sport->icount.brk++; } else sport->icount.frame++; } if (status & LINFLEXD_UARTSR_PE) sport->icount.parity++; } writel(status, sport->membase + UARTSR); status = readl(sport->membase + UARTSR); if (brk) { uart_handle_break(sport); } else { if (uart_handle_sysrq_char(sport, (unsigned char)rx)) continue; tty_insert_flip_char(port, rx, flg); } } uart_port_unlock_irqrestore(sport, flags); tty_flip_buffer_push(port); return IRQ_HANDLED; } static 
irqreturn_t linflex_int(int irq, void *dev_id) { struct uart_port *sport = dev_id; unsigned long status; status = readl(sport->membase + UARTSR); if (status & LINFLEXD_UARTSR_DRFRFE) linflex_rxint(irq, dev_id); if (status & LINFLEXD_UARTSR_DTFTFF) linflex_txint(irq, dev_id); return IRQ_HANDLED; } /* return TIOCSER_TEMT when transmitter is not busy */ static unsigned int linflex_tx_empty(struct uart_port *port) { unsigned long status; status = readl(port->membase + UARTSR) & LINFLEXD_UARTSR_DTFTFF; return status ? TIOCSER_TEMT : 0; } static unsigned int linflex_get_mctrl(struct uart_port *port) { return 0; } static void linflex_set_mctrl(struct uart_port *port, unsigned int mctrl) { } static void linflex_break_ctl(struct uart_port *port, int break_state) { } static void linflex_setup_watermark(struct uart_port *sport) { unsigned long cr, ier, cr1; /* Disable transmission/reception */ ier = readl(sport->membase + LINIER); ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE); writel(ier, sport->membase + LINIER); cr = readl(sport->membase + UARTCR); cr &= ~(LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN); writel(cr, sport->membase + UARTCR); /* Enter initialization mode by setting INIT bit */ /* set the Linflex in master mode and activate by-pass filter */ cr1 = LINFLEXD_LINCR1_BF | LINFLEXD_LINCR1_MME | LINFLEXD_LINCR1_INIT; writel(cr1, sport->membase + LINCR1); /* wait for init mode entry */ while ((readl(sport->membase + LINSR) & LINFLEXD_LINSR_LINS_MASK) != LINFLEXD_LINSR_LINS_INITMODE) ; /* * UART = 0x1; - Linflex working in UART mode * TXEN = 0x1; - Enable transmission of data now * RXEn = 0x1; - Receiver enabled * WL0 = 0x1; - 8 bit data * PCE = 0x0; - No parity */ /* set UART bit to allow writing other bits */ writel(LINFLEXD_UARTCR_UART, sport->membase + UARTCR); cr = (LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN | LINFLEXD_UARTCR_WL0 | LINFLEXD_UARTCR_UART); writel(cr, sport->membase + UARTCR); cr1 &= ~(LINFLEXD_LINCR1_INIT); writel(cr1, sport->membase + LINCR1); ier = readl(sport->membase + LINIER); ier |= LINFLEXD_LINIER_DRIE; ier |= LINFLEXD_LINIER_DTIE; writel(ier, sport->membase + LINIER); } static int linflex_startup(struct uart_port *port) { int ret = 0; unsigned long flags; uart_port_lock_irqsave(port, &flags); linflex_setup_watermark(port); uart_port_unlock_irqrestore(port, flags); ret = devm_request_irq(port->dev, port->irq, linflex_int, 0, DRIVER_NAME, port); return ret; } static void linflex_shutdown(struct uart_port *port) { unsigned long ier; unsigned long flags; uart_port_lock_irqsave(port, &flags); /* disable interrupts */ ier = readl(port->membase + LINIER); ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE); writel(ier, port->membase + LINIER); uart_port_unlock_irqrestore(port, flags); devm_free_irq(port->dev, port->irq, port); } static void linflex_set_termios(struct uart_port *port, struct ktermios *termios, const struct ktermios *old) { unsigned long flags; unsigned long cr, old_cr, cr1; unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8; cr = readl(port->membase + UARTCR); old_cr = cr; /* Enter initialization mode by setting INIT bit */ cr1 = readl(port->membase + LINCR1); cr1 |= LINFLEXD_LINCR1_INIT; writel(cr1, port->membase + LINCR1); /* wait for init mode entry */ while ((readl(port->membase + LINSR) & LINFLEXD_LINSR_LINS_MASK) != LINFLEXD_LINSR_LINS_INITMODE) ; /* * only support CS8 and CS7, and for CS7 must enable PE. 
* supported mode: * - (7,e/o,1) * - (8,n,1) * - (8,e/o,1) */ /* enter the UART into configuration mode */ while ((termios->c_cflag & CSIZE) != CS8 && (termios->c_cflag & CSIZE) != CS7) { termios->c_cflag &= ~CSIZE; termios->c_cflag |= old_csize; old_csize = CS8; } if ((termios->c_cflag & CSIZE) == CS7) { /* Word length: WL1WL0:00 */ cr = old_cr & ~LINFLEXD_UARTCR_WL1 & ~LINFLEXD_UARTCR_WL0; } if ((termios->c_cflag & CSIZE) == CS8) { /* Word length: WL1WL0:01 */ cr = (old_cr | LINFLEXD_UARTCR_WL0) & ~LINFLEXD_UARTCR_WL1; } if (termios->c_cflag & CMSPAR) { if ((termios->c_cflag & CSIZE) != CS8) { termios->c_cflag &= ~CSIZE; termios->c_cflag |= CS8; } /* has a space/sticky bit */ cr |= LINFLEXD_UARTCR_WL0; } if (termios->c_cflag & CSTOPB) termios->c_cflag &= ~CSTOPB; /* parity must be enabled when CS7 to match 8-bits format */ if ((termios->c_cflag & CSIZE) == CS7) termios->c_cflag |= PARENB; if ((termios->c_cflag & PARENB)) { cr |= LINFLEXD_UARTCR_PCE; if (termios->c_cflag & PARODD) cr = (cr | LINFLEXD_UARTCR_PC0) & (~LINFLEXD_UARTCR_PC1); else cr = cr & (~LINFLEXD_UARTCR_PC1 & ~LINFLEXD_UARTCR_PC0); } else { cr &= ~LINFLEXD_UARTCR_PCE; } uart_port_lock_irqsave(port, &flags); port->read_status_mask = 0; if (termios->c_iflag & INPCK) port->read_status_mask |= (LINFLEXD_UARTSR_FEF | LINFLEXD_UARTSR_PE0 | LINFLEXD_UARTSR_PE1 | LINFLEXD_UARTSR_PE2 | LINFLEXD_UARTSR_PE3); if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) port->read_status_mask |= LINFLEXD_UARTSR_FEF; /* characters to ignore */ port->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= LINFLEXD_UARTSR_PE; if (termios->c_iflag & IGNBRK) { port->ignore_status_mask |= LINFLEXD_UARTSR_PE; /* * if we're ignoring parity and break indicators, * ignore overruns too (for real raw support). 
*/ if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= LINFLEXD_UARTSR_BOF; } writel(cr, port->membase + UARTCR); cr1 &= ~(LINFLEXD_LINCR1_INIT); writel(cr1, port->membase + LINCR1); uart_port_unlock_irqrestore(port, flags); } static const char *linflex_type(struct uart_port *port) { return "FSL_LINFLEX"; } static void linflex_release_port(struct uart_port *port) { /* nothing to do */ } static int linflex_request_port(struct uart_port *port) { return 0; } /* configure/auto-configure the port */ static void linflex_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE) port->type = PORT_LINFLEXUART; } static const struct uart_ops linflex_pops = { .tx_empty = linflex_tx_empty, .set_mctrl = linflex_set_mctrl, .get_mctrl = linflex_get_mctrl, .stop_tx = linflex_stop_tx, .start_tx = linflex_start_tx, .stop_rx = linflex_stop_rx, .break_ctl = linflex_break_ctl, .startup = linflex_startup, .shutdown = linflex_shutdown, .set_termios = linflex_set_termios, .type = linflex_type, .request_port = linflex_request_port, .release_port = linflex_release_port, .config_port = linflex_config_port, }; static struct uart_port *linflex_ports[UART_NR]; #ifdef CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE static void linflex_console_putchar(struct uart_port *port, unsigned char ch) { unsigned long cr; cr = readl(port->membase + UARTCR); writeb(ch, port->membase + BDRL); if (!(cr & LINFLEXD_UARTCR_TFBM)) while ((readl(port->membase + UARTSR) & LINFLEXD_UARTSR_DTFTFF) != LINFLEXD_UARTSR_DTFTFF) ; else while (readl(port->membase + UARTSR) & LINFLEXD_UARTSR_DTFTFF) ; if (!(cr & LINFLEXD_UARTCR_TFBM)) { writel((readl(port->membase + UARTSR) | LINFLEXD_UARTSR_DTFTFF), port->membase + UARTSR); } } static void linflex_earlycon_putchar(struct uart_port *port, unsigned char ch) { unsigned long flags; char *ret; if (!linflex_earlycon_same_instance) { linflex_console_putchar(port, ch); return; } spin_lock_irqsave(&init_lock, flags); if (!during_init) goto outside_init; if (earlycon_buf.len >= 1 << CONFIG_LOG_BUF_SHIFT) goto init_release; if (!earlycon_buf.cap) { earlycon_buf.content = kmalloc(EARLYCON_BUFFER_INITIAL_CAP, GFP_ATOMIC); earlycon_buf.cap = earlycon_buf.content ? 
EARLYCON_BUFFER_INITIAL_CAP : 0; } else if (earlycon_buf.len == earlycon_buf.cap) { ret = krealloc(earlycon_buf.content, earlycon_buf.cap << 1, GFP_ATOMIC); if (ret) { earlycon_buf.content = ret; earlycon_buf.cap <<= 1; } } if (earlycon_buf.len < earlycon_buf.cap) earlycon_buf.content[earlycon_buf.len++] = ch; goto init_release; outside_init: linflex_console_putchar(port, ch); init_release: spin_unlock_irqrestore(&init_lock, flags); } static void linflex_string_write(struct uart_port *sport, const char *s, unsigned int count) { unsigned long cr, ier = 0; ier = readl(sport->membase + LINIER); linflex_stop_tx(sport); cr = readl(sport->membase + UARTCR); cr |= (LINFLEXD_UARTCR_TXEN); writel(cr, sport->membase + UARTCR); uart_console_write(sport, s, count, linflex_console_putchar); writel(ier, sport->membase + LINIER); } static void linflex_console_write(struct console *co, const char *s, unsigned int count) { struct uart_port *sport = linflex_ports[co->index]; unsigned long flags; int locked = 1; if (sport->sysrq) locked = 0; else if (oops_in_progress) locked = uart_port_trylock_irqsave(sport, &flags); else uart_port_lock_irqsave(sport, &flags); linflex_string_write(sport, s, count); if (locked) uart_port_unlock_irqrestore(sport, flags); } /* * if the port was already initialised (eg, by a boot loader), * try to determine the current setup. */ static void __init linflex_console_get_options(struct uart_port *sport, int *parity, int *bits) { unsigned long cr; cr = readl(sport->membase + UARTCR); cr &= LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN; if (!cr) return; /* ok, the port was enabled */ *parity = 'n'; if (cr & LINFLEXD_UARTCR_PCE) { if (cr & LINFLEXD_UARTCR_PC0) *parity = 'o'; else *parity = 'e'; } if ((cr & LINFLEXD_UARTCR_WL0) && ((cr & LINFLEXD_UARTCR_WL1) == 0)) { if (cr & LINFLEXD_UARTCR_PCE) *bits = 9; else *bits = 8; } } static int __init linflex_console_setup(struct console *co, char *options) { struct uart_port *sport; int baud = 115200; int bits = 8; int parity = 'n'; int flow = 'n'; int ret; int i; unsigned long flags; /* * check whether an invalid uart number has been specified, and * if so, search for the first available port that does have * console support. */ if (co->index == -1 || co->index >= ARRAY_SIZE(linflex_ports)) co->index = 0; sport = linflex_ports[co->index]; if (!sport) return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else linflex_console_get_options(sport, &parity, &bits); if (earlycon_port && sport->mapbase == earlycon_port->mapbase) { linflex_earlycon_same_instance = true; spin_lock_irqsave(&init_lock, flags); during_init = true; spin_unlock_irqrestore(&init_lock, flags); /* Workaround for character loss or output of many invalid * characters, when INIT mode is entered shortly after a * character has just been printed. 
*/ udelay(PREINIT_DELAY); } linflex_setup_watermark(sport); ret = uart_set_options(sport, co, baud, parity, bits, flow); if (!linflex_earlycon_same_instance) goto done; spin_lock_irqsave(&init_lock, flags); /* Emptying buffer */ if (earlycon_buf.len) { for (i = 0; i < earlycon_buf.len; i++) linflex_console_putchar(earlycon_port, earlycon_buf.content[i]); kfree(earlycon_buf.content); earlycon_buf.len = 0; } during_init = false; spin_unlock_irqrestore(&init_lock, flags); done: return ret; } static struct uart_driver linflex_reg; static struct console linflex_console = { .name = DEV_NAME, .write = linflex_console_write, .device = uart_console_device, .setup = linflex_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &linflex_reg, }; static void linflex_earlycon_write(struct console *con, const char *s, unsigned int n) { struct earlycon_device *dev = con->data; uart_console_write(&dev->port, s, n, linflex_earlycon_putchar); } static int __init linflex_early_console_setup(struct earlycon_device *device, const char *options) { if (!device->port.membase) return -ENODEV; device->con->write = linflex_earlycon_write; earlycon_port = &device->port; return 0; } OF_EARLYCON_DECLARE(linflex, "fsl,s32v234-linflexuart", linflex_early_console_setup); #define LINFLEX_CONSOLE (&linflex_console) #else #define LINFLEX_CONSOLE NULL #endif static struct uart_driver linflex_reg = { .owner = THIS_MODULE, .driver_name = DRIVER_NAME, .dev_name = DEV_NAME, .nr = ARRAY_SIZE(linflex_ports), .cons = LINFLEX_CONSOLE, }; static int linflex_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct uart_port *sport; struct resource *res; int ret; sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL); if (!sport) return -ENOMEM; ret = of_alias_get_id(np, "serial"); if (ret < 0) { dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); return ret; } if (ret >= UART_NR) { dev_err(&pdev->dev, "driver limited to %d serial ports\n", UART_NR); return -ENOMEM; } sport->line = ret; sport->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(sport->membase)) return PTR_ERR(sport->membase); sport->mapbase = res->start; ret = platform_get_irq(pdev, 0); if (ret < 0) return ret; sport->dev = &pdev->dev; sport->iotype = UPIO_MEM; sport->irq = ret; sport->ops = &linflex_pops; sport->flags = UPF_BOOT_AUTOCONF; sport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE); linflex_ports[sport->line] = sport; platform_set_drvdata(pdev, sport); return uart_add_one_port(&linflex_reg, sport); } static void linflex_remove(struct platform_device *pdev) { struct uart_port *sport = platform_get_drvdata(pdev); uart_remove_one_port(&linflex_reg, sport); } #ifdef CONFIG_PM_SLEEP static int linflex_suspend(struct device *dev) { struct uart_port *sport = dev_get_drvdata(dev); uart_suspend_port(&linflex_reg, sport); return 0; } static int linflex_resume(struct device *dev) { struct uart_port *sport = dev_get_drvdata(dev); uart_resume_port(&linflex_reg, sport); return 0; } #endif static SIMPLE_DEV_PM_OPS(linflex_pm_ops, linflex_suspend, linflex_resume); static struct platform_driver linflex_driver = { .probe = linflex_probe, .remove = linflex_remove, .driver = { .name = DRIVER_NAME, .of_match_table = linflex_dt_ids, .pm = &linflex_pm_ops, }, }; static int __init linflex_serial_init(void) { int ret; ret = uart_register_driver(&linflex_reg); if (ret) return ret; ret = platform_driver_register(&linflex_driver); if (ret) uart_unregister_driver(&linflex_reg); return ret; } 
static void __exit linflex_serial_exit(void) { platform_driver_unregister(&linflex_driver); uart_unregister_driver(&linflex_reg); } module_init(linflex_serial_init); module_exit(linflex_serial_exit); MODULE_DESCRIPTION("Freescale LINFlexD serial port driver"); MODULE_LICENSE("GPL v2");
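/*
 * Editor's note: an illustrative sketch, not part of the original driver.
 * The driver above never programs the baud rate itself (it inherits the
 * boot loader's LINIBRR/LINFBRR settings), but LINFLEX_LDIV_MULTIPLIER
 * hints at how the LINFlexD divider works: LFDIV = uartclk / (16 * baud),
 * with the integer part written to LINIBRR and the fractional part,
 * scaled by 16, written to LINFBRR. The helper below is a hypothetical
 * host-side calculation of those two values, assuming that register
 * layout; it is not called anywhere in this file.
 */
static void linflex_calc_brr(unsigned long uartclk, unsigned int baud,
			     unsigned long *ibrr, unsigned long *fbrr)
{
	/* Fixed-point divide: scale by 16 first to keep 4 fraction bits. */
	unsigned long ldiv16 = (uartclk * 16UL) /
			       (LINFLEX_LDIV_MULTIPLIER * baud);

	*ibrr = ldiv16 >> 4;	/* integer mantissa -> LINIBRR */
	*fbrr = ldiv16 & 0xf;	/* 4-bit fraction   -> LINFBRR */

	/*
	 * Example: uartclk = 80 MHz, baud = 115200 gives ldiv16 = 694,
	 * so LINIBRR = 43 and LINFBRR = 6, i.e. an effective rate of
	 * 80e6 / (16 * 43.375) ~= 115273 Bd.
	 */
}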
// SPDX-License-Identifier: GPL-2.0 #include <stdio.h> #include <byteswap.h> #include "utils.h" #include "subunit.h" #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define cpu_to_be32(x) bswap_32(x) #define be32_to_cpu(x) bswap_32(x) #define be16_to_cpup(x) bswap_16(*x) #define cpu_to_be64(x) bswap_64(x) #else #define cpu_to_be32(x) (x) #define be32_to_cpu(x) (x) #define be16_to_cpup(x) (*x) #define cpu_to_be64(x) (x) #endif #include "vphn.c" static struct test { char *descr; long input[VPHN_REGISTER_COUNT]; u32 expected[VPHN_ASSOC_BUFSIZE]; } all_tests[] = { { "vphn: no data", { 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, }, { 0x00000000 } }, { "vphn: 1 x 16-bit value", { 0x8001ffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, }, { 0x00000001, 0x00000001 } }, { "vphn: 2 x 16-bit values", { 0x80018002ffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, }, { 0x00000002, 0x00000001, 0x00000002 } }, { "vphn: 3 x 16-bit values", { 0x800180028003ffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, }, { 0x00000003, 0x00000001, 0x00000002, 0x00000003 } }, { "vphn: 4 x 16-bit values", { 0x8001800280038004, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, }, { 0x00000004, 0x00000001, 0x00000002, 0x00000003, 0x00000004 } }, { /* Parsing the next 16-bit value out of the next 64-bit input * value. */ "vphn: 5 x 16-bit values", { 0x8001800280038004, 0x8005ffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, }, { 0x00000005, 0x00000001, 0x00000002, 0x00000003, 0x00000004, 0x00000005 } }, { /* Parse at most 6 x 64-bit input values */ "vphn: 24 x 16-bit values", { 0x8001800280038004, 0x8005800680078008, 0x8009800a800b800c, 0x800d800e800f8010, 0x8011801280138014, 0x8015801680178018 }, { 0x00000018, 0x00000001, 0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000006, 0x00000007, 0x00000008, 0x00000009, 0x0000000a, 0x0000000b, 0x0000000c, 0x0000000d, 0x0000000e, 0x0000000f, 0x00000010, 0x00000011, 0x00000012, 0x00000013, 0x00000014, 0x00000015, 0x00000016, 0x00000017, 0x00000018 } }, { "vphn: 1 x 32-bit value", { 0x00000001ffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff }, { 0x00000001, 0x00000001 } }, { "vphn: 2 x 32-bit values", { 0x0000000100000002, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff }, { 0x00000002, 0x00000001, 0x00000002 } }, { /* Parsing the next 32-bit value out of the next 64-bit input * value. 
*/ "vphn: 3 x 32-bit values", { 0x0000000100000002, 0x00000003ffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff }, { 0x00000003, 0x00000001, 0x00000002, 0x00000003 } }, { /* Parse at most 6 x 64-bit input values */ "vphn: 12 x 32-bit values", { 0x0000000100000002, 0x0000000300000004, 0x0000000500000006, 0x0000000700000008, 0x000000090000000a, 0x0000000b0000000c }, { 0x0000000c, 0x00000001, 0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000006, 0x00000007, 0x00000008, 0x00000009, 0x0000000a, 0x0000000b, 0x0000000c } }, { "vphn: 16-bit value followed by 32-bit value", { 0x800100000002ffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff }, { 0x00000002, 0x00000001, 0x00000002 } }, { "vphn: 32-bit value followed by 16-bit value", { 0x000000018002ffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff }, { 0x00000002, 0x00000001, 0x00000002 } }, { /* Parse a 32-bit value split accross two consecutives 64-bit * input values. */ "vphn: 16-bit value followed by 2 x 32-bit values", { 0x8001000000020000, 0x0003ffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff }, { 0x00000003, 0x00000001, 0x00000002, 0x00000003, 0x00000004, 0x00000005 } }, { /* The lower bits in 0x0001ffff don't get mixed up with the * 0xffff terminator. */ "vphn: 32-bit value has all ones in 16 lower bits", { 0x0001ffff80028003, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff }, { 0x00000003, 0x0001ffff, 0x00000002, 0x00000003 } }, { /* The following input doesn't follow the specification. */ "vphn: last 32-bit value is truncated", { 0x0000000100000002, 0x0000000300000004, 0x0000000500000006, 0x0000000700000008, 0x000000090000000a, 0x0000000b800c2bad }, { 0x0000000c, 0x00000001, 0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000006, 0x00000007, 0x00000008, 0x00000009, 0x0000000a, 0x0000000b, 0x0000000c } }, { "vphn: garbage after terminator", { 0xffff2bad2bad2bad, 0x2bad2bad2bad2bad, 0x2bad2bad2bad2bad, 0x2bad2bad2bad2bad, 0x2bad2bad2bad2bad, 0x2bad2bad2bad2bad }, { 0x00000000 } }, { NULL } }; static int test_one(struct test *test) { __be32 output[VPHN_ASSOC_BUFSIZE] = { 0 }; int i, len; vphn_unpack_associativity(test->input, output); len = be32_to_cpu(output[0]); if (len != test->expected[0]) { printf("expected %d elements, got %d\n", test->expected[0], len); return 1; } for (i = 1; i < len; i++) { u32 val = be32_to_cpu(output[i]); if (val != test->expected[i]) { printf("element #%d is 0x%x, should be 0x%x\n", i, val, test->expected[i]); return 1; } } return 0; } static int test_vphn(void) { static struct test *test; for (test = all_tests; test->descr; test++) { int ret; ret = test_one(test); test_finish(test->descr, ret); if (ret) return ret; } return 0; } int main(int argc, char **argv) { return test_harness(test_vphn, "test-vphn"); }
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* Copyright(c) 2024 Realtek Corporation */ #ifndef __RTW8812A_H__ #define __RTW8812A_H__ extern const struct rtw_chip_info rtw8812a_hw_spec; #endif
#ifndef __MEDIA_INFO_H__ #define __MEDIA_INFO_H__ #ifndef MSM_MEDIA_ALIGN #define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\ ((((__sz) + (__align) - 1) / (__align)) * (__align)) :\ (((__sz) + (__align) - 1) & (~((__align) - 1)))) #endif #ifndef MSM_MEDIA_ROUNDUP #define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r)) #endif #ifndef MSM_MEDIA_MAX #define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b)?(__a):(__b)) #endif enum color_fmts { /* Venus NV12: * YUV 4:2:0 image with a plane of 8 bit Y samples followed * by an interleaved U/V plane containing 8 bit 2x2 subsampled * colour difference samples. * * <-------- Y/UV_Stride --------> * <------- Width -------> * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . V * U V U V U V U V U V U V . . . . ^ * U V U V U V U V U V U V . . . . | * U V U V U V U V U V U V . . . . | * U V U V U V U V U V U V . . . . UV_Scanlines * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . V * . . . . . . . . . . . . . . . . --> Buffer size alignment * * Y_Stride : Width aligned to 128 * UV_Stride : Width aligned to 128 * Y_Scanlines: Height aligned to 32 * UV_Scanlines: Height/2 aligned to 16 * Extradata: Arbitrary (software-imposed) padding * Total size = align((Y_Stride * Y_Scanlines * + UV_Stride * UV_Scanlines * + max(Extradata, Y_Stride * 8), 4096) */ COLOR_FMT_NV12, /* Venus NV21: * YUV 4:2:0 image with a plane of 8 bit Y samples followed * by an interleaved V/U plane containing 8 bit 2x2 subsampled * colour difference samples. * * <-------- Y/UV_Stride --------> * <------- Width -------> * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . V * V U V U V U V U V U V U . . . . ^ * V U V U V U V U V U V U . . . . | * V U V U V U V U V U V U . . . . | * V U V U V U V U V U V U . . . . UV_Scanlines * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . V * . . . . . . . . . . . . . . . . --> Padding & Buffer size alignment * * Y_Stride : Width aligned to 128 * UV_Stride : Width aligned to 128 * Y_Scanlines: Height aligned to 32 * UV_Scanlines: Height/2 aligned to 16 * Extradata: Arbitrary (software-imposed) padding * Total size = align((Y_Stride * Y_Scanlines * + UV_Stride * UV_Scanlines * + max(Extradata, Y_Stride * 8), 4096) */ COLOR_FMT_NV21, /* Venus NV12_MVTB: * Two YUV 4:2:0 images/views one after the other * in a top-bottom layout, same as NV12 * with a plane of 8 bit Y samples followed * by an interleaved U/V plane containing 8 bit 2x2 subsampled * colour difference samples. * * * <-------- Y/UV_Stride --------> * <------- Width -------> * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . 
. | Y_Scanlines | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | | * . . . . . . . . . . . . . . . . | View_1 * . . . . . . . . . . . . . . . . | | * . . . . . . . . . . . . . . . . | | * . . . . . . . . . . . . . . . . V | * U V U V U V U V U V U V . . . . ^ | * U V U V U V U V U V U V . . . . | | * U V U V U V U V U V U V . . . . | | * U V U V U V U V U V U V . . . . UV_Scanlines | * . . . . . . . . . . . . . . . . | | * . . . . . . . . . . . . . . . . V V * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | | * . . . . . . . . . . . . . . . . | View_2 * . . . . . . . . . . . . . . . . | | * . . . . . . . . . . . . . . . . | | * . . . . . . . . . . . . . . . . V | * U V U V U V U V U V U V . . . . ^ | * U V U V U V U V U V U V . . . . | | * U V U V U V U V U V U V . . . . | | * U V U V U V U V U V U V . . . . UV_Scanlines | * . . . . . . . . . . . . . . . . | | * . . . . . . . . . . . . . . . . V V * . . . . . . . . . . . . . . . . --> Buffer size alignment * * Y_Stride : Width aligned to 128 * UV_Stride : Width aligned to 128 * Y_Scanlines: Height aligned to 32 * UV_Scanlines: Height/2 aligned to 16 * View_1 begin at: 0 (zero) * View_2 begin at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines * Extradata: Arbitrary (software-imposed) padding * Total size = align((2*(Y_Stride * Y_Scanlines) * + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096) */ COLOR_FMT_NV12_MVTB, /* * The buffer can be of 2 types: * (1) Venus NV12 UBWC Progressive * (2) Venus NV12 UBWC Interlaced * * (1) Venus NV12 UBWC Progressive Buffer Format: * Compressed Macro-tile format for NV12. * Contains 4 planes in the following order - * (A) Y_Meta_Plane * (B) Y_UBWC_Plane * (C) UV_Meta_Plane * (D) UV_UBWC_Plane * * Y_Meta_Plane consists of meta information to decode compressed * tile data in Y_UBWC_Plane. * Y_UBWC_Plane consists of Y data in compressed macro-tile format. * UBWC decoder block will use the Y_Meta_Plane data together with * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples. * * UV_Meta_Plane consists of meta information to decode compressed * tile data in UV_UBWC_Plane. * UV_UBWC_Plane consists of UV data in compressed macro-tile format. * UBWC decoder block will use UV_Meta_Plane data together with * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2 * subsampled color difference samples. * * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable * and randomly accessible. There is no dependency between tiles. * * <----- Y_Meta_Stride ----> * <-------- Width ------> * M M M M M M M M M M M M . . ^ ^ * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . Height | * M M M M M M M M M M M M . . | Meta_Y_Scanlines * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . V | * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * . . . . . . . . . . . . . . V * <--Compressed tile Y Stride---> * <------- Width -------> * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . 
Height | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * . . . . . . . . . . . . . . . . V * <----- UV_Meta_Stride ----> * M M M M M M M M M M M M . . ^ * M M M M M M M M M M M M . . | * M M M M M M M M M M M M . . | * M M M M M M M M M M M M . . M_UV_Scanlines * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . V * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * <--Compressed tile UV Stride---> * U* V* U* V* U* V* U* V* . . . . ^ * U* V* U* V* U* V* U* V* . . . . | * U* V* U* V* U* V* U* V* . . . . | * U* V* U* V* U* V* U* V* . . . . UV_Scanlines * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . V * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * * Y_Stride = align(Width, 128) * UV_Stride = align(Width, 128) * Y_Scanlines = align(Height, 32) * UV_Scanlines = align(Height/2, 16) * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096) * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096) * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16) * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096) * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16) * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096) * Extradata = 8k * * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size + * Y_Meta_Plane_size + UV_Meta_Plane_size * + max(Extradata, Y_Stride * 48), 4096) * * * (2) Venus NV12 UBWC Interlaced Buffer Format: * Compressed Macro-tile format for NV12 interlaced. * Contains 8 planes in the following order - * (A) Y_Meta_Top_Field_Plane * (B) Y_UBWC_Top_Field_Plane * (C) UV_Meta_Top_Field_Plane * (D) UV_UBWC_Top_Field_Plane * (E) Y_Meta_Bottom_Field_Plane * (F) Y_UBWC_Bottom_Field_Plane * (G) UV_Meta_Bottom_Field_Plane * (H) UV_UBWC_Bottom_Field_Plane * Y_Meta_Top_Field_Plane consists of meta information to decode * compressed tile data for Y_UBWC_Top_Field_Plane. * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile * format for top field of an interlaced frame. * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed * 8 bit Y samples for top field of an interlaced frame. * * UV_Meta_Top_Field_Plane consists of meta information to decode * compressed tile data in UV_UBWC_Top_Field_Plane. * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile * format for top field of an interlaced frame. * UBWC decoder block will use UV_Meta_Top_Field_Plane data together * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed * 8 bit subsampled color difference samples for top field of an * interlaced frame. * * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is * independently decodable and randomly accessible. There is no * dependency between tiles. * * Y_Meta_Bottom_Field_Plane consists of meta information to decode * compressed tile data for Y_UBWC_Bottom_Field_Plane. * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile * format for bottom field of an interlaced frame. 
* UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less * uncompressed 8 bit Y samples for bottom field of an interlaced frame. * * UV_Meta_Bottom_Field_Plane consists of meta information to decode * compressed tile data in UV_UBWC_Bottom_Field_Plane. * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed * macro-tile format for bottom field of an interlaced frame. * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together * with UV_UBWC_Bottom_Field_Plane data to produce loss-less * uncompressed 8 bit subsampled color difference samples for bottom * field of an interlaced frame. * * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is * independently decodable and randomly accessible. There is no * dependency between tiles. * * <-----Y_TF_Meta_Stride----> * <-------- Width ------> * M M M M M M M M M M M M . . ^ ^ * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . Half_height | * M M M M M M M M M M M M . . | Meta_Y_TF_Scanlines * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . V | * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * . . . . . . . . . . . . . . V * <-Compressed tile Y_TF Stride-> * <------- Width -------> * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_TF_Scanlines * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * . . . . . . . . . . . . . . . . V * <----UV_TF_Meta_Stride----> * M M M M M M M M M M M M . . ^ * M M M M M M M M M M M M . . | * M M M M M M M M M M M M . . | * M M M M M M M M M M M M . . M_UV_TF_Scanlines * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . V * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * <-Compressed tile UV_TF Stride-> * U* V* U* V* U* V* U* V* . . . . ^ * U* V* U* V* U* V* U* V* . . . . | * U* V* U* V* U* V* U* V* . . . . | * U* V* U* V* U* V* U* V* . . . . UV_TF_Scanlines * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . V * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * <-----Y_BF_Meta_Stride----> * <-------- Width ------> * M M M M M M M M M M M M . . ^ ^ * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . Half_height | * M M M M M M M M M M M M . . | Meta_Y_BF_Scanlines * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . V | * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * . . . . . . . . . . . . . . V * <-Compressed tile Y_BF Stride-> * <------- Width -------> * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_BF_Scanlines * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . 
. . . . . | * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * . . . . . . . . . . . . . . . . V * <----UV_BF_Meta_Stride----> * M M M M M M M M M M M M . . ^ * M M M M M M M M M M M M . . | * M M M M M M M M M M M M . . | * M M M M M M M M M M M M . . M_UV_BF_Scanlines * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . V * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * <-Compressed tile UV_BF Stride-> * U* V* U* V* U* V* U* V* . . . . ^ * U* V* U* V* U* V* U* V* . . . . | * U* V* U* V* U* V* U* V* . . . . | * U* V* U* V* U* V* U* V* . . . . UV_BF_Scanlines * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . V * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * * Half_height = (Height+1)>>1 * Y_TF_Stride = align(Width, 128) * UV_TF_Stride = align(Width, 128) * Y_TF_Scanlines = align(Half_height, 32) * UV_TF_Scanlines = align((Half_height+1)/2, 32) * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096) * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096) * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16) * Y_TF_Meta_Plane_size = * align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096) * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16) * UV_TF_Meta_Plane_size = * align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096) * Y_BF_Stride = align(Width, 128) * UV_BF_Stride = align(Width, 128) * Y_BF_Scanlines = align(Half_height, 32) * UV_BF_Scanlines = align((Half_height+1)/2, 32) * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096) * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096) * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16) * Y_BF_Meta_Plane_size = * align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096) * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16) * UV_BF_Meta_Plane_size = * align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096) * Extradata = 8k * * Total size = align( Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size + * Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size + * Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size + * Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size + * + max(Extradata, Y_TF_Stride * 48), 4096) */ COLOR_FMT_NV12_UBWC, /* Venus NV12 10-bit UBWC: * Compressed Macro-tile format for NV12. * Contains 4 planes in the following order - * (A) Y_Meta_Plane * (B) Y_UBWC_Plane * (C) UV_Meta_Plane * (D) UV_UBWC_Plane * * Y_Meta_Plane consists of meta information to decode compressed * tile data in Y_UBWC_Plane. * Y_UBWC_Plane consists of Y data in compressed macro-tile format. * UBWC decoder block will use the Y_Meta_Plane data together with * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples. * * UV_Meta_Plane consists of meta information to decode compressed * tile data in UV_UBWC_Plane. * UV_UBWC_Plane consists of UV data in compressed macro-tile format. * UBWC decoder block will use UV_Meta_Plane data together with * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2 * subsampled color difference samples. * * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable * and randomly accessible. There is no dependency between tiles. 
* * <----- Y_Meta_Stride -----> * <-------- Width ------> * M M M M M M M M M M M M . . ^ ^ * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . Height | * M M M M M M M M M M M M . . | Meta_Y_Scanlines * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . V | * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * . . . . . . . . . . . . . . V * <--Compressed tile Y Stride---> * <------- Width -------> * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * . . . . . . . . . . . . . . . . V * <----- UV_Meta_Stride ----> * M M M M M M M M M M M M . . ^ * M M M M M M M M M M M M . . | * M M M M M M M M M M M M . . | * M M M M M M M M M M M M . . M_UV_Scanlines * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . V * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * <--Compressed tile UV Stride---> * U* V* U* V* U* V* U* V* . . . . ^ * U* V* U* V* U* V* U* V* . . . . | * U* V* U* V* U* V* U* V* . . . . | * U* V* U* V* U* V* U* V* . . . . UV_Scanlines * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . V * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * * * Y_Stride = align(Width * 4/3, 128) * UV_Stride = align(Width * 4/3, 128) * Y_Scanlines = align(Height, 32) * UV_Scanlines = align(Height/2, 16) * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096) * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096) * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16) * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096) * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16) * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096) * Extradata = 8k * * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size + * Y_Meta_Plane_size + UV_Meta_Plane_size * + max(Extradata, Y_Stride * 48), 4096) */ COLOR_FMT_NV12_BPP10_UBWC, /* Venus RGBA8888 format: * Contains 1 plane in the following order - * (A) RGBA plane * * <-------- RGB_Stride --------> * <------- Width -------> * R R R R R R R R R R R R . . . . ^ ^ * R R R R R R R R R R R R . . . . | | * R R R R R R R R R R R R . . . . Height | * R R R R R R R R R R R R . . . . | RGB_Scanlines * R R R R R R R R R R R R . . . . | | * R R R R R R R R R R R R . . . . | | * R R R R R R R R R R R R . . . . | | * R R R R R R R R R R R R . . . . V | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . 
V * * RGB_Stride = align(Width * 4, 128) * RGB_Scanlines = align(Height, 32) * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) * Extradata = 8k * * Total size = align(RGB_Plane_size + Extradata, 4096) */ COLOR_FMT_RGBA8888, /* Venus RGBA8888 UBWC format: * Contains 2 planes in the following order - * (A) Meta plane * (B) RGBA plane * * <--- RGB_Meta_Stride ----> * <-------- Width ------> * M M M M M M M M M M M M . . ^ ^ * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . Height | * M M M M M M M M M M M M . . | Meta_RGB_Scanlines * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . V | * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * . . . . . . . . . . . . . . V * <-------- RGB_Stride --------> * <------- Width -------> * R R R R R R R R R R R R . . . . ^ ^ * R R R R R R R R R R R R . . . . | | * R R R R R R R R R R R R . . . . Height | * R R R R R R R R R R R R . . . . | RGB_Scanlines * R R R R R R R R R R R R . . . . | | * R R R R R R R R R R R R . . . . | | * R R R R R R R R R R R R . . . . | | * R R R R R R R R R R R R . . . . V | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * . . . . . . . . . . . . . . . . V * * RGB_Stride = align(Width * 4, 128) * RGB_Scanlines = align(Height, 32) * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64) * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16) * RGB_Meta_Plane_size = align(RGB_Meta_Stride * * RGB_Meta_Scanlines, 4096) * Extradata = 8k * * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size + * Extradata, 4096) */ COLOR_FMT_RGBA8888_UBWC, /* Venus RGBA1010102 UBWC format: * Contains 2 planes in the following order - * (A) Meta plane * (B) RGBA plane * * <--- RGB_Meta_Stride ----> * <-------- Width ------> * M M M M M M M M M M M M . . ^ ^ * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . Height | * M M M M M M M M M M M M . . | Meta_RGB_Scanlines * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . | | * M M M M M M M M M M M M . . V | * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * . . . . . . . . . . . . . . V * <-------- RGB_Stride --------> * <------- Width -------> * R R R R R R R R R R R R . . . . ^ ^ * R R R R R R R R R R R R . . . . | | * R R R R R R R R R R R R . . . . Height | * R R R R R R R R R R R R . . . . | RGB_Scanlines * R R R R R R R R R R R R . . . . | | * R R R R R R R R R R R R . . . . | | * R R R R R R R R R R R R . . . . | | * R R R R R R R R R R R R . . . . V | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . | * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k * . . . . . . . . . . . . . . . . 
V
	 *
	 * RGB_Stride = align(Width * 4, 256)
	 * RGB_Scanlines = align(Height, 16)
	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
	 * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
	 *		RGB_Meta_Scanlines, 4096)
	 * Extradata = 8k
	 *
	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
	 *		Extradata, 4096)
	 */
	COLOR_FMT_RGBA1010102_UBWC,
	/* Venus RGB565 UBWC format:
	 * Contains 2 planes in the following order -
	 * (A) Meta plane
	 * (B) RGB plane
	 *
	 * <--- RGB_Meta_Stride ---->
	 * <-------- Width ------>
	 * M M M M M M M M M M M M . .      ^           ^
	 * M M M M M M M M M M M M . .      |           |
	 * M M M M M M M M M M M M . .   Height         |
	 * M M M M M M M M M M M M . .      |    Meta_RGB_Scanlines
	 * M M M M M M M M M M M M . .      |           |
	 * M M M M M M M M M M M M . .      |           |
	 * M M M M M M M M M M M M . .      |           |
	 * M M M M M M M M M M M M . .      V           |
	 * . . . . . . . . . . . . . .                  |
	 * . . . . . . . . . . . . . .                  |
	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
	 * . . . . . . . . . . . . . .                  V
	 * <-------- RGB_Stride -------->
	 * <------- Width ------->
	 * R R R R R R R R R R R R . . . .  ^           ^
	 * R R R R R R R R R R R R . . . .  |           |
	 * R R R R R R R R R R R R . . . . Height       |
	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
	 * R R R R R R R R R R R R . . . .  |           |
	 * R R R R R R R R R R R R . . . .  |           |
	 * R R R R R R R R R R R R . . . .  |           |
	 * R R R R R R R R R R R R . . . .  V           |
	 * . . . . . . . . . . . . . . . .              |
	 * . . . . . . . . . . . . . . . .              |
	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
	 * . . . . . . . . . . . . . . . .              V
	 *
	 * RGB_Stride = align(Width * 2, 128)
	 * RGB_Scanlines = align(Height, 16)
	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
	 * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
	 *		RGB_Meta_Scanlines, 4096)
	 * Extradata = 8k
	 *
	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
	 *		Extradata, 4096)
	 */
	COLOR_FMT_RGB565_UBWC,
	/* P010 UBWC:
	 * Compressed macro-tile format for P010.
	 * Contains 4 planes in the following order -
	 * (A) Y_Meta_Plane
	 * (B) Y_UBWC_Plane
	 * (C) UV_Meta_Plane
	 * (D) UV_UBWC_Plane
	 *
	 * Y_Meta_Plane consists of meta information to decode compressed
	 * tile data in Y_UBWC_Plane.
	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
	 * UBWC decoder block will use the Y_Meta_Plane data together with
	 * Y_UBWC_Plane data to produce lossless uncompressed 10 bit Y samples.
	 *
	 * UV_Meta_Plane consists of meta information to decode compressed
	 * tile data in UV_UBWC_Plane.
	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
	 * UBWC decoder block will use UV_Meta_Plane data together with
	 * UV_UBWC_Plane data to produce lossless uncompressed 10 bit 2x2
	 * subsampled color difference samples.
	 *
	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
	 * and randomly accessible. There is no dependency between tiles.
	 *
	 * <----- Y_Meta_Stride ----->
	 * <-------- Width ------>
	 * M M M M M M M M M M M M . .      ^           ^
	 * M M M M M M M M M M M M . .      |           |
	 * M M M M M M M M M M M M . .   Height         |
	 * M M M M M M M M M M M M . .      |     Meta_Y_Scanlines
	 * M M M M M M M M M M M M . .      |           |
	 * M M M M M M M M M M M M . .      |           |
	 * M M M M M M M M M M M M . .      |           |
	 * M M M M M M M M M M M M . .      V           |
	 * . . . . . . . . . . . . . .                  |
	 * . . . . . . . . . . . . . .                  |
	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
	 * . . . . . . . . . . . . . .                  V
	 * <--Compressed tile Y Stride--->
	 * <------- Width ------->
	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height       |
	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |    Macro_tile_Y_Scanlines
	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
	 * . . . . . . . . . . . . . . . .              |
	 * . . . . . . . . . . . . . . . .              |
	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
	 * . . . . . . . . . . . . . . . .              V
	 * <----- UV_Meta_Stride ---->
	 * M M M M M M M M M M M M . .      ^
	 * M M M M M M M M M M M M . .      |
	 * M M M M M M M M M M M M . .      |
	 * M M M M M M M M M M M M . .   M_UV_Scanlines
	 * . . . . . . . . . . . . . .      |
	 * . . . . . . . . . . . . . .      V
	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
	 * <--Compressed tile UV Stride--->
	 * U* V* U* V* U* V* U* V* . . . .  ^
	 * U* V* U* V* U* V* U* V* . . . .  |
	 * U* V* U* V* U* V* U* V* . . . .  |
	 * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
	 * . . . . . . . . . . . . . . . .  |
	 * . . . . . . . . . . . . . . . .  V
	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
	 *
	 *
	 * Y_Stride = align(Width * 2, 256)
	 * UV_Stride = align(Width * 2, 256)
	 * Y_Scanlines = align(Height, 16)
	 * UV_Scanlines = align(Height/2, 16)
	 * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
	 * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
	 * Extradata = 8k
	 *
	 * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
	 *		Y_Meta_Plane_size + UV_Meta_Plane_size
	 *		+ max(Extradata, Y_Stride * 48), 4096)
	 */
	COLOR_FMT_P010_UBWC,
	/* Venus P010:
	 * YUV 4:2:0 image with a plane of 10 bit Y samples followed
	 * by an interleaved U/V plane containing 10 bit 2x2 subsampled
	 * colour difference samples.
	 *
	 * <-------- Y/UV_Stride -------->
	 * <------- Width ------->
	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height       |
	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |       Y_Scanlines
	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
	 * . . . . . . . . . . . . . . . .              |
	 * . . . . . . . . . . . . . . . .              |
	 * . . . . . . . . . . . . . . . .              |
	 * . . . . . . . . . . . . . . . .              V
	 * U V U V U V U V U V U V . . . .  ^
	 * U V U V U V U V U V U V . . . .  |
	 * U V U V U V U V U V U V . . . .  |
	 * U V U V U V U V U V U V . . . . UV_Scanlines
	 * . . . . . . . . . . . . . . . .  |
	 * . . . . . . . . . . . . . . . .  V
	 * . . . . . . . . . . . . . . . .  -------> Buffer size alignment
	 *
	 * Y_Stride : Width * 2 aligned to 128
	 * UV_Stride : Width * 2 aligned to 128
	 * Y_Scanlines: Height aligned to 32
	 * UV_Scanlines: Height/2 aligned to 16
	 * Extradata: Arbitrary (software-imposed) padding
	 * Total size = align(Y_Stride * Y_Scanlines
	 *		+ UV_Stride * UV_Scanlines
	 *		+ max(Extradata, Y_Stride * 8), 4096)
	 */
	COLOR_FMT_P010,
};

#define COLOR_FMT_RGBA1010102_UBWC COLOR_FMT_RGBA1010102_UBWC
#define COLOR_FMT_RGB565_UBWC COLOR_FMT_RGB565_UBWC
#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC
#define COLOR_FMT_P010 COLOR_FMT_P010

/*
 * Function arguments:
 * @color_fmt
 * @width
 * Progressive: width
 * Interlaced: width
 */
static unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
{
	unsigned int stride = 0;

	if (!width)
		return 0;

	switch (color_fmt) {
	case COLOR_FMT_NV21:
	case COLOR_FMT_NV12:
	case COLOR_FMT_NV12_MVTB:
	case COLOR_FMT_NV12_UBWC:
		stride = MSM_MEDIA_ALIGN(width, 128);
		break;
	case COLOR_FMT_NV12_BPP10_UBWC:
		stride = MSM_MEDIA_ALIGN(width, 192);
		stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
		break;
	case COLOR_FMT_P010_UBWC:
		stride = MSM_MEDIA_ALIGN(width * 2, 256);
		break;
	case COLOR_FMT_P010:
		stride = MSM_MEDIA_ALIGN(width * 2, 128);
		break;
	}

	return stride;
}

/*
 * Function arguments:
 * @color_fmt
 * @width
 * Progressive: width
 * Interlaced: width
 */
static unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
{
	unsigned int stride = 0;

	if (!width)
		return 0;

	switch (color_fmt) {
	case COLOR_FMT_NV21:
	case COLOR_FMT_NV12:
	case COLOR_FMT_NV12_MVTB:
	case COLOR_FMT_NV12_UBWC:
		stride = MSM_MEDIA_ALIGN(width, 128);
		break;
	case COLOR_FMT_NV12_BPP10_UBWC:
		stride = MSM_MEDIA_ALIGN(width, 192);
		stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
		break;
	case COLOR_FMT_P010_UBWC:
		stride = MSM_MEDIA_ALIGN(width * 2, 256);
		break;
	case COLOR_FMT_P010:
		stride = MSM_MEDIA_ALIGN(width * 2, 128);
		break;
	}

	return stride;
}

/*
 * Function arguments:
 * @color_fmt
 * @height
 * Progressive: height
 * Interlaced: (height+1)>>1
 */
static unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
{
	unsigned int sclines = 0;

	if (!height)
		return 0;

	switch (color_fmt) {
	case COLOR_FMT_NV21:
	case COLOR_FMT_NV12:
	case COLOR_FMT_NV12_MVTB:
	case COLOR_FMT_NV12_UBWC:
	case COLOR_FMT_P010:
		sclines = MSM_MEDIA_ALIGN(height, 32);
		break;
	case COLOR_FMT_NV12_BPP10_UBWC:
	case COLOR_FMT_P010_UBWC:
		sclines = MSM_MEDIA_ALIGN(height, 16);
		break;
	}

	return sclines;
}

/*
 * Function arguments:
 * @color_fmt
 * @height
 * Progressive: height
 * Interlaced: (height+1)>>1
 */
static unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
{
	unsigned int sclines = 0;

	if (!height)
		return 0;

	switch (color_fmt) {
	case COLOR_FMT_NV21:
	case COLOR_FMT_NV12:
	case COLOR_FMT_NV12_MVTB:
	case COLOR_FMT_NV12_BPP10_UBWC:
	case COLOR_FMT_P010_UBWC:
	case COLOR_FMT_P010:
		sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 16);
		break;
	case COLOR_FMT_NV12_UBWC:
		sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 32);
		break;
	}

	return sclines;
}

/*
 * Function arguments:
 * @color_fmt
 * @width
 * Progressive: width
 * Interlaced: width
 */
static unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
{
	int y_tile_width = 0, y_meta_stride;

	if (!width)
		return 0;

	switch (color_fmt) {
	case COLOR_FMT_NV12_UBWC:
	case COLOR_FMT_P010_UBWC:
		y_tile_width = 32;
		break;
	case COLOR_FMT_NV12_BPP10_UBWC:
		y_tile_width = 48;
		break;
	default:
		return 0;
	}

	y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);

	return MSM_MEDIA_ALIGN(y_meta_stride, 64);
}

/*
 * Function arguments:
 * @color_fmt
 * @height
 * Progressive: height
 * Interlaced: (height+1)>>1
 */
static unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
{
	int y_tile_height = 0, y_meta_scanlines;

	if (!height)
		return 0;

	switch (color_fmt) {
	case COLOR_FMT_NV12_UBWC:
		y_tile_height = 8;
		break;
	case COLOR_FMT_NV12_BPP10_UBWC:
	case COLOR_FMT_P010_UBWC:
		y_tile_height = 4;
		break;
	default:
		return 0;
	}

	y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);

	return MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
}

/*
 * Function arguments:
 * @color_fmt
 * @width
 * Progressive: width
 * Interlaced: width
 */
static unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
{
	int uv_tile_width = 0, uv_meta_stride;

	if (!width)
		return 0;

	switch (color_fmt) {
	case COLOR_FMT_NV12_UBWC:
	case COLOR_FMT_P010_UBWC:
		uv_tile_width = 16;
		break;
	case COLOR_FMT_NV12_BPP10_UBWC:
		uv_tile_width = 24;
		break;
	default:
		return 0;
	}

	uv_meta_stride = MSM_MEDIA_ROUNDUP((width + 1) >> 1, uv_tile_width);

	return MSM_MEDIA_ALIGN(uv_meta_stride, 64);
}

/*
 * Function arguments:
 * @color_fmt
 * @height
 * Progressive: height
 * Interlaced: (height+1)>>1
 */
static unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
{
	int uv_tile_height = 0, uv_meta_scanlines;

	if (!height)
		return 0;

	switch (color_fmt) {
	case COLOR_FMT_NV12_UBWC:
		uv_tile_height = 8;
		break;
	case COLOR_FMT_NV12_BPP10_UBWC:
	case COLOR_FMT_P010_UBWC:
		uv_tile_height = 4;
		break;
	default:
		return 0;
	}

	uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height + 1) >> 1, uv_tile_height);

	return MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
}

static unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
{
	unsigned int alignment = 0, bpp = 4;

	if (!width)
		return 0;

	switch (color_fmt) {
	case COLOR_FMT_RGBA8888:
		alignment = 128;
		break;
	case COLOR_FMT_RGB565_UBWC:
		alignment = 256;
		bpp = 2;
		break;
	case COLOR_FMT_RGBA8888_UBWC:
	case COLOR_FMT_RGBA1010102_UBWC:
		alignment = 256;
		break;
	default:
		return 0;
	}

	return MSM_MEDIA_ALIGN(width * bpp, alignment);
}

static unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
{
	unsigned int alignment = 0;

	if (!height)
		return 0;

	switch (color_fmt) {
	case COLOR_FMT_RGBA8888:
		alignment = 32;
		break;
	case COLOR_FMT_RGBA8888_UBWC:
	case COLOR_FMT_RGBA1010102_UBWC:
	case COLOR_FMT_RGB565_UBWC:
		alignment = 16;
		break;
	default:
		return 0;
	}

	return MSM_MEDIA_ALIGN(height, alignment);
}

static unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
{
	int rgb_meta_stride;

	if (!width)
		return 0;

	switch (color_fmt) {
	case COLOR_FMT_RGBA8888_UBWC:
	case COLOR_FMT_RGBA1010102_UBWC:
	case COLOR_FMT_RGB565_UBWC:
		rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, 16);
		return MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
	}

	return 0;
}

static unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
{
	int rgb_meta_scanlines;

	if (!height)
		return 0;

	switch (color_fmt) {
	case COLOR_FMT_RGBA8888_UBWC:
	case COLOR_FMT_RGBA1010102_UBWC:
	case COLOR_FMT_RGB565_UBWC:
		rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, 4);
		return MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
	}

	return 0;
}

#endif
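/*
 * Illustrative sketch, not part of the original header: how the helpers
 * above compose into the documented P010 UBWC total-size formula
 * (align(Y_UBWC_Plane_size + UV_UBWC_Plane_size + Y_Meta_Plane_size +
 * UV_Meta_Plane_size + max(Extradata, Y_Stride * 48), 4096)). The function
 * name venus_p010_ubwc_buffer_size() is hypothetical; MSM_MEDIA_ALIGN() is
 * the alignment macro the helpers already use, and 8192 is the 8k
 * Extradata from the layout comment.
 */
static unsigned int venus_p010_ubwc_buffer_size(int width, int height)
{
	unsigned int y_stride = VENUS_Y_STRIDE(COLOR_FMT_P010_UBWC, width);
	unsigned int uv_stride = VENUS_UV_STRIDE(COLOR_FMT_P010_UBWC, width);
	unsigned int y_sclines = VENUS_Y_SCANLINES(COLOR_FMT_P010_UBWC, height);
	unsigned int uv_sclines = VENUS_UV_SCANLINES(COLOR_FMT_P010_UBWC, height);
	/* each plane is padded out to a 4k boundary */
	unsigned int y_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
	unsigned int uv_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
	unsigned int y_meta = MSM_MEDIA_ALIGN(
		VENUS_Y_META_STRIDE(COLOR_FMT_P010_UBWC, width) *
		VENUS_Y_META_SCANLINES(COLOR_FMT_P010_UBWC, height), 4096);
	unsigned int uv_meta = MSM_MEDIA_ALIGN(
		VENUS_UV_META_STRIDE(COLOR_FMT_P010_UBWC, width) *
		VENUS_UV_META_SCANLINES(COLOR_FMT_P010_UBWC, height), 4096);
	/* max(Extradata, Y_Stride * 48), with Extradata = 8k */
	unsigned int extra = y_stride * 48 > 8192 ? y_stride * 48 : 8192;

	return MSM_MEDIA_ALIGN(y_plane + uv_plane + y_meta + uv_meta + extra,
			       4096);
}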
/* * Copyright (C) 2017 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef _sdma1_4_0_SH_MASK_HEADER #define _sdma1_4_0_SH_MASK_HEADER // addressBlock: sdma1_sdma1dec //SDMA1_UCODE_ADDR #define SDMA1_UCODE_ADDR__VALUE__SHIFT 0x0 #define SDMA1_UCODE_ADDR__VALUE_MASK 0x00001FFFL //SDMA1_UCODE_DATA #define SDMA1_UCODE_DATA__VALUE__SHIFT 0x0 #define SDMA1_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL //SDMA1_VM_CNTL #define SDMA1_VM_CNTL__CMD__SHIFT 0x0 #define SDMA1_VM_CNTL__CMD_MASK 0x0000000FL //SDMA1_VM_CTX_LO #define SDMA1_VM_CTX_LO__ADDR__SHIFT 0x2 #define SDMA1_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL //SDMA1_VM_CTX_HI #define SDMA1_VM_CTX_HI__ADDR__SHIFT 0x0 #define SDMA1_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_ACTIVE_FCN_ID #define SDMA1_ACTIVE_FCN_ID__VFID__SHIFT 0x0 #define SDMA1_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4 #define SDMA1_ACTIVE_FCN_ID__VF__SHIFT 0x1f #define SDMA1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL #define SDMA1_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L #define SDMA1_ACTIVE_FCN_ID__VF_MASK 0x80000000L //SDMA1_VM_CTX_CNTL #define SDMA1_VM_CTX_CNTL__PRIV__SHIFT 0x0 #define SDMA1_VM_CTX_CNTL__VMID__SHIFT 0x4 #define SDMA1_VM_CTX_CNTL__PRIV_MASK 0x00000001L #define SDMA1_VM_CTX_CNTL__VMID_MASK 0x000000F0L //SDMA1_VIRT_RESET_REQ #define SDMA1_VIRT_RESET_REQ__VF__SHIFT 0x0 #define SDMA1_VIRT_RESET_REQ__PF__SHIFT 0x1f #define SDMA1_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL #define SDMA1_VIRT_RESET_REQ__PF_MASK 0x80000000L //SDMA1_VF_ENABLE #define SDMA1_VF_ENABLE__VF_ENABLE__SHIFT 0x0 #define SDMA1_VF_ENABLE__VF_ENABLE_MASK 0x00000001L //SDMA1_CONTEXT_REG_TYPE0 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL__SHIFT 0x0 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE__SHIFT 0x1 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI__SHIFT 0x2 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR__SHIFT 0x3 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI__SHIFT 0x4 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR__SHIFT 0x5 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI__SHIFT 0x6 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL__SHIFT 0xa #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR__SHIFT 0xb #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET__SHIFT 0xc #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO__SHIFT 0xd #define 
SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI__SHIFT 0xe #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE__SHIFT 0xf #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL__SHIFT 0x10 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS__SHIFT 0x11 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL__SHIFT 0x12 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL__SHIFT 0x13 #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL_MASK 0x00000001L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_MASK 0x00000002L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI_MASK 0x00000004L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_MASK 0x00000008L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI_MASK 0x00000010L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_MASK 0x00000020L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI_MASK 0x00000040L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL_MASK 0x00000400L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR_MASK 0x00000800L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET_MASK 0x00001000L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO_MASK 0x00002000L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI_MASK 0x00004000L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE_MASK 0x00008000L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL_MASK 0x00010000L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS_MASK 0x00020000L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL_MASK 0x00040000L #define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL_MASK 0x00080000L //SDMA1_CONTEXT_REG_TYPE1 #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS__SHIFT 0x8 #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG__SHIFT 0x9 #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK__SHIFT 0xa #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET__SHIFT 0xb #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO__SHIFT 0xc #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI__SHIFT 0xd #define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN__SHIFT 0xf #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT__SHIFT 0x10 #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG__SHIFT 0x11 #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12 #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13 #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL__SHIFT 0x14 #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE__SHIFT 0x15 #define SDMA1_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16 #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS_MASK 0x00000100L #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG_MASK 0x00000200L #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK_MASK 0x00000400L #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET_MASK 0x00000800L #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO_MASK 0x00001000L #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI_MASK 0x00002000L #define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN_MASK 0x00008000L #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT_MASK 0x00010000L #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG_MASK 0x00020000L #define 
SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL_MASK 0x00100000L #define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L #define SDMA1_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L //SDMA1_CONTEXT_REG_TYPE2 #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0__SHIFT 0x0 #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1__SHIFT 0x1 #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2__SHIFT 0x2 #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3__SHIFT 0x3 #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4__SHIFT 0x4 #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5__SHIFT 0x5 #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6__SHIFT 0x6 #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7__SHIFT 0x7 #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8__SHIFT 0x8 #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL__SHIFT 0x9 #define SDMA1_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0_MASK 0x00000001L #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1_MASK 0x00000002L #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2_MASK 0x00000004L #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3_MASK 0x00000008L #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4_MASK 0x00000010L #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5_MASK 0x00000020L #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6_MASK 0x00000040L #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7_MASK 0x00000080L #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8_MASK 0x00000100L #define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL_MASK 0x00000200L #define SDMA1_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L //SDMA1_CONTEXT_REG_TYPE3 #define SDMA1_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0 #define SDMA1_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL //SDMA1_PUB_REG_TYPE0 #define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR__SHIFT 0x0 #define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA__SHIFT 0x1 #define SDMA1_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3 #define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL__SHIFT 0x4 #define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO__SHIFT 0x5 #define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI__SHIFT 0x6 #define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID__SHIFT 0x7 #define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL__SHIFT 0x8 #define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ__SHIFT 0x9 #define SDMA1_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa #define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0__SHIFT 0xb #define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1__SHIFT 0xc #define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2__SHIFT 0xd #define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3__SHIFT 0xe #define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0__SHIFT 0xf #define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1__SHIFT 0x10 #define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2__SHIFT 0x11 #define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3__SHIFT 0x12 #define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL__SHIFT 0x13 #define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x14 #define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19 #define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL__SHIFT 0x1a #define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL__SHIFT 0x1b #define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL__SHIFT 0x1c #define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS__SHIFT 0x1d #define 
SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG__SHIFT 0x1e #define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ__SHIFT 0x1f #define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR_MASK 0x00000001L #define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA_MASK 0x00000002L #define SDMA1_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L #define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL_MASK 0x00000010L #define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO_MASK 0x00000020L #define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI_MASK 0x00000040L #define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID_MASK 0x00000080L #define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL_MASK 0x00000100L #define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ_MASK 0x00000200L #define SDMA1_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L #define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0_MASK 0x00000800L #define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1_MASK 0x00001000L #define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2_MASK 0x00002000L #define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3_MASK 0x00004000L #define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0_MASK 0x00008000L #define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1_MASK 0x00010000L #define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2_MASK 0x00020000L #define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3_MASK 0x00040000L #define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL_MASK 0x00080000L #define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01F00000L #define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L #define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL_MASK 0x04000000L #define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL_MASK 0x08000000L #define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL_MASK 0x10000000L #define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS_MASK 0x20000000L #define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_MASK 0x40000000L #define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ_MASK 0x80000000L //SDMA1_PUB_REG_TYPE1 #define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI__SHIFT 0x0 #define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1 #define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH__SHIFT 0x2 #define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH__SHIFT 0x3 #define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM__SHIFT 0x4 #define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG__SHIFT 0x5 #define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG__SHIFT 0x6 #define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL__SHIFT 0x7 #define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG__SHIFT 0x8 #define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM__SHIFT 0x9 #define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL__SHIFT 0xa #define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE__SHIFT 0xb #define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM__SHIFT 0xc #define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM__SHIFT 0xd #define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe #define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf #define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10 #define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11 #define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG__SHIFT 0x12 #define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD__SHIFT 0x13 #define SDMA1_PUB_REG_TYPE1__SDMA1_ID__SHIFT 0x14 #define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION__SHIFT 0x15 #define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER__SHIFT 0x16 #define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR__SHIFT 0x17 #define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG__SHIFT 0x18 #define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL__SHIFT 0x19 #define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO__SHIFT 0x1a #define 
SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI__SHIFT 0x1b #define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL__SHIFT 0x1c #define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK__SHIFT 0x1d #define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS__SHIFT 0x1e #define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS__SHIFT 0x1f #define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI_MASK 0x00000001L #define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L #define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_MASK 0x00000004L #define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH_MASK 0x00000008L #define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM_MASK 0x00000010L #define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG_MASK 0x00000020L #define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG_MASK 0x00000040L #define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL_MASK 0x00000080L #define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG_MASK 0x00000100L #define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM_MASK 0x00000200L #define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL_MASK 0x00000400L #define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE_MASK 0x00000800L #define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM_MASK 0x00001000L #define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM_MASK 0x00002000L #define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L #define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L #define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L #define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L #define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG_MASK 0x00040000L #define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD_MASK 0x00080000L #define SDMA1_PUB_REG_TYPE1__SDMA1_ID_MASK 0x00100000L #define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION_MASK 0x00200000L #define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_MASK 0x00400000L #define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR_MASK 0x00800000L #define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG_MASK 0x01000000L #define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL_MASK 0x02000000L #define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO_MASK 0x04000000L #define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI_MASK 0x08000000L #define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL_MASK 0x10000000L #define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK_MASK 0x20000000L #define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS_MASK 0x40000000L #define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS_MASK 0x80000000L //SDMA1_PUB_REG_TYPE2 #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0__SHIFT 0x0 #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1__SHIFT 0x1 #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2__SHIFT 0x2 #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0__SHIFT 0x3 #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1__SHIFT 0x4 #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0__SHIFT 0x5 #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1__SHIFT 0x6 #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT__SHIFT 0x7 #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE__SHIFT 0x8 #define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE__SHIFT 0x9 #define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT__SHIFT 0xa #define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2__SHIFT 0xb #define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG__SHIFT 0xc #define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO__SHIFT 0xd #define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI__SHIFT 0xe #define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM__SHIFT 0xf #define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG__SHIFT 0x10 #define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0__SHIFT 0x11 #define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1__SHIFT 0x12 
#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2__SHIFT 0x13 #define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3__SHIFT 0x14 #define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER__SHIFT 0x15 #define SDMA1_PUB_REG_TYPE2__SDMA1_UNBREAKABLE__SHIFT 0x16 #define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL__SHIFT 0x17 #define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT__SHIFT 0x18 #define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT__SHIFT 0x19 #define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a #define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL__SHIFT 0x1b #define SDMA1_PUB_REG_TYPE2__SDMA1_MMHUB_TRUSTLVL__SHIFT 0x1c #define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d #define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL__SHIFT 0x1e #define SDMA1_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0_MASK 0x00000001L #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1_MASK 0x00000002L #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2_MASK 0x00000004L #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0_MASK 0x00000008L #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1_MASK 0x00000010L #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0_MASK 0x00000020L #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1_MASK 0x00000040L #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT_MASK 0x00000080L #define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE_MASK 0x00000100L #define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE_MASK 0x00000200L #define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT_MASK 0x00000400L #define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2_MASK 0x00000800L #define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG_MASK 0x00001000L #define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO_MASK 0x00002000L #define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI_MASK 0x00004000L #define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM_MASK 0x00008000L #define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG_MASK 0x00010000L #define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0_MASK 0x00020000L #define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1_MASK 0x00040000L #define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2_MASK 0x00080000L #define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3_MASK 0x00100000L #define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER_MASK 0x00200000L #define SDMA1_PUB_REG_TYPE2__SDMA1_UNBREAKABLE_MASK 0x00400000L #define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL_MASK 0x00800000L #define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT_MASK 0x01000000L #define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT_MASK 0x02000000L #define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L #define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL_MASK 0x08000000L #define SDMA1_PUB_REG_TYPE2__SDMA1_MMHUB_TRUSTLVL_MASK 0x10000000L #define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L #define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL_MASK 0x40000000L #define SDMA1_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L //SDMA1_PUB_REG_TYPE3 #define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA__SHIFT 0x0 #define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX__SHIFT 0x1 #define SDMA1_PUB_REG_TYPE3__RESERVED__SHIFT 0x2 #define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA_MASK 0x00000001L #define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX_MASK 0x00000002L #define SDMA1_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFFCL //SDMA1_MMHUB_CNTL #define SDMA1_MMHUB_CNTL__UNIT_ID__SHIFT 0x0 #define SDMA1_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL //SDMA1_CONTEXT_GROUP_BOUNDARY #define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0 #define 
SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL //SDMA1_POWER_CNTL #define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8 #define SDMA1_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9 #define SDMA1_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa #define SDMA1_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb #define SDMA1_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc #define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L #define SDMA1_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L #define SDMA1_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L #define SDMA1_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L #define SDMA1_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L //SDMA1_CLK_CTRL #define SDMA1_CLK_CTRL__ON_DELAY__SHIFT 0x0 #define SDMA1_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 #define SDMA1_CLK_CTRL__RESERVED__SHIFT 0xc #define SDMA1_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18 #define SDMA1_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19 #define SDMA1_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a #define SDMA1_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b #define SDMA1_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c #define SDMA1_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d #define SDMA1_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e #define SDMA1_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f #define SDMA1_CLK_CTRL__ON_DELAY_MASK 0x0000000FL #define SDMA1_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L #define SDMA1_CLK_CTRL__RESERVED_MASK 0x00FFF000L #define SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L #define SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L #define SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L #define SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L #define SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L #define SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L #define SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L #define SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L //SDMA1_CNTL #define SDMA1_CNTL__TRAP_ENABLE__SHIFT 0x0 #define SDMA1_CNTL__UTC_L1_ENABLE__SHIFT 0x1 #define SDMA1_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2 #define SDMA1_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3 #define SDMA1_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4 #define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5 #define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11 #define SDMA1_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12 #define SDMA1_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c #define SDMA1_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d #define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e #define SDMA1_CNTL__TRAP_ENABLE_MASK 0x00000001L #define SDMA1_CNTL__UTC_L1_ENABLE_MASK 0x00000002L #define SDMA1_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L #define SDMA1_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L #define SDMA1_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L #define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L #define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L #define SDMA1_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L #define SDMA1_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L #define SDMA1_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L #define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L //SDMA1_CHICKEN_BITS #define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0 #define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1 #define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2 #define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8 #define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa #define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10 #define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11 #define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14 #define 
SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17 #define SDMA1_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19 #define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a #define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c #define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e #define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L #define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L #define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L #define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L #define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L #define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L #define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L #define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L #define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L #define SDMA1_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L #define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L #define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L #define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L //SDMA1_GB_ADDR_CONFIG #define SDMA1_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 #define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 #define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8 #define SDMA1_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc #define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 #define SDMA1_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L #define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L #define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L #define SDMA1_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L #define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L //SDMA1_GB_ADDR_CONFIG_READ #define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0 #define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 #define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8 #define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc #define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13 #define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L #define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L #define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L #define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L #define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L //SDMA1_RB_RPTR_FETCH_HI #define SDMA1_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0 #define SDMA1_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL //SDMA1_SEM_WAIT_FAIL_TIMER_CNTL #define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0 #define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL //SDMA1_RB_RPTR_FETCH #define SDMA1_RB_RPTR_FETCH__OFFSET__SHIFT 0x2 #define SDMA1_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL //SDMA1_IB_OFFSET_FETCH #define SDMA1_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2 #define SDMA1_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL //SDMA1_PROGRAM #define SDMA1_PROGRAM__STREAM__SHIFT 0x0 #define SDMA1_PROGRAM__STREAM_MASK 0xFFFFFFFFL //SDMA1_STATUS_REG #define SDMA1_STATUS_REG__IDLE__SHIFT 0x0 #define SDMA1_STATUS_REG__REG_IDLE__SHIFT 0x1 #define SDMA1_STATUS_REG__RB_EMPTY__SHIFT 0x2 #define SDMA1_STATUS_REG__RB_FULL__SHIFT 0x3 #define SDMA1_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4 #define SDMA1_STATUS_REG__RB_CMD_FULL__SHIFT 0x5 #define SDMA1_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6 #define SDMA1_STATUS_REG__IB_CMD_FULL__SHIFT 0x7 #define SDMA1_STATUS_REG__BLOCK_IDLE__SHIFT 0x8 #define 
SDMA1_STATUS_REG__INSIDE_IB__SHIFT 0x9 #define SDMA1_STATUS_REG__EX_IDLE__SHIFT 0xa #define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb #define SDMA1_STATUS_REG__PACKET_READY__SHIFT 0xc #define SDMA1_STATUS_REG__MC_WR_IDLE__SHIFT 0xd #define SDMA1_STATUS_REG__SRBM_IDLE__SHIFT 0xe #define SDMA1_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf #define SDMA1_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10 #define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11 #define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12 #define SDMA1_STATUS_REG__MC_RD_IDLE__SHIFT 0x13 #define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14 #define SDMA1_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15 #define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16 #define SDMA1_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19 #define SDMA1_STATUS_REG__SEM_IDLE__SHIFT 0x1a #define SDMA1_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b #define SDMA1_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c #define SDMA1_STATUS_REG__INT_IDLE__SHIFT 0x1e #define SDMA1_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f #define SDMA1_STATUS_REG__IDLE_MASK 0x00000001L #define SDMA1_STATUS_REG__REG_IDLE_MASK 0x00000002L #define SDMA1_STATUS_REG__RB_EMPTY_MASK 0x00000004L #define SDMA1_STATUS_REG__RB_FULL_MASK 0x00000008L #define SDMA1_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L #define SDMA1_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L #define SDMA1_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L #define SDMA1_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L #define SDMA1_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L #define SDMA1_STATUS_REG__INSIDE_IB_MASK 0x00000200L #define SDMA1_STATUS_REG__EX_IDLE_MASK 0x00000400L #define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L #define SDMA1_STATUS_REG__PACKET_READY_MASK 0x00001000L #define SDMA1_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L #define SDMA1_STATUS_REG__SRBM_IDLE_MASK 0x00004000L #define SDMA1_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L #define SDMA1_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L #define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L #define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L #define SDMA1_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L #define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L #define SDMA1_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L #define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L #define SDMA1_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L #define SDMA1_STATUS_REG__SEM_IDLE_MASK 0x04000000L #define SDMA1_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L #define SDMA1_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L #define SDMA1_STATUS_REG__INT_IDLE_MASK 0x40000000L #define SDMA1_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L //SDMA1_STATUS1_REG #define SDMA1_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0 #define SDMA1_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1 #define SDMA1_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2 #define SDMA1_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3 #define SDMA1_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4 #define SDMA1_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5 #define SDMA1_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6 #define SDMA1_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9 #define SDMA1_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa #define SDMA1_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd #define SDMA1_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe #define SDMA1_STATUS1_REG__EX_START__SHIFT 0xf #define SDMA1_STATUS1_REG__CE_RD_STALL__SHIFT 0x11 #define SDMA1_STATUS1_REG__CE_WR_STALL__SHIFT 0x12 #define SDMA1_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L #define SDMA1_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L #define SDMA1_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L #define 
SDMA1_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L #define SDMA1_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L #define SDMA1_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L #define SDMA1_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L #define SDMA1_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L #define SDMA1_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L #define SDMA1_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L #define SDMA1_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L #define SDMA1_STATUS1_REG__EX_START_MASK 0x00008000L #define SDMA1_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L #define SDMA1_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L //SDMA1_RD_BURST_CNTL #define SDMA1_RD_BURST_CNTL__RD_BURST__SHIFT 0x0 #define SDMA1_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L //SDMA1_HBM_PAGE_CONFIG #define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0 #define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L //SDMA1_UCODE_CHECKSUM #define SDMA1_UCODE_CHECKSUM__DATA__SHIFT 0x0 #define SDMA1_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL //SDMA1_F32_CNTL #define SDMA1_F32_CNTL__HALT__SHIFT 0x0 #define SDMA1_F32_CNTL__STEP__SHIFT 0x1 #define SDMA1_F32_CNTL__HALT_MASK 0x00000001L #define SDMA1_F32_CNTL__STEP_MASK 0x00000002L //SDMA1_FREEZE #define SDMA1_FREEZE__PREEMPT__SHIFT 0x0 #define SDMA1_FREEZE__FREEZE__SHIFT 0x4 #define SDMA1_FREEZE__FROZEN__SHIFT 0x5 #define SDMA1_FREEZE__F32_FREEZE__SHIFT 0x6 #define SDMA1_FREEZE__PREEMPT_MASK 0x00000001L #define SDMA1_FREEZE__FREEZE_MASK 0x00000010L #define SDMA1_FREEZE__FROZEN_MASK 0x00000020L #define SDMA1_FREEZE__F32_FREEZE_MASK 0x00000040L //SDMA1_PHASE0_QUANTUM #define SDMA1_PHASE0_QUANTUM__UNIT__SHIFT 0x0 #define SDMA1_PHASE0_QUANTUM__VALUE__SHIFT 0x8 #define SDMA1_PHASE0_QUANTUM__PREFER__SHIFT 0x1e #define SDMA1_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL #define SDMA1_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L #define SDMA1_PHASE0_QUANTUM__PREFER_MASK 0x40000000L //SDMA1_PHASE1_QUANTUM #define SDMA1_PHASE1_QUANTUM__UNIT__SHIFT 0x0 #define SDMA1_PHASE1_QUANTUM__VALUE__SHIFT 0x8 #define SDMA1_PHASE1_QUANTUM__PREFER__SHIFT 0x1e #define SDMA1_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL #define SDMA1_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L #define SDMA1_PHASE1_QUANTUM__PREFER_MASK 0x40000000L //SDMA1_EDC_CONFIG #define SDMA1_EDC_CONFIG__DIS_EDC__SHIFT 0x1 #define SDMA1_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2 #define SDMA1_EDC_CONFIG__DIS_EDC_MASK 0x00000002L #define SDMA1_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L //SDMA1_BA_THRESHOLD #define SDMA1_BA_THRESHOLD__READ_THRES__SHIFT 0x0 #define SDMA1_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10 #define SDMA1_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL #define SDMA1_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L //SDMA1_ID #define SDMA1_ID__DEVICE_ID__SHIFT 0x0 #define SDMA1_ID__DEVICE_ID_MASK 0x000000FFL //SDMA1_VERSION #define SDMA1_VERSION__MINVER__SHIFT 0x0 #define SDMA1_VERSION__MAJVER__SHIFT 0x8 #define SDMA1_VERSION__REV__SHIFT 0x10 #define SDMA1_VERSION__MINVER_MASK 0x0000007FL #define SDMA1_VERSION__MAJVER_MASK 0x00007F00L #define SDMA1_VERSION__REV_MASK 0x003F0000L //SDMA1_EDC_COUNTER #define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_DED__SHIFT 0x0 #define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SEC__SHIFT 0x1 #define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2 #define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3 #define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4 #define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5 #define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6 #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7 #define 
SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8 #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9 #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe #define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0xf #define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10 #define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_DED_MASK 0x00000001L #define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SEC_MASK 0x00000002L #define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L #define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L #define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L #define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L #define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L #define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L #define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00008000L #define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00010000L //SDMA1_EDC_COUNTER_CLEAR #define SDMA1_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0 #define SDMA1_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L //SDMA1_STATUS2_REG #define SDMA1_STATUS2_REG__ID__SHIFT 0x0 #define SDMA1_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x2 #define SDMA1_STATUS2_REG__CMD_OP__SHIFT 0x10 #define SDMA1_STATUS2_REG__ID_MASK 0x00000003L #define SDMA1_STATUS2_REG__F32_INSTR_PTR_MASK 0x00000FFCL #define SDMA1_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L //SDMA1_ATOMIC_CNTL #define SDMA1_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0 #define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f #define SDMA1_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL #define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L //SDMA1_ATOMIC_PREOP_LO #define SDMA1_ATOMIC_PREOP_LO__DATA__SHIFT 0x0 #define SDMA1_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL //SDMA1_ATOMIC_PREOP_HI #define SDMA1_ATOMIC_PREOP_HI__DATA__SHIFT 0x0 #define SDMA1_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL //SDMA1_UTCL1_CNTL #define SDMA1_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0 #define SDMA1_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1 #define SDMA1_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb #define SDMA1_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe #define SDMA1_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18 #define SDMA1_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d #define SDMA1_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L #define SDMA1_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL #define SDMA1_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L #define SDMA1_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L #define SDMA1_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L #define SDMA1_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L //SDMA1_UTCL1_WATERMK #define SDMA1_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0 #define SDMA1_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0xa #define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x12 #define SDMA1_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x1a #define 
SDMA1_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000003FFL #define SDMA1_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0003FC00L #define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x03FC0000L #define SDMA1_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFC000000L //SDMA1_UTCL1_RD_STATUS #define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 #define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 #define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 #define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 #define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 #define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 #define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 #define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 #define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 #define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 #define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa #define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb #define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc #define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd #define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe #define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf #define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 #define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 #define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12 #define SDMA1_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13 #define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14 #define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15 #define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16 #define SDMA1_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a #define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d #define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e #define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f #define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L #define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L #define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L #define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L #define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L #define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L #define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L #define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L #define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L #define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L #define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L #define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L #define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L #define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L #define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L #define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L #define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L #define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L #define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L #define SDMA1_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L #define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L #define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L #define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L #define SDMA1_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L #define 
SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L #define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L #define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L //SDMA1_UTCL1_WR_STATUS #define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 #define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 #define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 #define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 #define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 #define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 #define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 #define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 #define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 #define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 #define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa #define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb #define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc #define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd #define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe #define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf #define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 #define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 #define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12 #define SDMA1_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13 #define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14 #define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15 #define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16 #define SDMA1_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19 #define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c #define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d #define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e #define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f #define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L #define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L #define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L #define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L #define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L #define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L #define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L #define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L #define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L #define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L #define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L #define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L #define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L #define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L #define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L #define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L #define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L #define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L #define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L #define SDMA1_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L #define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L #define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L #define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L #define SDMA1_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L 
#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L #define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L #define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L #define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L //SDMA1_UTCL1_INV0 #define SDMA1_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0 #define SDMA1_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1 #define SDMA1_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2 #define SDMA1_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3 #define SDMA1_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4 #define SDMA1_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5 #define SDMA1_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6 #define SDMA1_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7 #define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8 #define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9 #define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa #define SDMA1_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb #define SDMA1_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc #define SDMA1_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c #define SDMA1_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L #define SDMA1_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L #define SDMA1_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L #define SDMA1_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L #define SDMA1_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L #define SDMA1_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L #define SDMA1_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L #define SDMA1_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L #define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L #define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L #define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L #define SDMA1_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L #define SDMA1_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L #define SDMA1_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L //SDMA1_UTCL1_INV1 #define SDMA1_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0 #define SDMA1_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL //SDMA1_UTCL1_INV2 #define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0 #define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL //SDMA1_UTCL1_RD_XNACK0 #define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 #define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL //SDMA1_UTCL1_RD_XNACK1 #define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 #define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4 #define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8 #define SDMA1_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a #define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL #define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L #define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L #define SDMA1_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L //SDMA1_UTCL1_WR_XNACK0 #define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 #define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL //SDMA1_UTCL1_WR_XNACK1 #define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 #define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4 #define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8 #define SDMA1_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a #define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL #define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L #define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L #define SDMA1_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L //SDMA1_UTCL1_TIMEOUT #define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0 #define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10 #define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL #define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L 
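/*
 * Illustrative sketch, not part of the original register header: every
 * field in this file pairs a __SHIFT with a _MASK, so reads and
 * read-modify-write updates follow one pattern. The helper names below are
 * hypothetical; the SDMA1_VERSION and SDMA1_CNTL fields they use are
 * defined earlier in this file.
 */
static inline unsigned int sdma1_version_majver(unsigned int reg)
{
	/* extract MAJVER: mask out the field, then shift it down to bit 0 */
	return (reg & SDMA1_VERSION__MAJVER_MASK) >>
	       SDMA1_VERSION__MAJVER__SHIFT;
}

static inline unsigned int sdma1_cntl_set_trap_enable(unsigned int cntl,
						      unsigned int enable)
{
	/* insert TRAP_ENABLE: clear the field, then OR in the shifted value */
	return (cntl & ~SDMA1_CNTL__TRAP_ENABLE_MASK) |
	       ((enable << SDMA1_CNTL__TRAP_ENABLE__SHIFT) &
		SDMA1_CNTL__TRAP_ENABLE_MASK);
}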
//SDMA1_UTCL1_PAGE #define SDMA1_UTCL1_PAGE__VM_HOLE__SHIFT 0x0 #define SDMA1_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1 #define SDMA1_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6 #define SDMA1_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9 #define SDMA1_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L #define SDMA1_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL #define SDMA1_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L #define SDMA1_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L //SDMA1_POWER_CNTL_IDLE #define SDMA1_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0 #define SDMA1_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10 #define SDMA1_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18 #define SDMA1_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL #define SDMA1_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L #define SDMA1_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L //SDMA1_RELAX_ORDERING_LUT #define SDMA1_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0 #define SDMA1_RELAX_ORDERING_LUT__COPY__SHIFT 0x1 #define SDMA1_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2 #define SDMA1_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3 #define SDMA1_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4 #define SDMA1_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5 #define SDMA1_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6 #define SDMA1_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8 #define SDMA1_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9 #define SDMA1_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa #define SDMA1_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb #define SDMA1_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc #define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd #define SDMA1_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe #define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b #define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c #define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d #define SDMA1_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e #define SDMA1_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f #define SDMA1_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L #define SDMA1_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L #define SDMA1_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L #define SDMA1_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L #define SDMA1_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L #define SDMA1_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L #define SDMA1_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L #define SDMA1_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L #define SDMA1_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L #define SDMA1_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L #define SDMA1_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L #define SDMA1_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L #define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L #define SDMA1_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L #define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L #define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L #define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L #define SDMA1_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L #define SDMA1_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L //SDMA1_CHICKEN_BITS_2 #define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0 #define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL //SDMA1_STATUS3_REG #define SDMA1_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0 #define SDMA1_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10 #define SDMA1_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14 #define SDMA1_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL #define SDMA1_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L #define SDMA1_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L //SDMA1_PHYSICAL_ADDR_LO #define SDMA1_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0 #define 
SDMA1_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1 #define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2 #define SDMA1_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc #define SDMA1_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L #define SDMA1_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L #define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L #define SDMA1_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L //SDMA1_PHYSICAL_ADDR_HI #define SDMA1_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0 #define SDMA1_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL //SDMA1_PHASE2_QUANTUM #define SDMA1_PHASE2_QUANTUM__UNIT__SHIFT 0x0 #define SDMA1_PHASE2_QUANTUM__VALUE__SHIFT 0x8 #define SDMA1_PHASE2_QUANTUM__PREFER__SHIFT 0x1e #define SDMA1_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL #define SDMA1_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L #define SDMA1_PHASE2_QUANTUM__PREFER_MASK 0x40000000L //SDMA1_ERROR_LOG #define SDMA1_ERROR_LOG__OVERRIDE__SHIFT 0x0 #define SDMA1_ERROR_LOG__STATUS__SHIFT 0x10 #define SDMA1_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL #define SDMA1_ERROR_LOG__STATUS_MASK 0xFFFF0000L //SDMA1_PUB_DUMMY_REG0 #define SDMA1_PUB_DUMMY_REG0__VALUE__SHIFT 0x0 #define SDMA1_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL //SDMA1_PUB_DUMMY_REG1 #define SDMA1_PUB_DUMMY_REG1__VALUE__SHIFT 0x0 #define SDMA1_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL //SDMA1_PUB_DUMMY_REG2 #define SDMA1_PUB_DUMMY_REG2__VALUE__SHIFT 0x0 #define SDMA1_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL //SDMA1_PUB_DUMMY_REG3 #define SDMA1_PUB_DUMMY_REG3__VALUE__SHIFT 0x0 #define SDMA1_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL //SDMA1_F32_COUNTER #define SDMA1_F32_COUNTER__VALUE__SHIFT 0x0 #define SDMA1_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL //SDMA1_UNBREAKABLE #define SDMA1_UNBREAKABLE__VALUE__SHIFT 0x0 #define SDMA1_UNBREAKABLE__VALUE_MASK 0x00000001L //SDMA1_PERFMON_CNTL #define SDMA1_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0 #define SDMA1_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1 #define SDMA1_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2 #define SDMA1_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa #define SDMA1_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb #define SDMA1_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc #define SDMA1_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L #define SDMA1_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L #define SDMA1_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL #define SDMA1_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L #define SDMA1_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L #define SDMA1_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L //SDMA1_PERFCOUNTER0_RESULT #define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0 #define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL //SDMA1_PERFCOUNTER1_RESULT #define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0 #define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL //SDMA1_PERFCOUNTER_TAG_DELAY_RANGE #define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0 #define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe #define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c #define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL #define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L #define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L //SDMA1_CRD_CNTL #define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7 #define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd #define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L #define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L //SDMA1_MMHUB_TRUSTLVL #define SDMA1_MMHUB_TRUSTLVL__SECFLAG0__SHIFT 0x0 #define SDMA1_MMHUB_TRUSTLVL__SECFLAG1__SHIFT 0x3 #define SDMA1_MMHUB_TRUSTLVL__SECFLAG2__SHIFT 0x6 
#define SDMA1_MMHUB_TRUSTLVL__SECFLAG3__SHIFT 0x9 #define SDMA1_MMHUB_TRUSTLVL__SECFLAG4__SHIFT 0xc #define SDMA1_MMHUB_TRUSTLVL__SECFLAG5__SHIFT 0xf #define SDMA1_MMHUB_TRUSTLVL__SECFLAG6__SHIFT 0x12 #define SDMA1_MMHUB_TRUSTLVL__SECFLAG7__SHIFT 0x15 #define SDMA1_MMHUB_TRUSTLVL__SECFLAG0_MASK 0x00000007L #define SDMA1_MMHUB_TRUSTLVL__SECFLAG1_MASK 0x00000038L #define SDMA1_MMHUB_TRUSTLVL__SECFLAG2_MASK 0x000001C0L #define SDMA1_MMHUB_TRUSTLVL__SECFLAG3_MASK 0x00000E00L #define SDMA1_MMHUB_TRUSTLVL__SECFLAG4_MASK 0x00007000L #define SDMA1_MMHUB_TRUSTLVL__SECFLAG5_MASK 0x00038000L #define SDMA1_MMHUB_TRUSTLVL__SECFLAG6_MASK 0x001C0000L #define SDMA1_MMHUB_TRUSTLVL__SECFLAG7_MASK 0x00E00000L //SDMA1_GPU_IOV_VIOLATION_LOG #define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 #define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 #define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 #define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x12 #define SDMA1_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13 #define SDMA1_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14 #define SDMA1_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18 #define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L #define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L #define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL #define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00040000L #define SDMA1_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L #define SDMA1_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L #define SDMA1_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L //SDMA1_ULV_CNTL #define SDMA1_ULV_CNTL__HYSTERESIS__SHIFT 0x0 #define SDMA1_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d #define SDMA1_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e #define SDMA1_ULV_CNTL__ULV_STATUS__SHIFT 0x1f #define SDMA1_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL #define SDMA1_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L #define SDMA1_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L #define SDMA1_ULV_CNTL__ULV_STATUS_MASK 0x80000000L //SDMA1_EA_DBIT_ADDR_DATA #define SDMA1_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0 #define SDMA1_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL //SDMA1_EA_DBIT_ADDR_INDEX #define SDMA1_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0 #define SDMA1_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L //SDMA1_GFX_RB_CNTL #define SDMA1_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0 #define SDMA1_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1 #define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 #define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc #define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd #define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 #define SDMA1_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17 #define SDMA1_GFX_RB_CNTL__RB_VMID__SHIFT 0x18 #define SDMA1_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L #define SDMA1_GFX_RB_CNTL__RB_SIZE_MASK 0x0000007EL #define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L #define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L #define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L #define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L #define SDMA1_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L #define SDMA1_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L //SDMA1_GFX_RB_BASE #define SDMA1_GFX_RB_BASE__ADDR__SHIFT 0x0 #define SDMA1_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL //SDMA1_GFX_RB_BASE_HI #define SDMA1_GFX_RB_BASE_HI__ADDR__SHIFT 0x0 #define SDMA1_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL //SDMA1_GFX_RB_RPTR #define SDMA1_GFX_RB_RPTR__OFFSET__SHIFT 0x0 #define 
SDMA1_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL //SDMA1_GFX_RB_RPTR_HI #define SDMA1_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0 #define SDMA1_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL //SDMA1_GFX_RB_WPTR #define SDMA1_GFX_RB_WPTR__OFFSET__SHIFT 0x0 #define SDMA1_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL //SDMA1_GFX_RB_WPTR_HI #define SDMA1_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0 #define SDMA1_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL //SDMA1_GFX_RB_WPTR_POLL_CNTL #define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 #define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 #define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 #define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 #define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 #define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L #define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L #define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L #define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L #define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L //SDMA1_GFX_RB_RPTR_ADDR_HI #define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 #define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_GFX_RB_RPTR_ADDR_LO #define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 #define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL //SDMA1_GFX_IB_CNTL #define SDMA1_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0 #define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 #define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 #define SDMA1_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10 #define SDMA1_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L #define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L #define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L #define SDMA1_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L //SDMA1_GFX_IB_RPTR #define SDMA1_GFX_IB_RPTR__OFFSET__SHIFT 0x2 #define SDMA1_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL //SDMA1_GFX_IB_OFFSET #define SDMA1_GFX_IB_OFFSET__OFFSET__SHIFT 0x2 #define SDMA1_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL //SDMA1_GFX_IB_BASE_LO #define SDMA1_GFX_IB_BASE_LO__ADDR__SHIFT 0x5 #define SDMA1_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L //SDMA1_GFX_IB_BASE_HI #define SDMA1_GFX_IB_BASE_HI__ADDR__SHIFT 0x0 #define SDMA1_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_GFX_IB_SIZE #define SDMA1_GFX_IB_SIZE__SIZE__SHIFT 0x0 #define SDMA1_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL //SDMA1_GFX_SKIP_CNTL #define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 #define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL //SDMA1_GFX_CONTEXT_STATUS #define SDMA1_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0 #define SDMA1_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2 #define SDMA1_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 #define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 #define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 #define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 #define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 #define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa #define SDMA1_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L #define SDMA1_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L #define SDMA1_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L #define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L #define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L #define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L #define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L #define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L //SDMA1_GFX_DOORBELL #define SDMA1_GFX_DOORBELL__ENABLE__SHIFT 0x1c #define 
SDMA1_GFX_DOORBELL__CAPTURED__SHIFT 0x1e #define SDMA1_GFX_DOORBELL__ENABLE_MASK 0x10000000L #define SDMA1_GFX_DOORBELL__CAPTURED_MASK 0x40000000L //SDMA1_GFX_CONTEXT_CNTL #define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10 #define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L //SDMA1_GFX_STATUS #define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 #define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 #define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL #define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L //SDMA1_GFX_DOORBELL_LOG #define SDMA1_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 #define SDMA1_GFX_DOORBELL_LOG__DATA__SHIFT 0x2 #define SDMA1_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L #define SDMA1_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL //SDMA1_GFX_WATERMARK #define SDMA1_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 #define SDMA1_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 #define SDMA1_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL #define SDMA1_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L //SDMA1_GFX_DOORBELL_OFFSET #define SDMA1_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 #define SDMA1_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL //SDMA1_GFX_CSA_ADDR_LO #define SDMA1_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2 #define SDMA1_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL //SDMA1_GFX_CSA_ADDR_HI #define SDMA1_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0 #define SDMA1_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_GFX_IB_SUB_REMAIN #define SDMA1_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0 #define SDMA1_GFX_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL //SDMA1_GFX_PREEMPT #define SDMA1_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0 #define SDMA1_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L //SDMA1_GFX_DUMMY_REG #define SDMA1_GFX_DUMMY_REG__DUMMY__SHIFT 0x0 #define SDMA1_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL //SDMA1_GFX_RB_WPTR_POLL_ADDR_HI #define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 #define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_GFX_RB_WPTR_POLL_ADDR_LO #define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 #define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL //SDMA1_GFX_RB_AQL_CNTL #define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 #define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 #define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 #define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L #define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL #define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L //SDMA1_GFX_MINOR_PTR_UPDATE #define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 #define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L //SDMA1_GFX_MIDCMD_DATA0 #define SDMA1_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0 #define SDMA1_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL //SDMA1_GFX_MIDCMD_DATA1 #define SDMA1_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0 #define SDMA1_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL //SDMA1_GFX_MIDCMD_DATA2 #define SDMA1_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0 #define SDMA1_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL //SDMA1_GFX_MIDCMD_DATA3 #define SDMA1_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0 #define SDMA1_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL //SDMA1_GFX_MIDCMD_DATA4 #define SDMA1_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0 #define SDMA1_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL //SDMA1_GFX_MIDCMD_DATA5 #define SDMA1_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0 #define SDMA1_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL //SDMA1_GFX_MIDCMD_DATA6 #define SDMA1_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0 #define SDMA1_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL //SDMA1_GFX_MIDCMD_DATA7 
#define SDMA1_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0 #define SDMA1_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL //SDMA1_GFX_MIDCMD_DATA8 #define SDMA1_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0 #define SDMA1_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL //SDMA1_GFX_MIDCMD_CNTL #define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 #define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 #define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 #define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 #define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L #define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L #define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L #define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L //SDMA1_PAGE_RB_CNTL #define SDMA1_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0 #define SDMA1_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1 #define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 #define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc #define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd #define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 #define SDMA1_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17 #define SDMA1_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18 #define SDMA1_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L #define SDMA1_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000007EL #define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L #define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L #define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L #define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L #define SDMA1_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L #define SDMA1_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L //SDMA1_PAGE_RB_BASE #define SDMA1_PAGE_RB_BASE__ADDR__SHIFT 0x0 #define SDMA1_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL //SDMA1_PAGE_RB_BASE_HI #define SDMA1_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0 #define SDMA1_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL //SDMA1_PAGE_RB_RPTR #define SDMA1_PAGE_RB_RPTR__OFFSET__SHIFT 0x0 #define SDMA1_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL //SDMA1_PAGE_RB_RPTR_HI #define SDMA1_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0 #define SDMA1_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL //SDMA1_PAGE_RB_WPTR #define SDMA1_PAGE_RB_WPTR__OFFSET__SHIFT 0x0 #define SDMA1_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL //SDMA1_PAGE_RB_WPTR_HI #define SDMA1_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0 #define SDMA1_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL //SDMA1_PAGE_RB_WPTR_POLL_CNTL #define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 #define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 #define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 #define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 #define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 #define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L #define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L #define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L #define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L #define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L //SDMA1_PAGE_RB_RPTR_ADDR_HI #define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 #define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_PAGE_RB_RPTR_ADDR_LO #define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 #define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL //SDMA1_PAGE_IB_CNTL #define SDMA1_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0 #define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 #define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 #define SDMA1_PAGE_IB_CNTL__CMD_VMID__SHIFT 
0x10 #define SDMA1_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L #define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L #define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L #define SDMA1_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L //SDMA1_PAGE_IB_RPTR #define SDMA1_PAGE_IB_RPTR__OFFSET__SHIFT 0x2 #define SDMA1_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL //SDMA1_PAGE_IB_OFFSET #define SDMA1_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2 #define SDMA1_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL //SDMA1_PAGE_IB_BASE_LO #define SDMA1_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5 #define SDMA1_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L //SDMA1_PAGE_IB_BASE_HI #define SDMA1_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0 #define SDMA1_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_PAGE_IB_SIZE #define SDMA1_PAGE_IB_SIZE__SIZE__SHIFT 0x0 #define SDMA1_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL //SDMA1_PAGE_SKIP_CNTL #define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 #define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL //SDMA1_PAGE_CONTEXT_STATUS #define SDMA1_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0 #define SDMA1_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2 #define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 #define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 #define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 #define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 #define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 #define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa #define SDMA1_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L #define SDMA1_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L #define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L #define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L #define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L #define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L #define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L #define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L //SDMA1_PAGE_DOORBELL #define SDMA1_PAGE_DOORBELL__ENABLE__SHIFT 0x1c #define SDMA1_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e #define SDMA1_PAGE_DOORBELL__ENABLE_MASK 0x10000000L #define SDMA1_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L //SDMA1_PAGE_STATUS #define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 #define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 #define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL #define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L //SDMA1_PAGE_DOORBELL_LOG #define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 #define SDMA1_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2 #define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L #define SDMA1_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL //SDMA1_PAGE_WATERMARK #define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 #define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 #define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL #define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L //SDMA1_PAGE_DOORBELL_OFFSET #define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 #define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL //SDMA1_PAGE_CSA_ADDR_LO #define SDMA1_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2 #define SDMA1_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL //SDMA1_PAGE_CSA_ADDR_HI #define SDMA1_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0 #define SDMA1_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_PAGE_IB_SUB_REMAIN #define SDMA1_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0 #define SDMA1_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL //SDMA1_PAGE_PREEMPT #define SDMA1_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0 
#define SDMA1_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L //SDMA1_PAGE_DUMMY_REG #define SDMA1_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0 #define SDMA1_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL //SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI #define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 #define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO #define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 #define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL //SDMA1_PAGE_RB_AQL_CNTL #define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 #define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 #define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 #define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L #define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL #define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L //SDMA1_PAGE_MINOR_PTR_UPDATE #define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 #define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L //SDMA1_PAGE_MIDCMD_DATA0 #define SDMA1_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0 #define SDMA1_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL //SDMA1_PAGE_MIDCMD_DATA1 #define SDMA1_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0 #define SDMA1_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL //SDMA1_PAGE_MIDCMD_DATA2 #define SDMA1_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0 #define SDMA1_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL //SDMA1_PAGE_MIDCMD_DATA3 #define SDMA1_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0 #define SDMA1_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL //SDMA1_PAGE_MIDCMD_DATA4 #define SDMA1_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0 #define SDMA1_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL //SDMA1_PAGE_MIDCMD_DATA5 #define SDMA1_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0 #define SDMA1_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL //SDMA1_PAGE_MIDCMD_DATA6 #define SDMA1_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0 #define SDMA1_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL //SDMA1_PAGE_MIDCMD_DATA7 #define SDMA1_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0 #define SDMA1_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL //SDMA1_PAGE_MIDCMD_DATA8 #define SDMA1_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0 #define SDMA1_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL //SDMA1_PAGE_MIDCMD_CNTL #define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 #define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 #define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 #define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 #define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L #define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L #define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L #define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L //SDMA1_RLC0_RB_CNTL #define SDMA1_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0 #define SDMA1_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1 #define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 #define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc #define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd #define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 #define SDMA1_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17 #define SDMA1_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18 #define SDMA1_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L #define SDMA1_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000007EL #define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L #define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L #define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L #define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L #define 
SDMA1_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L #define SDMA1_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L //SDMA1_RLC0_RB_BASE #define SDMA1_RLC0_RB_BASE__ADDR__SHIFT 0x0 #define SDMA1_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL //SDMA1_RLC0_RB_BASE_HI #define SDMA1_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0 #define SDMA1_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL //SDMA1_RLC0_RB_RPTR #define SDMA1_RLC0_RB_RPTR__OFFSET__SHIFT 0x0 #define SDMA1_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL //SDMA1_RLC0_RB_RPTR_HI #define SDMA1_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0 #define SDMA1_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL //SDMA1_RLC0_RB_WPTR #define SDMA1_RLC0_RB_WPTR__OFFSET__SHIFT 0x0 #define SDMA1_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL //SDMA1_RLC0_RB_WPTR_HI #define SDMA1_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0 #define SDMA1_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL //SDMA1_RLC0_RB_WPTR_POLL_CNTL #define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 #define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 #define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 #define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 #define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 #define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L #define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L #define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L #define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L #define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L //SDMA1_RLC0_RB_RPTR_ADDR_HI #define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 #define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_RLC0_RB_RPTR_ADDR_LO #define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 #define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL //SDMA1_RLC0_IB_CNTL #define SDMA1_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0 #define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 #define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 #define SDMA1_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10 #define SDMA1_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L #define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L #define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L #define SDMA1_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L //SDMA1_RLC0_IB_RPTR #define SDMA1_RLC0_IB_RPTR__OFFSET__SHIFT 0x2 #define SDMA1_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL //SDMA1_RLC0_IB_OFFSET #define SDMA1_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2 #define SDMA1_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL //SDMA1_RLC0_IB_BASE_LO #define SDMA1_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5 #define SDMA1_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L //SDMA1_RLC0_IB_BASE_HI #define SDMA1_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0 #define SDMA1_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_RLC0_IB_SIZE #define SDMA1_RLC0_IB_SIZE__SIZE__SHIFT 0x0 #define SDMA1_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL //SDMA1_RLC0_SKIP_CNTL #define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 #define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL //SDMA1_RLC0_CONTEXT_STATUS #define SDMA1_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0 #define SDMA1_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2 #define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 #define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 #define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 #define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 #define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 #define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa #define SDMA1_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L #define 
SDMA1_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L #define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L #define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L #define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L #define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L #define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L #define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L //SDMA1_RLC0_DOORBELL #define SDMA1_RLC0_DOORBELL__ENABLE__SHIFT 0x1c #define SDMA1_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e #define SDMA1_RLC0_DOORBELL__ENABLE_MASK 0x10000000L #define SDMA1_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L //SDMA1_RLC0_STATUS #define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 #define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 #define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL #define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L //SDMA1_RLC0_DOORBELL_LOG #define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 #define SDMA1_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2 #define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L #define SDMA1_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL //SDMA1_RLC0_WATERMARK #define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 #define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 #define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL #define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L //SDMA1_RLC0_DOORBELL_OFFSET #define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 #define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL //SDMA1_RLC0_CSA_ADDR_LO #define SDMA1_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2 #define SDMA1_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL //SDMA1_RLC0_CSA_ADDR_HI #define SDMA1_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0 #define SDMA1_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_RLC0_IB_SUB_REMAIN #define SDMA1_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0 #define SDMA1_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL //SDMA1_RLC0_PREEMPT #define SDMA1_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0 #define SDMA1_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L //SDMA1_RLC0_DUMMY_REG #define SDMA1_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0 #define SDMA1_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL //SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI #define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 #define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO #define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 #define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL //SDMA1_RLC0_RB_AQL_CNTL #define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 #define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 #define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 #define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L #define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL #define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L //SDMA1_RLC0_MINOR_PTR_UPDATE #define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 #define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L //SDMA1_RLC0_MIDCMD_DATA0 #define SDMA1_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0 #define SDMA1_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL //SDMA1_RLC0_MIDCMD_DATA1 #define SDMA1_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0 #define SDMA1_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL //SDMA1_RLC0_MIDCMD_DATA2 #define SDMA1_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0 #define SDMA1_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL //SDMA1_RLC0_MIDCMD_DATA3 #define SDMA1_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0 #define 
SDMA1_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL //SDMA1_RLC0_MIDCMD_DATA4 #define SDMA1_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0 #define SDMA1_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL //SDMA1_RLC0_MIDCMD_DATA5 #define SDMA1_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0 #define SDMA1_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL //SDMA1_RLC0_MIDCMD_DATA6 #define SDMA1_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0 #define SDMA1_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL //SDMA1_RLC0_MIDCMD_DATA7 #define SDMA1_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0 #define SDMA1_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL //SDMA1_RLC0_MIDCMD_DATA8 #define SDMA1_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0 #define SDMA1_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL //SDMA1_RLC0_MIDCMD_CNTL #define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 #define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 #define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 #define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 #define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L #define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L #define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L #define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L //SDMA1_RLC1_RB_CNTL #define SDMA1_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0 #define SDMA1_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1 #define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 #define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc #define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd #define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 #define SDMA1_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17 #define SDMA1_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18 #define SDMA1_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L #define SDMA1_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000007EL #define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L #define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L #define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L #define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L #define SDMA1_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L #define SDMA1_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L //SDMA1_RLC1_RB_BASE #define SDMA1_RLC1_RB_BASE__ADDR__SHIFT 0x0 #define SDMA1_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL //SDMA1_RLC1_RB_BASE_HI #define SDMA1_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0 #define SDMA1_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL //SDMA1_RLC1_RB_RPTR #define SDMA1_RLC1_RB_RPTR__OFFSET__SHIFT 0x0 #define SDMA1_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL //SDMA1_RLC1_RB_RPTR_HI #define SDMA1_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0 #define SDMA1_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL //SDMA1_RLC1_RB_WPTR #define SDMA1_RLC1_RB_WPTR__OFFSET__SHIFT 0x0 #define SDMA1_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL //SDMA1_RLC1_RB_WPTR_HI #define SDMA1_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0 #define SDMA1_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL //SDMA1_RLC1_RB_WPTR_POLL_CNTL #define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 #define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 #define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 #define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 #define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 #define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L #define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L #define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L #define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L #define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L 
//SDMA1_RLC1_RB_RPTR_ADDR_HI #define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 #define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_RLC1_RB_RPTR_ADDR_LO #define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 #define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL //SDMA1_RLC1_IB_CNTL #define SDMA1_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0 #define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 #define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 #define SDMA1_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10 #define SDMA1_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L #define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L #define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L #define SDMA1_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L //SDMA1_RLC1_IB_RPTR #define SDMA1_RLC1_IB_RPTR__OFFSET__SHIFT 0x2 #define SDMA1_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL //SDMA1_RLC1_IB_OFFSET #define SDMA1_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2 #define SDMA1_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL //SDMA1_RLC1_IB_BASE_LO #define SDMA1_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5 #define SDMA1_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L //SDMA1_RLC1_IB_BASE_HI #define SDMA1_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0 #define SDMA1_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_RLC1_IB_SIZE #define SDMA1_RLC1_IB_SIZE__SIZE__SHIFT 0x0 #define SDMA1_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL //SDMA1_RLC1_SKIP_CNTL #define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 #define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL //SDMA1_RLC1_CONTEXT_STATUS #define SDMA1_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0 #define SDMA1_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2 #define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 #define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 #define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 #define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 #define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 #define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa #define SDMA1_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L #define SDMA1_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L #define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L #define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L #define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L #define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L #define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L #define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L //SDMA1_RLC1_DOORBELL #define SDMA1_RLC1_DOORBELL__ENABLE__SHIFT 0x1c #define SDMA1_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e #define SDMA1_RLC1_DOORBELL__ENABLE_MASK 0x10000000L #define SDMA1_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L //SDMA1_RLC1_STATUS #define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 #define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 #define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL #define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L //SDMA1_RLC1_DOORBELL_LOG #define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 #define SDMA1_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2 #define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L #define SDMA1_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL //SDMA1_RLC1_WATERMARK #define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 #define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 #define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL #define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L //SDMA1_RLC1_DOORBELL_OFFSET #define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 #define 
SDMA1_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL //SDMA1_RLC1_CSA_ADDR_LO #define SDMA1_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2 #define SDMA1_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL //SDMA1_RLC1_CSA_ADDR_HI #define SDMA1_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0 #define SDMA1_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_RLC1_IB_SUB_REMAIN #define SDMA1_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0 #define SDMA1_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL //SDMA1_RLC1_PREEMPT #define SDMA1_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0 #define SDMA1_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L //SDMA1_RLC1_DUMMY_REG #define SDMA1_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0 #define SDMA1_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL //SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI #define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 #define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL //SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO #define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 #define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL //SDMA1_RLC1_RB_AQL_CNTL #define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 #define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 #define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 #define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L #define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL #define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L //SDMA1_RLC1_MINOR_PTR_UPDATE #define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 #define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L //SDMA1_RLC1_MIDCMD_DATA0 #define SDMA1_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0 #define SDMA1_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL //SDMA1_RLC1_MIDCMD_DATA1 #define SDMA1_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0 #define SDMA1_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL //SDMA1_RLC1_MIDCMD_DATA2 #define SDMA1_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0 #define SDMA1_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL //SDMA1_RLC1_MIDCMD_DATA3 #define SDMA1_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0 #define SDMA1_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL //SDMA1_RLC1_MIDCMD_DATA4 #define SDMA1_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0 #define SDMA1_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL //SDMA1_RLC1_MIDCMD_DATA5 #define SDMA1_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0 #define SDMA1_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL //SDMA1_RLC1_MIDCMD_DATA6 #define SDMA1_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0 #define SDMA1_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL //SDMA1_RLC1_MIDCMD_DATA7 #define SDMA1_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0 #define SDMA1_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL //SDMA1_RLC1_MIDCMD_DATA8 #define SDMA1_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0 #define SDMA1_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL //SDMA1_RLC1_MIDCMD_CNTL #define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 #define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 #define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 #define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 #define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L #define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L #define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L #define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L #endif
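/*
 * Usage note (illustrative, not part of the generated register header):
 * every field above is described by a __SHIFT/_MASK pair. Reading a field
 * means masking first and then shifting down; writing means shifting up
 * into the mask. A minimal sketch, assuming hypothetical rreg32()/wreg32()
 * register accessors and a reg_offset variable:
 *
 *	u32 inv0 = rreg32(reg_offset);
 *	u32 vmid_vec = (inv0 & SDMA1_UTCL1_INV0__INV_VMID_VEC_MASK) >>
 *		       SDMA1_UTCL1_INV0__INV_VMID_VEC__SHIFT;
 *
 *	inv0 &= ~SDMA1_UTCL1_INV0__INV_VMID_VEC_MASK;
 *	inv0 |= (new_vec << SDMA1_UTCL1_INV0__INV_VMID_VEC__SHIFT) &
 *		SDMA1_UTCL1_INV0__INV_VMID_VEC_MASK;
 *	wreg32(reg_offset, inv0);
 *
 * In-tree amdgpu code typically wraps this pattern in its REG_GET_FIELD()
 * and REG_SET_FIELD() helpers rather than open-coding it.
 */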
// SPDX-License-Identifier: GPL-2.0 /* * Contains the driver implementation for the V4L2 stateless interface. */ #include <linux/debugfs.h> #include <linux/font.h> #include <media/v4l2-event.h> #include <media/v4l2-ioctl.h> #include <media/videobuf2-vmalloc.h> #include <media/videobuf2-v4l2.h> #include "visl-video.h" #include "visl.h" #include "visl-debugfs.h" #define MIN_CODED_SZ (1024U * 256U) static void visl_set_current_codec(struct visl_ctx *ctx) { u32 fourcc = ctx->coded_fmt.fmt.pix_mp.pixelformat; switch (fourcc) { case V4L2_PIX_FMT_FWHT_STATELESS: ctx->current_codec = VISL_CODEC_FWHT; break; case V4L2_PIX_FMT_MPEG2_SLICE: ctx->current_codec = VISL_CODEC_MPEG2; break; case V4L2_PIX_FMT_VP8_FRAME: ctx->current_codec = VISL_CODEC_VP8; break; case V4L2_PIX_FMT_VP9_FRAME: ctx->current_codec = VISL_CODEC_VP9; break; case V4L2_PIX_FMT_H264_SLICE: ctx->current_codec = VISL_CODEC_H264; break; case V4L2_PIX_FMT_HEVC_SLICE: ctx->current_codec = VISL_CODEC_HEVC; break; case V4L2_PIX_FMT_AV1_FRAME: ctx->current_codec = VISL_CODEC_AV1; break; default: dprintk(ctx->dev, "Warning: unsupported fourcc: %d\n", fourcc); ctx->current_codec = VISL_CODEC_NONE; break; } } static void visl_print_fmt(struct visl_ctx *ctx, const struct v4l2_format *f) { const struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp; u32 i; dprintk(ctx->dev, "width: %d\n", pix_mp->width); dprintk(ctx->dev, "height: %d\n", pix_mp->height); dprintk(ctx->dev, "pixelformat: %c%c%c%c\n", pix_mp->pixelformat, (pix_mp->pixelformat >> 8) & 0xff, (pix_mp->pixelformat >> 16) & 0xff, (pix_mp->pixelformat >> 24) & 0xff); dprintk(ctx->dev, "field: %d\n", pix_mp->field); dprintk(ctx->dev, "colorspace: %d\n", pix_mp->colorspace); dprintk(ctx->dev, "num_planes: %d\n", pix_mp->num_planes); dprintk(ctx->dev, "flags: %d\n", pix_mp->flags); dprintk(ctx->dev, "quantization: %d\n", pix_mp->quantization); dprintk(ctx->dev, "xfer_func: %d\n", pix_mp->xfer_func); for (i = 0; i < pix_mp->num_planes; i++) { dprintk(ctx->dev, "plane[%d]: sizeimage: %d\n", i, pix_mp->plane_fmt[i].sizeimage); dprintk(ctx->dev, "plane[%d]: bytesperline: %d\n", i, pix_mp->plane_fmt[i].bytesperline); } } static int visl_tpg_init(struct visl_ctx *ctx) { const struct font_desc *font; const char *font_name = "VGA8x16"; int ret; u32 width = ctx->decoded_fmt.fmt.pix_mp.width; u32 height = ctx->decoded_fmt.fmt.pix_mp.height; struct v4l2_pix_format_mplane *f = &ctx->decoded_fmt.fmt.pix_mp; tpg_free(&ctx->tpg); font = find_font(font_name); if (font) { tpg_init(&ctx->tpg, width, height); ret = tpg_alloc(&ctx->tpg, width); if (ret) goto err_alloc; tpg_set_font(font->data); ret = tpg_s_fourcc(&ctx->tpg, f->pixelformat); if (!ret) goto err_fourcc; tpg_reset_source(&ctx->tpg, width, height, f->field); tpg_s_pattern(&ctx->tpg, TPG_PAT_75_COLORBAR); tpg_s_field(&ctx->tpg, f->field, false); tpg_s_colorspace(&ctx->tpg, f->colorspace); tpg_s_ycbcr_enc(&ctx->tpg, f->ycbcr_enc); tpg_s_quantization(&ctx->tpg, f->quantization); tpg_s_xfer_func(&ctx->tpg, f->xfer_func); } else { v4l2_err(&ctx->dev->v4l2_dev, "Font %s not found\n", font_name); return -EINVAL; } dprintk(ctx->dev, "Initialized the V4L2 test pattern generator, w=%d, h=%d, max_w=%d\n", width, height, width); return 0; err_alloc: return ret; err_fourcc: tpg_free(&ctx->tpg); return ret; } static const u32 visl_decoded_fmts[] = { V4L2_PIX_FMT_NV12, V4L2_PIX_FMT_YUV420, }; static const u32 visl_extended_decoded_fmts[] = { V4L2_PIX_FMT_NV12, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_P010, }; const struct visl_coded_format_desc visl_coded_fmts[] = { { 
.pixelformat = V4L2_PIX_FMT_FWHT_STATELESS, .frmsize = { .min_width = 640, .max_width = 4096, .step_width = 1, .min_height = 360, .max_height = 2160, .step_height = 1, }, .ctrls = &visl_fwht_ctrls, .num_decoded_fmts = ARRAY_SIZE(visl_decoded_fmts), .decoded_fmts = visl_decoded_fmts, }, { .pixelformat = V4L2_PIX_FMT_MPEG2_SLICE, .frmsize = { .min_width = 16, .max_width = 1920, .step_width = 1, .min_height = 16, .max_height = 1152, .step_height = 1, }, .ctrls = &visl_mpeg2_ctrls, .num_decoded_fmts = ARRAY_SIZE(visl_decoded_fmts), .decoded_fmts = visl_decoded_fmts, }, { .pixelformat = V4L2_PIX_FMT_VP8_FRAME, .frmsize = { .min_width = 64, .max_width = 16383, .step_width = 1, .min_height = 64, .max_height = 16383, .step_height = 1, }, .ctrls = &visl_vp8_ctrls, .num_decoded_fmts = ARRAY_SIZE(visl_decoded_fmts), .decoded_fmts = visl_decoded_fmts, }, { .pixelformat = V4L2_PIX_FMT_VP9_FRAME, .frmsize = { .min_width = 64, .max_width = 8192, .step_width = 1, .min_height = 64, .max_height = 4352, .step_height = 1, }, .ctrls = &visl_vp9_ctrls, .num_decoded_fmts = ARRAY_SIZE(visl_decoded_fmts), .decoded_fmts = visl_decoded_fmts, }, { .pixelformat = V4L2_PIX_FMT_H264_SLICE, .frmsize = { .min_width = 64, .max_width = 4096, .step_width = 1, .min_height = 64, .max_height = 2304, .step_height = 1, }, .ctrls = &visl_h264_ctrls, .num_decoded_fmts = ARRAY_SIZE(visl_decoded_fmts), .decoded_fmts = visl_decoded_fmts, }, { .pixelformat = V4L2_PIX_FMT_HEVC_SLICE, .frmsize = { .min_width = 64, .max_width = 4096, .step_width = 1, .min_height = 64, .max_height = 2304, .step_height = 1, }, .ctrls = &visl_hevc_ctrls, .num_decoded_fmts = ARRAY_SIZE(visl_decoded_fmts), .decoded_fmts = visl_decoded_fmts, }, { .pixelformat = V4L2_PIX_FMT_AV1_FRAME, .frmsize = { .min_width = 64, .max_width = 4096, .step_width = 1, .min_height = 64, .max_height = 2304, .step_height = 1, }, .ctrls = &visl_av1_ctrls, .num_decoded_fmts = ARRAY_SIZE(visl_decoded_fmts), .decoded_fmts = visl_decoded_fmts, }, }; const size_t num_coded_fmts = ARRAY_SIZE(visl_coded_fmts); static const struct visl_coded_format_desc* visl_find_coded_fmt_desc(u32 fourcc) { unsigned int i; for (i = 0; i < ARRAY_SIZE(visl_coded_fmts); i++) { if (visl_coded_fmts[i].pixelformat == fourcc) return &visl_coded_fmts[i]; } return NULL; } static void visl_init_fmt(struct v4l2_format *f, u32 fourcc) { memset(f, 0, sizeof(*f)); f->fmt.pix_mp.pixelformat = fourcc; f->fmt.pix_mp.field = V4L2_FIELD_NONE; f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709; f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT; f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT; } static void visl_reset_coded_fmt(struct visl_ctx *ctx) { struct v4l2_format *f = &ctx->coded_fmt; struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp; ctx->coded_format_desc = &visl_coded_fmts[0]; visl_init_fmt(f, ctx->coded_format_desc->pixelformat); f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; f->fmt.pix_mp.width = ctx->coded_format_desc->frmsize.min_width; f->fmt.pix_mp.height = ctx->coded_format_desc->frmsize.min_height; pix_mp->num_planes = 1; pix_mp->plane_fmt[0].sizeimage = pix_mp->width * pix_mp->height * 8; dprintk(ctx->dev, "OUTPUT format was set to:\n"); visl_print_fmt(ctx, &ctx->coded_fmt); visl_set_current_codec(ctx); } static int visl_reset_decoded_fmt(struct visl_ctx *ctx) { struct v4l2_format *f = &ctx->decoded_fmt; u32 decoded_fmt = ctx->coded_format_desc[0].decoded_fmts[0]; visl_init_fmt(f, decoded_fmt); f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; 
v4l2_fill_pixfmt_mp(&f->fmt.pix_mp, ctx->coded_format_desc->decoded_fmts[0], ctx->coded_fmt.fmt.pix_mp.width, ctx->coded_fmt.fmt.pix_mp.height); dprintk(ctx->dev, "CAPTURE format was set to:\n"); visl_print_fmt(ctx, &ctx->decoded_fmt); return visl_tpg_init(ctx); } int visl_set_default_format(struct visl_ctx *ctx) { visl_reset_coded_fmt(ctx); return visl_reset_decoded_fmt(ctx); } static struct visl_q_data *get_q_data(struct visl_ctx *ctx, enum v4l2_buf_type type) { switch (type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: return &ctx->q_data[V4L2_M2M_SRC]; case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: return &ctx->q_data[V4L2_M2M_DST]; default: break; } return NULL; } static int visl_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strscpy(cap->driver, VISL_NAME, sizeof(cap->driver)); strscpy(cap->card, VISL_NAME, sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", VISL_NAME); return 0; } static int visl_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct visl_ctx *ctx = visl_file_to_ctx(file); u32 index = f->index & ~V4L2_FMTDESC_FLAG_ENUM_ALL; int max_fmts = ctx->coded_format_desc->num_decoded_fmts; const u32 *decoded_fmts = ctx->coded_format_desc->decoded_fmts; if (f->index & V4L2_FMTDESC_FLAG_ENUM_ALL) { max_fmts = ARRAY_SIZE(visl_extended_decoded_fmts); decoded_fmts = visl_extended_decoded_fmts; } f->index = index; if (index >= max_fmts) return -EINVAL; f->pixelformat = decoded_fmts[index]; return 0; } static int visl_enum_fmt_vid_out(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (f->index >= ARRAY_SIZE(visl_coded_fmts)) return -EINVAL; f->pixelformat = visl_coded_fmts[f->index].pixelformat; return 0; } static int visl_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct visl_ctx *ctx = visl_file_to_ctx(file); *f = ctx->decoded_fmt; return 0; } static int visl_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct visl_ctx *ctx = visl_file_to_ctx(file); *f = ctx->coded_fmt; return 0; } static int visl_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp; struct visl_ctx *ctx = visl_file_to_ctx(file); const struct visl_coded_format_desc *coded_desc; unsigned int i; coded_desc = ctx->coded_format_desc; for (i = 0; i < coded_desc->num_decoded_fmts; i++) { if (coded_desc->decoded_fmts[i] == pix_mp->pixelformat) break; } if (i == coded_desc->num_decoded_fmts) pix_mp->pixelformat = coded_desc->decoded_fmts[0]; v4l2_apply_frmsize_constraints(&pix_mp->width, &pix_mp->height, &coded_desc->frmsize); v4l2_fill_pixfmt_mp(pix_mp, pix_mp->pixelformat, pix_mp->width, pix_mp->height); pix_mp->field = V4L2_FIELD_NONE; return 0; } static int visl_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp; const struct visl_coded_format_desc *coded_desc; coded_desc = visl_find_coded_fmt_desc(pix_mp->pixelformat); if (!coded_desc) { pix_mp->pixelformat = visl_coded_fmts[0].pixelformat; coded_desc = &visl_coded_fmts[0]; } v4l2_apply_frmsize_constraints(&pix_mp->width, &pix_mp->height, &coded_desc->frmsize); pix_mp->field = V4L2_FIELD_NONE; pix_mp->num_planes = 1; if (pix_mp->plane_fmt[0].sizeimage == 0) pix_mp->plane_fmt[0].sizeimage = max(MIN_CODED_SZ, pix_mp->width * pix_mp->height * 3); return 0; } static int visl_s_fmt_vid_out(struct file *file, void *priv, struct 
v4l2_format *f) { struct visl_ctx *ctx = visl_file_to_ctx(file); struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx; const struct visl_coded_format_desc *desc; struct vb2_queue *peer_vq; int ret; peer_vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); if (vb2_is_busy(peer_vq)) return -EBUSY; dprintk(ctx->dev, "Trying to set the OUTPUT format to:\n"); visl_print_fmt(ctx, f); ret = visl_try_fmt_vid_out(file, priv, f); if (ret) return ret; desc = visl_find_coded_fmt_desc(f->fmt.pix_mp.pixelformat); ctx->coded_format_desc = desc; ctx->coded_fmt = *f; ret = visl_reset_decoded_fmt(ctx); if (ret) return ret; ctx->decoded_fmt.fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace; ctx->decoded_fmt.fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func; ctx->decoded_fmt.fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc; ctx->decoded_fmt.fmt.pix_mp.quantization = f->fmt.pix_mp.quantization; dprintk(ctx->dev, "OUTPUT format was set to:\n"); visl_print_fmt(ctx, &ctx->coded_fmt); visl_set_current_codec(ctx); return 0; } static int visl_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct visl_ctx *ctx = visl_file_to_ctx(file); int ret; dprintk(ctx->dev, "Trying to set the CAPTURE format to:\n"); visl_print_fmt(ctx, f); ret = visl_try_fmt_vid_cap(file, priv, f); if (ret) return ret; ctx->decoded_fmt = *f; dprintk(ctx->dev, "CAPTURE format was set to:\n"); visl_print_fmt(ctx, &ctx->decoded_fmt); visl_tpg_init(ctx); return 0; } static int visl_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *fsize) { const struct visl_coded_format_desc *fmt; struct visl_ctx *ctx = visl_file_to_ctx(file); if (fsize->index != 0) return -EINVAL; fmt = visl_find_coded_fmt_desc(fsize->pixel_format); if (!fmt) { dprintk(ctx->dev, "Unsupported format for the OUTPUT queue: %d\n", fsize->pixel_format); return -EINVAL; } fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; fsize->stepwise = fmt->frmsize; return 0; } const struct v4l2_ioctl_ops visl_ioctl_ops = { .vidioc_querycap = visl_querycap, .vidioc_enum_framesizes = visl_enum_framesizes, .vidioc_enum_fmt_vid_cap = visl_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap_mplane = visl_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap_mplane = visl_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap_mplane = visl_s_fmt_vid_cap, .vidioc_enum_fmt_vid_out = visl_enum_fmt_vid_out, .vidioc_g_fmt_vid_out_mplane = visl_g_fmt_vid_out, .vidioc_try_fmt_vid_out_mplane = visl_try_fmt_vid_out, .vidioc_s_fmt_vid_out_mplane = visl_s_fmt_vid_out, .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, .vidioc_remove_bufs = v4l2_m2m_ioctl_remove_bufs, .vidioc_streamon = v4l2_m2m_ioctl_streamon, .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd, .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; static int visl_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *num_planes, unsigned int sizes[], struct device *alloc_devs[]) { struct visl_ctx *ctx = vb2_get_drv_priv(vq); struct v4l2_format *f; u32 i; char *qname; if (V4L2_TYPE_IS_OUTPUT(vq->type)) { f = &ctx->coded_fmt; qname = "Output"; } else { f = &ctx->decoded_fmt; qname = "Capture"; } if (*num_planes) { if 
(*num_planes != f->fmt.pix_mp.num_planes)
			return -EINVAL;

		for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
			if (sizes[i] < f->fmt.pix_mp.plane_fmt[i].sizeimage)
				return -EINVAL;
		}
	} else {
		*num_planes = f->fmt.pix_mp.num_planes;

		for (i = 0; i < f->fmt.pix_mp.num_planes; i++)
			sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage;
	}

	dprintk(ctx->dev, "%s: %d buffer(s) requested, num_planes=%d.\n",
		qname, *nbuffers, *num_planes);

	for (i = 0; i < f->fmt.pix_mp.num_planes; i++)
		dprintk(ctx->dev, "plane[%d].sizeimage=%d\n",
			i, f->fmt.pix_mp.plane_fmt[i].sizeimage);

	return 0;
}

static void visl_queue_cleanup(struct vb2_queue *vq, u32 state)
{
	struct visl_ctx *ctx = vb2_get_drv_priv(vq);
	struct vb2_v4l2_buffer *vbuf;

	dprintk(ctx->dev, "Cleaning up queues\n");
	for (;;) {
		if (V4L2_TYPE_IS_OUTPUT(vq->type))
			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

		if (!vbuf)
			break;

		v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
					   &ctx->hdl);
		dprintk(ctx->dev, "Marked request %p as complete\n",
			vbuf->vb2_buf.req_obj.req);

		v4l2_m2m_buf_done(vbuf, state);
		dprintk(ctx->dev, "Marked buffer %llu as done, state is %d\n",
			vbuf->vb2_buf.timestamp, state);
	}
}

static int visl_buf_out_validate(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->field = V4L2_FIELD_NONE;
	return 0;
}

static int visl_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *vq = vb->vb2_queue;
	struct visl_ctx *ctx = vb2_get_drv_priv(vq);
	u32 plane_sz = vb2_plane_size(vb, 0);
	struct v4l2_pix_format *pix_fmt;

	if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
		pix_fmt = &ctx->coded_fmt.fmt.pix;
	} else {
		pix_fmt = &ctx->decoded_fmt.fmt.pix;
		vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
	}

	if (plane_sz < pix_fmt->sizeimage) {
		v4l2_err(&ctx->dev->v4l2_dev, "plane[0] size is %d, sizeimage is %d\n",
			 plane_sz, pix_fmt->sizeimage);
		return -EINVAL;
	}

	return 0;
}

static int visl_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct visl_ctx *ctx = vb2_get_drv_priv(vq);
	struct visl_q_data *q_data = get_q_data(ctx, vq->type);
	int rc = 0;

	if (!q_data) {
		rc = -EINVAL;
		goto err;
	}

	q_data->sequence = 0;

	if (V4L2_TYPE_IS_CAPTURE(vq->type)) {
		ctx->capture_streamon_jiffies = get_jiffies_64();
		return 0;
	}

	if (WARN_ON(!ctx->coded_format_desc)) {
		rc = -EINVAL;
		goto err;
	}

	return 0;

err:
	visl_queue_cleanup(vq, VB2_BUF_STATE_QUEUED);
	return rc;
}

static void visl_stop_streaming(struct vb2_queue *vq)
{
	struct visl_ctx *ctx = vb2_get_drv_priv(vq);

	dprintk(ctx->dev, "Stop streaming\n");
	visl_queue_cleanup(vq, VB2_BUF_STATE_ERROR);

	if (!keep_bitstream_buffers)
		visl_debugfs_clear_bitstream(ctx->dev);
}

static void visl_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct visl_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

static void visl_buf_request_complete(struct vb2_buffer *vb)
{
	struct visl_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
}

static const struct vb2_ops visl_qops = {
	.queue_setup          = visl_queue_setup,
	.buf_out_validate     = visl_buf_out_validate,
	.buf_prepare          = visl_buf_prepare,
	.buf_queue            = visl_buf_queue,
	.start_streaming      = visl_start_streaming,
	.stop_streaming       = visl_stop_streaming,
	.buf_request_complete = visl_buf_request_complete,
};

int visl_queue_init(void *priv, struct vb2_queue *src_vq,
		    struct vb2_queue *dst_vq)
{
	struct visl_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->ops = &visl_qops;
	src_vq->mem_ops = &vb2_vmalloc_memops;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->vb_mutex;
	src_vq->supports_requests = true;
	src_vq->subsystem_flags |= VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->ops = &visl_qops;
	dst_vq->mem_ops = &vb2_vmalloc_memops;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &ctx->vb_mutex;

	return vb2_queue_init(dst_vq);
}

int visl_request_validate(struct media_request *req)
{
	struct media_request_object *obj;
	struct visl_ctx *ctx = NULL;
	unsigned int count;

	list_for_each_entry(obj, &req->objects, list) {
		struct vb2_buffer *vb;

		if (vb2_request_object_is_buffer(obj)) {
			vb = container_of(obj, struct vb2_buffer, req_obj);
			ctx = vb2_get_drv_priv(vb->vb2_queue);
			break;
		}
	}

	if (!ctx)
		return -ENOENT;

	count = vb2_request_buffer_cnt(req);
	if (!count) {
		v4l2_err(&ctx->dev->v4l2_dev,
			 "No buffer was provided with the request\n");
		return -ENOENT;
	} else if (count > 1) {
		v4l2_err(&ctx->dev->v4l2_dev,
			 "More than one buffer was provided with the request\n");
		return -EINVAL;
	}

	return vb2_request_validate(req);
}
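
/*
 * Illustrative sketch (not part of the driver): visl_queue_init() above is
 * written as the queue_init callback that a mem2mem driver hands to
 * v4l2_m2m_ctx_init() when a file handle is opened; that call is what binds
 * visl_qops to both vb2 queues. The example_* name and the m2m_dev field
 * layout are assumptions following the usual mem2mem driver pattern.
 */
static int example_open_m2m_ctx(struct visl_dev *dev, struct visl_ctx *ctx)
{
	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
					    &visl_queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx))
		return PTR_ERR(ctx->fh.m2m_ctx);

	return 0;
}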
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2019 Google, Inc.
 *
 * Authors:
 * Sean Paul <[email protected]>
 */

#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>

/**
 * DOC: overview
 *
 * This helper library provides an easy way for drivers to leverage the atomic
 * framework to implement panel self refresh (SR) support. Drivers are
 * responsible for initializing and cleaning up the SR helpers on load/unload
 * (see &drm_self_refresh_helper_init/&drm_self_refresh_helper_cleanup).
 * The connector is responsible for setting
 * &drm_connector_state.self_refresh_aware to true at runtime if it is SR-aware
 * (meaning it knows how to initiate self refresh on the panel).
 *
 * Once a crtc has enabled SR using &drm_self_refresh_helper_init, the
 * helpers will monitor activity and call back into the driver to enable/disable
 * SR as appropriate. The best way to think about this is that it's a DPMS
 * on/off request with &drm_crtc_state.self_refresh_active set in crtc state
 * that tells you to disable/enable SR on the panel instead of power-cycling it.
 *
 * During SR, drivers may choose to fully disable their crtc/encoder/bridge
 * hardware (in which case no driver changes are necessary), or they can inspect
 * &drm_crtc_state.self_refresh_active if they want to enter low power mode
 * without full disable (in case full disable/enable is too slow).
 *
 * SR will be deactivated if there are any atomic updates affecting the
 * pipe that is in SR mode. If a crtc is driving multiple connectors, all
 * connectors must be SR aware and all will enter/exit SR mode at the same time.
 *
 * If the crtc and connector are SR aware, but the panel connected does not
 * support it (or is otherwise unable to enter SR), the driver should fail
 * atomic_check when &drm_crtc_state.self_refresh_active is true.
 */

#define SELF_REFRESH_AVG_SEED_MS 200

DECLARE_EWMA(psr_time, 4, 4)

struct drm_self_refresh_data {
	struct drm_crtc *crtc;
	struct delayed_work entry_work;

	struct mutex avg_mutex;
	struct ewma_psr_time entry_avg_ms;
	struct ewma_psr_time exit_avg_ms;
};

static void drm_self_refresh_helper_entry_work(struct work_struct *work)
{
	struct drm_self_refresh_data *sr_data =
		container_of(to_delayed_work(work),
			     struct drm_self_refresh_data, entry_work);
	struct drm_crtc *crtc = sr_data->crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	int i, ret = 0;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_drop_locks;
	}

retry:
	state->acquire_ctx = &ctx;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}

	if (!crtc_state->enable)
		goto out;

	ret = drm_atomic_add_affected_connectors(state, crtc);
	if (ret)
		goto out;

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		if (!conn_state->self_refresh_aware)
			goto out;
	}

	crtc_state->active = false;
	crtc_state->self_refresh_active = true;

	ret = drm_atomic_commit(state);
	if (ret)
		goto out;

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_atomic_state_put(state);

out_drop_locks:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

/**
 * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages
 * @state: the state which has just been applied to hardware
 * @commit_time_ms: the amount of time in ms that this commit took to complete
 * @new_self_refresh_mask: bitmask of crtc's that have self_refresh_active in
 *    new state
 *
 * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will
 * update the average entry/exit self refresh times on self refresh transitions.
 * These averages will be used when calculating how long to delay before
 * entering self refresh mode after activity.
 */
void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
					      unsigned int commit_time_ms,
					      unsigned int new_self_refresh_mask)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		bool new_self_refresh_active = new_self_refresh_mask & BIT(i);
		struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
		struct ewma_psr_time *time;

		if (old_crtc_state->self_refresh_active ==
		    new_self_refresh_active)
			continue;

		if (new_self_refresh_active)
			time = &sr_data->entry_avg_ms;
		else
			time = &sr_data->exit_avg_ms;

		mutex_lock(&sr_data->avg_mutex);
		ewma_psr_time_add(time, commit_time_ms);
		mutex_unlock(&sr_data->avg_mutex);
	}
}
EXPORT_SYMBOL(drm_self_refresh_helper_update_avg_times);

/**
 * drm_self_refresh_helper_alter_state - Alters the atomic state for SR exit
 * @state: the state currently being checked
 *
 * Called at the end of atomic check. This function checks the state for flags
 * incompatible with self refresh exit and changes them. This is a bit
 * disingenuous since userspace is expecting one thing and we're giving it
 * another. However in order to keep self refresh entirely hidden from
 * userspace, this is required.
 *
 * At the end, we queue up the self refresh entry work so we can enter PSR after
 * the desired delay.
 */
void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (state->async_update || !state->allow_modeset) {
		for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
			if (crtc_state->self_refresh_active) {
				state->async_update = false;
				state->allow_modeset = true;
				break;
			}
		}
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_self_refresh_data *sr_data;
		unsigned int delay;

		/* Don't trigger the entry timer when we're already in SR */
		if (crtc_state->self_refresh_active)
			continue;

		sr_data = crtc->self_refresh_data;
		if (!sr_data)
			continue;

		mutex_lock(&sr_data->avg_mutex);
		delay = (ewma_psr_time_read(&sr_data->entry_avg_ms) +
			 ewma_psr_time_read(&sr_data->exit_avg_ms)) * 2;
		mutex_unlock(&sr_data->avg_mutex);

		mod_delayed_work(system_wq, &sr_data->entry_work,
				 msecs_to_jiffies(delay));
	}
}
EXPORT_SYMBOL(drm_self_refresh_helper_alter_state);

/**
 * drm_self_refresh_helper_init - Initializes self refresh helpers for a crtc
 * @crtc: the crtc which supports self refresh supported displays
 *
 * Returns zero if successful or -errno on failure
 */
int drm_self_refresh_helper_init(struct drm_crtc *crtc)
{
	struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;

	/* Helper is already initialized */
	if (WARN_ON(sr_data))
		return -EINVAL;

	sr_data = kzalloc(sizeof(*sr_data), GFP_KERNEL);
	if (!sr_data)
		return -ENOMEM;

	INIT_DELAYED_WORK(&sr_data->entry_work,
			  drm_self_refresh_helper_entry_work);
	sr_data->crtc = crtc;
	mutex_init(&sr_data->avg_mutex);
	ewma_psr_time_init(&sr_data->entry_avg_ms);
	ewma_psr_time_init(&sr_data->exit_avg_ms);

	/*
	 * Seed the averages so they're non-zero (and sufficiently large
	 * for even poorly performing panels). As time goes on, this will be
	 * averaged out and the values will trend to their true value.
	 */
	ewma_psr_time_add(&sr_data->entry_avg_ms, SELF_REFRESH_AVG_SEED_MS);
	ewma_psr_time_add(&sr_data->exit_avg_ms, SELF_REFRESH_AVG_SEED_MS);

	crtc->self_refresh_data = sr_data;
	return 0;
}
EXPORT_SYMBOL(drm_self_refresh_helper_init);

/**
 * drm_self_refresh_helper_cleanup - Cleans up self refresh helpers for a crtc
 * @crtc: the crtc to cleanup
 */
void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc)
{
	struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;

	/* Helper is already uninitialized */
	if (!sr_data)
		return;

	crtc->self_refresh_data = NULL;

	cancel_delayed_work_sync(&sr_data->entry_work);
	kfree(sr_data);
}
EXPORT_SYMBOL(drm_self_refresh_helper_cleanup);
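
/*
 * Illustrative sketch (not part of the helper library): a driver would
 * typically call drm_self_refresh_helper_init() once per SR-capable crtc
 * during load and drm_self_refresh_helper_cleanup() on unload. The example_*
 * names are assumptions, and which crtcs qualify is a driver-specific
 * decision (e.g. only those driving a PSR-capable eDP panel).
 */
static int example_init_self_refresh(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	int ret;

	drm_for_each_crtc(crtc, dev) {
		ret = drm_self_refresh_helper_init(crtc);
		if (ret)
			return ret;
	}

	return 0;
}

static void example_fini_self_refresh(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	drm_for_each_crtc(crtc, dev)
		drm_self_refresh_helper_cleanup(crtc);
}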
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023, Linaro Ltd.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/pm_runtime.h>

#include <dt-bindings/clock/qcom,sm8550-dispcc.h>

#include "common.h"
#include "clk-alpha-pll.h"
#include "clk-branch.h"
#include "clk-pll.h"
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
#include "reset.h"
#include "gdsc.h"

/* Need to match the order of clocks in DT binding */
enum {
	DT_BI_TCXO,
	DT_BI_TCXO_AO,
	DT_AHB_CLK,
	DT_SLEEP_CLK,
	DT_DSI0_PHY_PLL_OUT_BYTECLK,
	DT_DSI0_PHY_PLL_OUT_DSICLK,
	DT_DSI1_PHY_PLL_OUT_BYTECLK,
	DT_DSI1_PHY_PLL_OUT_DSICLK,
	DT_DP0_PHY_PLL_LINK_CLK,
	DT_DP0_PHY_PLL_VCO_DIV_CLK,
	DT_DP1_PHY_PLL_LINK_CLK,
	DT_DP1_PHY_PLL_VCO_DIV_CLK,
	DT_DP2_PHY_PLL_LINK_CLK,
	DT_DP2_PHY_PLL_VCO_DIV_CLK,
	DT_DP3_PHY_PLL_LINK_CLK,
	DT_DP3_PHY_PLL_VCO_DIV_CLK,
};

#define DISP_CC_MISC_CMD	0xF000

enum {
	P_BI_TCXO,
	P_DISP_CC_PLL0_OUT_MAIN,
	P_DISP_CC_PLL1_OUT_EVEN,
	P_DISP_CC_PLL1_OUT_MAIN,
	P_DP0_PHY_PLL_LINK_CLK,
	P_DP0_PHY_PLL_VCO_DIV_CLK,
	P_DP1_PHY_PLL_LINK_CLK,
	P_DP1_PHY_PLL_VCO_DIV_CLK,
	P_DP2_PHY_PLL_LINK_CLK,
	P_DP2_PHY_PLL_VCO_DIV_CLK,
	P_DP3_PHY_PLL_LINK_CLK,
	P_DP3_PHY_PLL_VCO_DIV_CLK,
	P_DSI0_PHY_PLL_OUT_BYTECLK,
	P_DSI0_PHY_PLL_OUT_DSICLK,
	P_DSI1_PHY_PLL_OUT_BYTECLK,
	P_DSI1_PHY_PLL_OUT_DSICLK,
	P_SLEEP_CLK,
};

static struct pll_vco lucid_ole_vco[] = {
	{ 249600000, 2000000000, 0 },
};

static struct alpha_pll_config disp_cc_pll0_config = {
	.l = 0xd,
	.alpha = 0x6492,
	.config_ctl_val = 0x20485699,
	.config_ctl_hi_val = 0x00182261,
	.config_ctl_hi1_val = 0x82aa299c,
	.test_ctl_val = 0x00000000,
	.test_ctl_hi_val = 0x00000003,
	.test_ctl_hi1_val = 0x00009000,
	.test_ctl_hi2_val = 0x00000034,
	.user_ctl_val = 0x00000000,
	.user_ctl_hi_val = 0x00000005,
};

static struct clk_alpha_pll disp_cc_pll0 = {
	.offset = 0x0,
	.vco_table = lucid_ole_vco,
	.num_vco = ARRAY_SIZE(lucid_ole_vco),
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
	.clkr = {
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_pll0",
			.parent_data = &(const struct clk_parent_data) {
				.index = DT_BI_TCXO,
			},
			.num_parents = 1,
			.ops = &clk_alpha_pll_reset_lucid_ole_ops,
		},
	},
};

static struct alpha_pll_config disp_cc_pll1_config = {
	.l = 0x1f,
	.alpha = 0x4000,
	.config_ctl_val = 0x20485699,
	.config_ctl_hi_val = 0x00182261,
	.config_ctl_hi1_val = 0x82aa299c,
	.test_ctl_val = 0x00000000,
	.test_ctl_hi_val = 0x00000003,
	.test_ctl_hi1_val = 0x00009000,
	.test_ctl_hi2_val = 0x00000034,
	.user_ctl_val = 0x00000000,
	.user_ctl_hi_val = 0x00000005,
};

static struct clk_alpha_pll disp_cc_pll1 = {
	.offset = 0x1000,
	.vco_table = lucid_ole_vco,
	.num_vco = ARRAY_SIZE(lucid_ole_vco),
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
	.clkr = {
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_pll1",
			.parent_data = &(const struct clk_parent_data) {
				.index = DT_BI_TCXO,
			},
			.num_parents = 1,
			.ops = &clk_alpha_pll_reset_lucid_ole_ops,
		},
	},
};

static const struct parent_map disp_cc_parent_map_0[] = {
	{ P_BI_TCXO, 0 },
};

static const struct clk_parent_data disp_cc_parent_data_0[] = {
	{ .index = DT_BI_TCXO },
};

static const struct clk_parent_data disp_cc_parent_data_0_ao[] = {
	{ .index = DT_BI_TCXO_AO },
};

static const struct parent_map disp_cc_parent_map_1[] = {
	{ P_BI_TCXO, 0 },
	{
P_DP3_PHY_PLL_VCO_DIV_CLK, 3 }, { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 }, }; static const struct clk_parent_data disp_cc_parent_data_1[] = { { .index = DT_BI_TCXO }, { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK }, { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK }, { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK }, }; static const struct parent_map disp_cc_parent_map_2[] = { { P_BI_TCXO, 0 }, { P_DSI0_PHY_PLL_OUT_DSICLK, 1 }, { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, { P_DSI1_PHY_PLL_OUT_DSICLK, 3 }, { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, }; static const struct clk_parent_data disp_cc_parent_data_2[] = { { .index = DT_BI_TCXO }, { .index = DT_DSI0_PHY_PLL_OUT_DSICLK }, { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK }, { .index = DT_DSI1_PHY_PLL_OUT_DSICLK }, { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK }, }; static const struct parent_map disp_cc_parent_map_3[] = { { P_BI_TCXO, 0 }, { P_DP1_PHY_PLL_LINK_CLK, 2 }, { P_DP2_PHY_PLL_LINK_CLK, 3 }, { P_DP3_PHY_PLL_LINK_CLK, 4 }, }; static const struct clk_parent_data disp_cc_parent_data_3[] = { { .index = DT_BI_TCXO }, { .index = DT_DP1_PHY_PLL_LINK_CLK }, { .index = DT_DP2_PHY_PLL_LINK_CLK }, { .index = DT_DP3_PHY_PLL_LINK_CLK }, }; static const struct parent_map disp_cc_parent_map_4[] = { { P_BI_TCXO, 0 }, { P_DP0_PHY_PLL_LINK_CLK, 1 }, { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 }, { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 }, { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 }, }; static const struct clk_parent_data disp_cc_parent_data_4[] = { { .index = DT_BI_TCXO }, { .index = DT_DP0_PHY_PLL_LINK_CLK }, { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK }, { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK }, { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK }, { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK }, }; static const struct parent_map disp_cc_parent_map_5[] = { { P_BI_TCXO, 0 }, { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, }; static const struct clk_parent_data disp_cc_parent_data_5[] = { { .index = DT_BI_TCXO }, { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK }, { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK }, }; static const struct parent_map disp_cc_parent_map_6[] = { { P_BI_TCXO, 0 }, { P_DISP_CC_PLL1_OUT_MAIN, 4 }, { P_DISP_CC_PLL1_OUT_EVEN, 6 }, }; static const struct clk_parent_data disp_cc_parent_data_6[] = { { .index = DT_BI_TCXO }, { .hw = &disp_cc_pll1.clkr.hw }, { .hw = &disp_cc_pll1.clkr.hw }, }; static const struct parent_map disp_cc_parent_map_7[] = { { P_BI_TCXO, 0 }, { P_DP0_PHY_PLL_LINK_CLK, 1 }, { P_DP1_PHY_PLL_LINK_CLK, 2 }, { P_DP2_PHY_PLL_LINK_CLK, 3 }, { P_DP3_PHY_PLL_LINK_CLK, 4 }, }; static const struct clk_parent_data disp_cc_parent_data_7[] = { { .index = DT_BI_TCXO }, { .index = DT_DP0_PHY_PLL_LINK_CLK }, { .index = DT_DP1_PHY_PLL_LINK_CLK }, { .index = DT_DP2_PHY_PLL_LINK_CLK }, { .index = DT_DP3_PHY_PLL_LINK_CLK }, }; static const struct parent_map disp_cc_parent_map_8[] = { { P_BI_TCXO, 0 }, { P_DISP_CC_PLL0_OUT_MAIN, 1 }, { P_DISP_CC_PLL1_OUT_MAIN, 4 }, { P_DISP_CC_PLL1_OUT_EVEN, 6 }, }; static const struct clk_parent_data disp_cc_parent_data_8[] = { { .index = DT_BI_TCXO }, { .hw = &disp_cc_pll0.clkr.hw }, { .hw = &disp_cc_pll1.clkr.hw }, { .hw = &disp_cc_pll1.clkr.hw }, }; static const struct parent_map disp_cc_parent_map_9[] = { { P_SLEEP_CLK, 0 }, }; static const struct clk_parent_data disp_cc_parent_data_9[] = { { .index = DT_SLEEP_CLK }, }; static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = { F(19200000, P_BI_TCXO, 1, 0, 0), F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0), F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0), { } }; static struct clk_rcg2 
disp_cc_mdss_ahb_clk_src = { .cmd_rcgr = 0x82e8, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_6, .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_ahb_clk_src", .parent_data = disp_cc_parent_data_6, .num_parents = ARRAY_SIZE(disp_cc_parent_data_6), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, }; static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = { F(19200000, P_BI_TCXO, 1, 0, 0), { } }; static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = { .cmd_rcgr = 0x8108, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_2, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte0_clk_src", .parent_data = disp_cc_parent_data_2, .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, }; static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = { .cmd_rcgr = 0x8124, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_2, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte1_clk_src", .parent_data = disp_cc_parent_data_2, .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = { .cmd_rcgr = 0x81bc, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_0, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_aux_clk_src", .parent_data = disp_cc_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = { .cmd_rcgr = 0x8170, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_7, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_link_clk_src", .parent_data = disp_cc_parent_data_7, .num_parents = ARRAY_SIZE(disp_cc_parent_data_7), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = { .cmd_rcgr = 0x818c, .mnd_width = 16, .hid_width = 5, .parent_map = disp_cc_parent_map_4, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_pixel0_clk_src", .parent_data = disp_cc_parent_data_4, .num_parents = ARRAY_SIZE(disp_cc_parent_data_4), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = { .cmd_rcgr = 0x81a4, .mnd_width = 16, .hid_width = 5, .parent_map = disp_cc_parent_map_4, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_pixel1_clk_src", .parent_data = disp_cc_parent_data_4, .num_parents = ARRAY_SIZE(disp_cc_parent_data_4), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = { .cmd_rcgr = 0x8220, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_0, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_aux_clk_src", .parent_data = disp_cc_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = { .cmd_rcgr = 0x8204, .mnd_width = 0, .hid_width = 
5, .parent_map = disp_cc_parent_map_3, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = { .cmd_rcgr = 0x81d4, .mnd_width = 16, .hid_width = 5, .parent_map = disp_cc_parent_map_1, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_pixel0_clk_src", .parent_data = disp_cc_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = { .cmd_rcgr = 0x81ec, .mnd_width = 16, .hid_width = 5, .parent_map = disp_cc_parent_map_1, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_pixel1_clk_src", .parent_data = disp_cc_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = { .cmd_rcgr = 0x8284, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_0, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_aux_clk_src", .parent_data = disp_cc_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = { .cmd_rcgr = 0x8238, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = { .cmd_rcgr = 0x8254, .mnd_width = 16, .hid_width = 5, .parent_map = disp_cc_parent_map_1, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_pixel0_clk_src", .parent_data = disp_cc_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = { .cmd_rcgr = 0x826c, .mnd_width = 16, .hid_width = 5, .parent_map = disp_cc_parent_map_1, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_pixel1_clk_src", .parent_data = disp_cc_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = { .cmd_rcgr = 0x82d0, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_0, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_aux_clk_src", .parent_data = disp_cc_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = { .cmd_rcgr = 0x82b4, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = 
ARRAY_SIZE(disp_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, }; static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = { .cmd_rcgr = 0x829c, .mnd_width = 16, .hid_width = 5, .parent_map = disp_cc_parent_map_1, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_pixel0_clk_src", .parent_data = disp_cc_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, }; static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = { .cmd_rcgr = 0x8140, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_5, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_esc0_clk_src", .parent_data = disp_cc_parent_data_5, .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, }; static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = { .cmd_rcgr = 0x8158, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_5, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_esc1_clk_src", .parent_data = disp_cc_parent_data_5, .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, }; static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = { F(19200000, P_BI_TCXO, 1, 0, 0), F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(172000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(375000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), { } }; static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sar2130p[] = { F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), { } }; static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sm8650[] = { F(19200000, P_BI_TCXO, 1, 0, 0), F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(402000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), { } }; static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = { .cmd_rcgr = 0x80d8, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_8, .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_mdp_clk_src", .parent_data = disp_cc_parent_data_8, .num_parents = ARRAY_SIZE(disp_cc_parent_data_8), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, }; static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = { .cmd_rcgr = 0x80a8, .mnd_width = 8, .hid_width = 5, .parent_map = disp_cc_parent_map_2, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_pclk0_clk_src", .parent_data = disp_cc_parent_data_2, .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), .flags = CLK_SET_RATE_PARENT, .ops = &clk_pixel_ops, }, }; static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = { .cmd_rcgr = 0x80c0, .mnd_width = 8, .hid_width = 5, .parent_map = disp_cc_parent_map_2, .freq_tbl = 
ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_pclk1_clk_src", .parent_data = disp_cc_parent_data_2, .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), .flags = CLK_SET_RATE_PARENT, .ops = &clk_pixel_ops, }, }; static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = { .cmd_rcgr = 0x80f0, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_0, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_vsync_clk_src", .parent_data = disp_cc_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = { F(32000, P_SLEEP_CLK, 1, 0, 0), { } }; static struct clk_rcg2 disp_cc_sleep_clk_src = { .cmd_rcgr = 0xe05c, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_9, .freq_tbl = ftbl_disp_cc_sleep_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_sleep_clk_src", .parent_data = disp_cc_parent_data_9, .num_parents = ARRAY_SIZE(disp_cc_parent_data_9), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 disp_cc_xo_clk_src = { .cmd_rcgr = 0xe03c, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_0, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_xo_clk_src", .parent_data = disp_cc_parent_data_0_ao, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, }, }; static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = { .reg = 0x8120, .shift = 0, .width = 4, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte0_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_byte0_clk_src.clkr.hw, }, .num_parents = 1, .ops = &clk_regmap_div_ops, }, }; static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = { .reg = 0x813c, .shift = 0, .width = 4, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte1_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_byte1_clk_src.clkr.hw, }, .num_parents = 1, .ops = &clk_regmap_div_ops, }, }; static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = { .reg = 0x8188, .shift = 0, .width = 4, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_link_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_regmap_div_ro_ops, }, }; static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = { .reg = 0x821c, .shift = 0, .width = 4, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_link_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_regmap_div_ro_ops, }, }; static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = { .reg = 0x8250, .shift = 0, .width = 4, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_link_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_regmap_div_ro_ops, }, }; static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = { .reg = 0x82cc, .shift = 0, .width = 4, .clkr.hw.init = &(const struct clk_init_data) { .name = 
"disp_cc_mdss_dptx3_link_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx3_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_regmap_div_ro_ops, }, }; static struct clk_branch disp_cc_mdss_accu_clk = { .halt_reg = 0xe058, .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0xe058, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_accu_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_xo_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_ahb1_clk = { .halt_reg = 0xa020, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0xa020, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_ahb1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_ahb_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_ahb_clk = { .halt_reg = 0x80a4, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x80a4, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_ahb_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_ahb_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_byte0_clk = { .halt_reg = 0x8028, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8028, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_byte0_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_byte0_intf_clk = { .halt_reg = 0x802c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x802c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte0_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_byte0_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_byte1_clk = { .halt_reg = 0x8030, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8030, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_byte1_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_byte1_intf_clk = { .halt_reg = 0x8034, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8034, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte1_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_byte1_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx0_aux_clk = { .halt_reg = 0x8058, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8058, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_aux_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = { .halt_reg = 0x804c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x804c, .enable_mask = BIT(0), 
.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_crypto_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx0_link_clk = { .halt_reg = 0x8040, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8040, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_link_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = { .halt_reg = 0x8048, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8048, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = { .halt_reg = 0x8050, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8050, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_pixel0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = { .halt_reg = 0x8054, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8054, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_pixel1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = { .halt_reg = 0x8044, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8044, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx1_aux_clk = { .halt_reg = 0x8074, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8074, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_aux_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = { .halt_reg = 0x8070, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8070, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_crypto_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx1_link_clk = { .halt_reg = 0x8064, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8064, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_link_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, 
.ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = { .halt_reg = 0x806c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x806c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = { .halt_reg = 0x805c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x805c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_pixel0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = { .halt_reg = 0x8060, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8060, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_pixel1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = { .halt_reg = 0x8068, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8068, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx2_aux_clk = { .halt_reg = 0x808c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x808c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_aux_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = { .halt_reg = 0x8088, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8088, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_crypto_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx2_link_clk = { .halt_reg = 0x8080, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8080, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_link_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = { .halt_reg = 0x8084, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8084, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = { .halt_reg = 0x8078, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8078, .enable_mask = BIT(0), .hw.init = 
&(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_pixel0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = { .halt_reg = 0x807c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x807c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_pixel1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx3_aux_clk = { .halt_reg = 0x809c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x809c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_aux_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = { .halt_reg = 0x80a0, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x80a0, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_crypto_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx3_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx3_link_clk = { .halt_reg = 0x8094, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8094, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_link_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx3_link_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = { .halt_reg = 0x8098, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8098, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = { .halt_reg = 0x8090, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8090, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_pixel0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_esc0_clk = { .halt_reg = 0x8038, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8038, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_esc0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_esc0_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_esc1_clk = { .halt_reg = 0x803c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x803c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_esc1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_esc1_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_mdp1_clk = 
{ .halt_reg = 0xa004, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0xa004, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_mdp1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_mdp_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_mdp_clk = { .halt_reg = 0x800c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x800c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_mdp_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_mdp_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_mdp_lut1_clk = { .halt_reg = 0xa010, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0xa010, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_mdp_lut1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_mdp_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_mdp_lut_clk = { .halt_reg = 0x8018, .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0x8018, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_mdp_lut_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_mdp_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = { .halt_reg = 0xc004, .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0xc004, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_non_gdsc_ahb_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_ahb_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_pclk0_clk = { .halt_reg = 0x8004, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8004, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_pclk0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_pclk0_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_pclk1_clk = { .halt_reg = 0x8008, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8008, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_pclk1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_pclk1_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_rscc_ahb_clk = { .halt_reg = 0xc00c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0xc00c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_rscc_ahb_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_ahb_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_rscc_vsync_clk = { .halt_reg = 0xc008, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0xc008, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_rscc_vsync_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_vsync_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct 
clk_branch disp_cc_mdss_vsync1_clk = { .halt_reg = 0xa01c, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0xa01c, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_vsync1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_vsync_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_mdss_vsync_clk = { .halt_reg = 0x8024, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0x8024, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_vsync_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_vsync_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch disp_cc_sleep_clk = { .halt_reg = 0xe074, .halt_check = BRANCH_HALT, .clkr = { .enable_reg = 0xe074, .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "disp_cc_sleep_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_sleep_clk_src.clkr.hw, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct gdsc mdss_gdsc = { .gdscr = 0x9000, .pd = { .name = "mdss_gdsc", }, .pwrsts = PWRSTS_OFF_ON, .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE, }; static struct gdsc mdss_int2_gdsc = { .gdscr = 0xb000, .pd = { .name = "mdss_int2_gdsc", }, .pwrsts = PWRSTS_OFF_ON, .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE, }; static struct clk_regmap *disp_cc_sm8550_clocks[] = { [DISP_CC_MDSS_ACCU_CLK] = &disp_cc_mdss_accu_clk.clkr, [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr, [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr, [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr, [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr, [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr, [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr, [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr, [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr, [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr, [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr, [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr, [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr, [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr, [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr, [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr, [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr, [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr, [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr, [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr, [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr, [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr, [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr, [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr, [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr, [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr, [DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &disp_cc_mdss_dptx1_crypto_clk.clkr, [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr, [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = 
&disp_cc_mdss_dptx1_link_clk_src.clkr, [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr, [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr, [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr, [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr, [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr, [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr, [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr, [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr, [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr, [DISP_CC_MDSS_DPTX2_CRYPTO_CLK] = &disp_cc_mdss_dptx2_crypto_clk.clkr, [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr, [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr, [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr, [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr, [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr, [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr, [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr, [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr, [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr, [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr, [DISP_CC_MDSS_DPTX3_CRYPTO_CLK] = &disp_cc_mdss_dptx3_crypto_clk.clkr, [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr, [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr, [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr, [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr, [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr, [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr, [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr, [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr, [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr, [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr, [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr, [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr, [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr, [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr, [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr, [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr, [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr, [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr, [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr, [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr, [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr, [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr, [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr, [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr, [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr, [DISP_CC_PLL0] = &disp_cc_pll0.clkr, [DISP_CC_PLL1] = &disp_cc_pll1.clkr, [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr, [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr, [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr, }; static const struct qcom_reset_map disp_cc_sm8550_resets[] = { [DISP_CC_MDSS_CORE_BCR] = { 0x8000 }, 
	[DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
	[DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
};

static struct gdsc *disp_cc_sm8550_gdscs[] = {
	[MDSS_GDSC] = &mdss_gdsc,
	[MDSS_INT2_GDSC] = &mdss_int2_gdsc,
};

static const struct regmap_config disp_cc_sm8550_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.max_register = 0x11008,
	.fast_io = true,
};

static struct qcom_cc_desc disp_cc_sm8550_desc = {
	.config = &disp_cc_sm8550_regmap_config,
	.clks = disp_cc_sm8550_clocks,
	.num_clks = ARRAY_SIZE(disp_cc_sm8550_clocks),
	.resets = disp_cc_sm8550_resets,
	.num_resets = ARRAY_SIZE(disp_cc_sm8550_resets),
	.gdscs = disp_cc_sm8550_gdscs,
	.num_gdscs = ARRAY_SIZE(disp_cc_sm8550_gdscs),
};

static const struct of_device_id disp_cc_sm8550_match_table[] = {
	{ .compatible = "qcom,sar2130p-dispcc" },
	{ .compatible = "qcom,sm8550-dispcc" },
	{ .compatible = "qcom,sm8650-dispcc" },
	{ }
};
MODULE_DEVICE_TABLE(of, disp_cc_sm8550_match_table);

static int disp_cc_sm8550_probe(struct platform_device *pdev)
{
	struct regmap *regmap;
	int ret;

	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret)
		return ret;

	regmap = qcom_cc_map(pdev, &disp_cc_sm8550_desc);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_put_rpm;
	}

	if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8650-dispcc")) {
		lucid_ole_vco[0].max_freq = 2100000000;
		disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sm8650;
		disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr.hw.init->parent_hws[0] =
			&disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw;
	} else if (of_device_is_compatible(pdev->dev.of_node, "qcom,sar2130p-dispcc")) {
		disp_cc_pll0_config.l = 0x1f;
		disp_cc_pll0_config.alpha = 0x4000;
		disp_cc_pll0_config.user_ctl_val = 0x1;
		disp_cc_pll1_config.user_ctl_val = 0x1;
		disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sar2130p;
	}

	clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
	clk_lucid_ole_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);

	/* Enable clock gating for MDP clocks */
	regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);

	/* Keep some clocks always-on */
	qcom_branch_set_clk_en(regmap, 0xe054); /* DISP_CC_XO_CLK */

	ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8550_desc, regmap);
	if (ret)
		goto err_put_rpm;

	pm_runtime_put(&pdev->dev);

	return 0;

err_put_rpm:
	pm_runtime_put_sync(&pdev->dev);

	return ret;
}

static struct platform_driver disp_cc_sm8550_driver = {
	.probe = disp_cc_sm8550_probe,
	.driver = {
		.name = "disp_cc-sm8550",
		.of_match_table = disp_cc_sm8550_match_table,
	},
};

module_platform_driver(disp_cc_sm8550_driver);

MODULE_DESCRIPTION("QTI DISPCC SM8550 / SM8650 Driver");
MODULE_LICENSE("GPL");
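
/*
 * Illustrative sketch (not part of the driver): consumers reference these
 * clocks by the DISP_CC_* indices from
 * <dt-bindings/clock/qcom,sm8550-dispcc.h> in their DT node and then use the
 * regular clk API. The "mdp" con_id below is an assumption standing in for
 * whatever clock-names entry the consumer binding actually defines.
 */
static int example_enable_mdp_clk(struct device *dev)
{
	struct clk *mdp;

	mdp = devm_clk_get(dev, "mdp");	/* matches clock-names in consumer DT */
	if (IS_ERR(mdp))
		return PTR_ERR(mdp);

	return clk_prepare_enable(mdp);
}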
// SPDX-License-Identifier: GPL-2.0+ /* * Retu watchdog driver * * Copyright (C) 2004, 2005 Nokia Corporation * * Based on code written by Amit Kucheria and Michael Buesch. * Rewritten by Aaro Koskinen. */ #include <linux/devm-helpers.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mfd/retu.h> #include <linux/watchdog.h> #include <linux/platform_device.h> /* Watchdog timer values in seconds */ #define RETU_WDT_MAX_TIMER 63 struct retu_wdt_dev { struct retu_dev *rdev; struct device *dev; struct delayed_work ping_work; }; /* * Since Retu watchdog cannot be disabled in hardware, we must kick it * with a timer until userspace watchdog software takes over. If * CONFIG_WATCHDOG_NOWAYOUT is set, we never start the feeding. */ static void retu_wdt_ping_enable(struct retu_wdt_dev *wdev) { retu_write(wdev->rdev, RETU_REG_WATCHDOG, RETU_WDT_MAX_TIMER); schedule_delayed_work(&wdev->ping_work, round_jiffies_relative(RETU_WDT_MAX_TIMER * HZ / 2)); } static void retu_wdt_ping_disable(struct retu_wdt_dev *wdev) { retu_write(wdev->rdev, RETU_REG_WATCHDOG, RETU_WDT_MAX_TIMER); cancel_delayed_work_sync(&wdev->ping_work); } static void retu_wdt_ping_work(struct work_struct *work) { struct retu_wdt_dev *wdev = container_of(to_delayed_work(work), struct retu_wdt_dev, ping_work); retu_wdt_ping_enable(wdev); } static int retu_wdt_start(struct watchdog_device *wdog) { struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog); retu_wdt_ping_disable(wdev); return retu_write(wdev->rdev, RETU_REG_WATCHDOG, wdog->timeout); } static int retu_wdt_stop(struct watchdog_device *wdog) { struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog); retu_wdt_ping_enable(wdev); return 0; } static int retu_wdt_ping(struct watchdog_device *wdog) { struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog); return retu_write(wdev->rdev, RETU_REG_WATCHDOG, wdog->timeout); } static int retu_wdt_set_timeout(struct watchdog_device *wdog, unsigned int timeout) { struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog); wdog->timeout = timeout; return retu_write(wdev->rdev, RETU_REG_WATCHDOG, wdog->timeout); } static const struct watchdog_info retu_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .identity = "Retu watchdog", }; static const struct watchdog_ops retu_wdt_ops = { .owner = THIS_MODULE, .start = retu_wdt_start, .stop = retu_wdt_stop, .ping = retu_wdt_ping, .set_timeout = retu_wdt_set_timeout, }; static int retu_wdt_probe(struct platform_device *pdev) { struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent); bool nowayout = WATCHDOG_NOWAYOUT; struct watchdog_device *retu_wdt; struct retu_wdt_dev *wdev; int ret; retu_wdt = devm_kzalloc(&pdev->dev, sizeof(*retu_wdt), GFP_KERNEL); if (!retu_wdt) return -ENOMEM; wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL); if (!wdev) return -ENOMEM; retu_wdt->info = &retu_wdt_info; retu_wdt->ops = &retu_wdt_ops; retu_wdt->timeout = RETU_WDT_MAX_TIMER; retu_wdt->min_timeout = 0; retu_wdt->max_timeout = RETU_WDT_MAX_TIMER; retu_wdt->parent = &pdev->dev; watchdog_set_drvdata(retu_wdt, wdev); watchdog_set_nowayout(retu_wdt, nowayout); wdev->rdev = rdev; wdev->dev = &pdev->dev; ret = devm_delayed_work_autocancel(&pdev->dev, &wdev->ping_work, retu_wdt_ping_work); if (ret) return ret; ret = devm_watchdog_register_device(&pdev->dev, retu_wdt); if (ret < 0) return ret; if (nowayout) retu_wdt_ping(retu_wdt); else retu_wdt_ping_enable(wdev); return 0; } static struct 
platform_driver retu_wdt_driver = {
	.probe		= retu_wdt_probe,
	.driver		= {
		.name	= "retu-wdt",
	},
};
module_platform_driver(retu_wdt_driver);

MODULE_ALIAS("platform:retu-wdt");
MODULE_DESCRIPTION("Retu watchdog");
MODULE_AUTHOR("Amit Kucheria");
MODULE_AUTHOR("Aaro Koskinen <[email protected]>");
MODULE_LICENSE("GPL");
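/*
 * Illustrative userspace sketch, not part of this driver: once a watchdog
 * daemon opens /dev/watchdog, the watchdog core calls retu_wdt_start() and
 * the internal ping worker above is cancelled. The 30 s timeout is an
 * arbitrary example within the 0..RETU_WDT_MAX_TIMER range.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int example_retu_wdt_client(void)
{
	int timeout = 30;
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return -1;

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* -> retu_wdt_set_timeout() */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* -> retu_wdt_ping() */
	write(fd, "V", 1);	/* magic close, honoured via WDIOF_MAGICCLOSE */
	return close(fd);
}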
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Device Tree Source for the R-Car M3e-2G (R8A779M3) SoC
 *
 * Copyright (C) 2021 Glider bv
 */

#include "r8a77961.dtsi"

/ {
	compatible = "renesas,r8a779m3", "renesas,r8a77961";
};

&cluster0_opp {
	opp-1800000000 {
		/delete-property/ turbo-mode;
	};
	opp-2000000000 {
		opp-hz = /bits/ 64 <2000000000>;
		opp-microvolt = <960000>;
		clock-latency-ns = <300000>;
		turbo-mode;
	};
};
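/*
 * Note: this source relies on r8a77961.dtsi already providing the
 * &cluster0_opp table. The override above removes the turbo-mode flag from
 * the inherited 1.8 GHz point and adds a 2.0 GHz turbo point, which is what
 * distinguishes the R8A779M3 variant from the base R8A77961.
 */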
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_fib.h>
#include <net/inet_dscp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/route.h>

/* don't try to find a route from mcast/bcast/zeronet */
static __be32 get_saddr(__be32 addr)
{
	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) ||
	    ipv4_is_zeronet(addr))
		return 0;

	return addr;
}

void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
			const struct nft_pktinfo *pkt)
{
	const struct nft_fib *priv = nft_expr_priv(expr);
	int noff = skb_network_offset(pkt->skb);
	u32 *dst = &regs->data[priv->dreg];
	const struct net_device *dev = NULL;
	struct iphdr *iph, _iph;
	__be32 addr;

	if (priv->flags & NFTA_FIB_F_IIF)
		dev = nft_in(pkt);
	else if (priv->flags & NFTA_FIB_F_OIF)
		dev = nft_out(pkt);

	iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
	if (!iph) {
		regs->verdict.code = NFT_BREAK;
		return;
	}

	if (priv->flags & NFTA_FIB_F_DADDR)
		addr = iph->daddr;
	else
		addr = iph->saddr;

	*dst = inet_dev_addr_type(nft_net(pkt), dev, addr);
}
EXPORT_SYMBOL_GPL(nft_fib4_eval_type);

void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
		   const struct nft_pktinfo *pkt)
{
	const struct nft_fib *priv = nft_expr_priv(expr);
	int noff = skb_network_offset(pkt->skb);
	u32 *dest = &regs->data[priv->dreg];
	struct iphdr *iph, _iph;
	struct fib_result res;
	struct flowi4 fl4 = {
		.flowi4_scope = RT_SCOPE_UNIVERSE,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_uid = sock_net_uid(nft_net(pkt), NULL),
		.flowi4_l3mdev = l3mdev_master_ifindex_rcu(nft_in(pkt)),
	};
	const struct net_device *oif;
	const struct net_device *found;

	/*
	 * Do not set flowi4_oif, it restricts results (for example, asking
	 * for oif 3 will get an RTN_UNICAST result even if the daddr exists
	 * on another interface).
	 *
	 * Search results for the desired output interface instead.
*/ if (priv->flags & NFTA_FIB_F_OIF) oif = nft_out(pkt); else if (priv->flags & NFTA_FIB_F_IIF) oif = nft_in(pkt); else oif = NULL; if (nft_hook(pkt) == NF_INET_PRE_ROUTING && nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { nft_fib_store_result(dest, priv, nft_in(pkt)); return; } iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph); if (!iph) { regs->verdict.code = NFT_BREAK; return; } if (ipv4_is_zeronet(iph->saddr)) { if (ipv4_is_lbcast(iph->daddr) || ipv4_is_local_multicast(iph->daddr)) { nft_fib_store_result(dest, priv, pkt->skb->dev); return; } } if (priv->flags & NFTA_FIB_F_MARK) fl4.flowi4_mark = pkt->skb->mark; fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph)); if (priv->flags & NFTA_FIB_F_DADDR) { fl4.daddr = iph->daddr; fl4.saddr = get_saddr(iph->saddr); } else { if (nft_hook(pkt) == NF_INET_FORWARD && priv->flags & NFTA_FIB_F_IIF) fl4.flowi4_iif = nft_out(pkt)->ifindex; fl4.daddr = iph->saddr; fl4.saddr = get_saddr(iph->daddr); } *dest = 0; if (fib_lookup(nft_net(pkt), &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE)) return; switch (res.type) { case RTN_UNICAST: break; case RTN_LOCAL: /* Should not see RTN_LOCAL here */ return; default: break; } if (!oif) { found = FIB_RES_DEV(res); } else { if (!fib_info_nh_uses_dev(res.fi, oif)) return; found = oif; } nft_fib_store_result(dest, priv, found); } EXPORT_SYMBOL_GPL(nft_fib4_eval); static struct nft_expr_type nft_fib4_type; static const struct nft_expr_ops nft_fib4_type_ops = { .type = &nft_fib4_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_fib)), .eval = nft_fib4_eval_type, .init = nft_fib_init, .dump = nft_fib_dump, .validate = nft_fib_validate, .reduce = nft_fib_reduce, }; static const struct nft_expr_ops nft_fib4_ops = { .type = &nft_fib4_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_fib)), .eval = nft_fib4_eval, .init = nft_fib_init, .dump = nft_fib_dump, .validate = nft_fib_validate, .reduce = nft_fib_reduce, }; static const struct nft_expr_ops * nft_fib4_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { enum nft_fib_result result; if (!tb[NFTA_FIB_RESULT]) return ERR_PTR(-EINVAL); result = ntohl(nla_get_be32(tb[NFTA_FIB_RESULT])); switch (result) { case NFT_FIB_RESULT_OIF: return &nft_fib4_ops; case NFT_FIB_RESULT_OIFNAME: return &nft_fib4_ops; case NFT_FIB_RESULT_ADDRTYPE: return &nft_fib4_type_ops; default: return ERR_PTR(-EOPNOTSUPP); } } static struct nft_expr_type nft_fib4_type __read_mostly = { .name = "fib", .select_ops = nft_fib4_select_ops, .policy = nft_fib_policy, .maxattr = NFTA_FIB_MAX, .family = NFPROTO_IPV4, .owner = THIS_MODULE, }; static int __init nft_fib4_module_init(void) { return nft_register_expr(&nft_fib4_type); } static void __exit nft_fib4_module_exit(void) { nft_unregister_expr(&nft_fib4_type); } module_init(nft_fib4_module_init); module_exit(nft_fib4_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Florian Westphal <[email protected]>"); MODULE_ALIAS_NFT_AF_EXPR(2, "fib"); MODULE_DESCRIPTION("nftables fib / ip route lookup support");
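/*
 * Illustrative usage (nft CLI), not part of this module: the fib expression
 * registered above backs lookups such as the documented strict reverse-path
 * filter rule, e.g.
 *
 *	nft add rule inet filter prerouting fib saddr . iif oif missing drop
 *
 * NFT_FIB_RESULT_OIF/OIFNAME select the route lookup in nft_fib4_eval(),
 * while NFT_FIB_RESULT_ADDRTYPE selects nft_fib4_eval_type() instead.
 */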
/* * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR * * Copyright (C) 2010 Jarod Wilson <[email protected]> * Copyright (C) 2009 Nuvoton PS Team * * Special thanks to Nuvoton for providing hardware, spec sheets and * sample code upon which portions of this driver are based. Indirect * thanks also to Maxim Levitsky, whose ene_ir driver this driver is * modeled after. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/pnp.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/slab.h> #include <media/rc-core.h> #include <linux/pci_ids.h> #include "nuvoton-cir.h" static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt); static const struct nvt_chip nvt_chips[] = { { "w83667hg", NVT_W83667HG }, { "NCT6775F", NVT_6775F }, { "NCT6776F", NVT_6776F }, { "NCT6779D", NVT_6779D }, }; static inline struct device *nvt_get_dev(const struct nvt_dev *nvt) { return nvt->rdev->dev.parent; } static inline bool is_w83667hg(struct nvt_dev *nvt) { return nvt->chip_ver == NVT_W83667HG; } /* write val to config reg */ static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg) { outb(reg, nvt->cr_efir); outb(val, nvt->cr_efdr); } /* read val from config reg */ static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg) { outb(reg, nvt->cr_efir); return inb(nvt->cr_efdr); } /* update config register bit without changing other bits */ static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg) { u8 tmp = nvt_cr_read(nvt, reg) | val; nvt_cr_write(nvt, tmp, reg); } /* enter extended function mode */ static inline int nvt_efm_enable(struct nvt_dev *nvt) { if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME)) return -EBUSY; /* Enabling Extended Function Mode explicitly requires writing 2x */ outb(EFER_EFM_ENABLE, nvt->cr_efir); outb(EFER_EFM_ENABLE, nvt->cr_efir); return 0; } /* exit extended function mode */ static inline void nvt_efm_disable(struct nvt_dev *nvt) { outb(EFER_EFM_DISABLE, nvt->cr_efir); release_region(nvt->cr_efir, 2); } /* * When you want to address a specific logical device, write its logical * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN. 
*/ static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev) { nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL); } /* select and enable logical device with setting EFM mode*/ static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev) { nvt_efm_enable(nvt); nvt_select_logical_dev(nvt, ldev); nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN); nvt_efm_disable(nvt); } /* select and disable logical device with setting EFM mode*/ static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev) { nvt_efm_enable(nvt); nvt_select_logical_dev(nvt, ldev); nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN); nvt_efm_disable(nvt); } /* write val to cir config register */ static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset) { outb(val, nvt->cir_addr + offset); } /* read val from cir config register */ static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset) { return inb(nvt->cir_addr + offset); } /* write val to cir wake register */ static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt, u8 val, u8 offset) { outb(val, nvt->cir_wake_addr + offset); } /* read val from cir wake config register */ static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset) { return inb(nvt->cir_wake_addr + offset); } /* don't override io address if one is set already */ static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr) { unsigned long old_addr; old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8; old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO); if (old_addr) *ioaddr = old_addr; else { nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI); nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO); } } static void nvt_write_wakeup_codes(struct rc_dev *dev, const u8 *wbuf, int count) { u8 tolerance, config; struct nvt_dev *nvt = dev->priv; unsigned long flags; int i; /* hardcode the tolerance to 10% */ tolerance = DIV_ROUND_UP(count, 10); spin_lock_irqsave(&nvt->lock, flags); nvt_clear_cir_wake_fifo(nvt); nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP); nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL); config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON); /* enable writes to wake fifo */ nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1, CIR_WAKE_IRCON); if (count) pr_info("Wake samples (%d) =", count); else pr_info("Wake sample fifo cleared"); for (i = 0; i < count; i++) nvt_cir_wake_reg_write(nvt, wbuf[i], CIR_WAKE_WR_FIFO_DATA); nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON); spin_unlock_irqrestore(&nvt->lock, flags); } static ssize_t wakeup_data_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rc_dev *rc_dev = to_rc_dev(dev); struct nvt_dev *nvt = rc_dev->priv; int fifo_len, duration; unsigned long flags; ssize_t buf_len = 0; int i; spin_lock_irqsave(&nvt->lock, flags); fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT); fifo_len = min(fifo_len, WAKEUP_MAX_SIZE); /* go to first element to be read */ while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY); for (i = 0; i < fifo_len; i++) { duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY); duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD; buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len, "%d ", duration); } buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n"); spin_unlock_irqrestore(&nvt->lock, flags); return buf_len; } static ssize_t wakeup_data_store(struct device *dev, struct device_attribute *attr, const char *buf, 
size_t len) { struct rc_dev *rc_dev = to_rc_dev(dev); u8 wake_buf[WAKEUP_MAX_SIZE]; char **argv; int i, count; unsigned int val; ssize_t ret; argv = argv_split(GFP_KERNEL, buf, &count); if (!argv) return -ENOMEM; if (!count || count > WAKEUP_MAX_SIZE) { ret = -EINVAL; goto out; } for (i = 0; i < count; i++) { ret = kstrtouint(argv[i], 10, &val); if (ret) goto out; val = DIV_ROUND_CLOSEST(val, SAMPLE_PERIOD); if (!val || val > 0x7f) { ret = -EINVAL; goto out; } wake_buf[i] = val; /* sequence must start with a pulse */ if (i % 2 == 0) wake_buf[i] |= BUF_PULSE_BIT; } nvt_write_wakeup_codes(rc_dev, wake_buf, count); ret = len; out: argv_free(argv); return ret; } static DEVICE_ATTR_RW(wakeup_data); /* dump current cir register contents */ static void cir_dump_regs(struct nvt_dev *nvt) { nvt_efm_enable(nvt); nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); pr_info("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME); pr_info(" * CR CIR ACTIVE : 0x%x\n", nvt_cr_read(nvt, CR_LOGICAL_DEV_EN)); pr_info(" * CR CIR BASE ADDR: 0x%x\n", (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) | nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO)); pr_info(" * CR CIR IRQ NUM: 0x%x\n", nvt_cr_read(nvt, CR_CIR_IRQ_RSRC)); nvt_efm_disable(nvt); pr_info("%s: Dump CIR registers:\n", NVT_DRIVER_NAME); pr_info(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON)); pr_info(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS)); pr_info(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN)); pr_info(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT)); pr_info(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP)); pr_info(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC)); pr_info(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH)); pr_info(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL)); pr_info(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON)); pr_info(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS)); pr_info(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO)); pr_info(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT)); pr_info(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO)); pr_info(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH)); pr_info(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL)); pr_info(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM)); } /* dump current cir wake register contents */ static void cir_wake_dump_regs(struct nvt_dev *nvt) { u8 i, fifo_len; nvt_efm_enable(nvt); nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE); pr_info("%s: Dump CIR WAKE logical device registers:\n", NVT_DRIVER_NAME); pr_info(" * CR CIR WAKE ACTIVE : 0x%x\n", nvt_cr_read(nvt, CR_LOGICAL_DEV_EN)); pr_info(" * CR CIR WAKE BASE ADDR: 0x%x\n", (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) | nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO)); pr_info(" * CR CIR WAKE IRQ NUM: 0x%x\n", nvt_cr_read(nvt, CR_CIR_IRQ_RSRC)); nvt_efm_disable(nvt); pr_info("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME); pr_info(" * IRCON: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON)); pr_info(" * IRSTS: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS)); pr_info(" * IREN: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN)); pr_info(" * FIFO CMP DEEP: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP)); pr_info(" * FIFO CMP TOL: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL)); pr_info(" * FIFO COUNT: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT)); pr_info(" * SLCH: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH)); pr_info(" * SLCL: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL)); 
pr_info(" * FIFOCON: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON)); pr_info(" * SRXFSTS: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS)); pr_info(" * SAMPLE RX FIFO: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO)); pr_info(" * WR FIFO DATA: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA)); pr_info(" * RD FIFO ONLY: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY)); pr_info(" * RD FIFO ONLY IDX: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)); pr_info(" * FIFO IGNORE: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE)); pr_info(" * IRFSM: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM)); fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT); pr_info("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len); pr_info("* Contents ="); for (i = 0; i < fifo_len; i++) pr_cont(" %02x", nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY)); pr_cont("\n"); } static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id) { int i; for (i = 0; i < ARRAY_SIZE(nvt_chips); i++) if ((id & SIO_ID_MASK) == nvt_chips[i].chip_ver) { nvt->chip_ver = nvt_chips[i].chip_ver; return nvt_chips[i].name; } return NULL; } /* detect hardware features */ static int nvt_hw_detect(struct nvt_dev *nvt) { struct device *dev = nvt_get_dev(nvt); const char *chip_name; int chip_id; nvt_efm_enable(nvt); /* Check if we're wired for the alternate EFER setup */ nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI); if (nvt->chip_major == 0xff) { nvt_efm_disable(nvt); nvt->cr_efir = CR_EFIR2; nvt->cr_efdr = CR_EFDR2; nvt_efm_enable(nvt); nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI); } nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO); nvt_efm_disable(nvt); chip_id = nvt->chip_major << 8 | nvt->chip_minor; if (chip_id == NVT_INVALID) { dev_err(dev, "No device found on either EFM port\n"); return -ENODEV; } chip_name = nvt_find_chip(nvt, chip_id); /* warn, but still let the driver load, if we don't know this chip */ if (!chip_name) dev_warn(dev, "unknown chip, id: 0x%02x 0x%02x, it may not work...", nvt->chip_major, nvt->chip_minor); else dev_info(dev, "found %s or compatible: chip id: 0x%02x 0x%02x", chip_name, nvt->chip_major, nvt->chip_minor); return 0; } static void nvt_cir_ldev_init(struct nvt_dev *nvt) { u8 val, psreg, psmask, psval; if (is_w83667hg(nvt)) { psreg = CR_MULTIFUNC_PIN_SEL; psmask = MULTIFUNC_PIN_SEL_MASK; psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB; } else { psreg = CR_OUTPUT_PIN_SEL; psmask = OUTPUT_PIN_SEL_MASK; psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB; } /* output pin selection: enable CIR, with WB sensor enabled */ val = nvt_cr_read(nvt, psreg); val &= psmask; val |= psval; nvt_cr_write(nvt, val, psreg); /* Select CIR logical device */ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); nvt_set_ioaddr(nvt, &nvt->cir_addr); nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC); nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d", nvt->cir_addr, nvt->cir_irq); } static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt) { /* Select ACPI logical device and anable it */ nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI); nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN); /* Enable CIR Wake via PSOUT# (Pin60) */ nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE); /* enable pme interrupt of cir wakeup event */ nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2); /* Select CIR Wake logical device */ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE); nvt_set_ioaddr(nvt, 
&nvt->cir_wake_addr); nvt_dbg("CIR Wake initialized, base io port address: 0x%lx", nvt->cir_wake_addr); } /* clear out the hardware's cir rx fifo */ static void nvt_clear_cir_fifo(struct nvt_dev *nvt) { u8 val = nvt_cir_reg_read(nvt, CIR_FIFOCON); nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON); } /* clear out the hardware's cir wake rx fifo */ static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt) { u8 val, config; config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON); /* clearing wake fifo works in learning mode only */ nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0, CIR_WAKE_IRCON); val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON); nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR, CIR_WAKE_FIFOCON); nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON); } /* clear out the hardware's cir tx fifo */ static void nvt_clear_tx_fifo(struct nvt_dev *nvt) { u8 val; val = nvt_cir_reg_read(nvt, CIR_FIFOCON); nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON); } /* enable RX Trigger Level Reach and Packet End interrupts */ static void nvt_set_cir_iren(struct nvt_dev *nvt) { u8 iren; iren = CIR_IREN_RTR | CIR_IREN_PE | CIR_IREN_RFO; nvt_cir_reg_write(nvt, iren, CIR_IREN); } static void nvt_cir_regs_init(struct nvt_dev *nvt) { nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR); /* set sample limit count (PE interrupt raised when reached) */ nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH); nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL); /* set fifo irq trigger levels */ nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV | CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON); /* clear hardware rx and tx fifos */ nvt_clear_cir_fifo(nvt); nvt_clear_tx_fifo(nvt); nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR); } static void nvt_cir_wake_regs_init(struct nvt_dev *nvt) { nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE); /* * Disable RX, set specific carrier on = low, off = high, * and sample period (currently 50us) */ nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV | CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL, CIR_WAKE_IRCON); /* clear any and all stray interrupts */ nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS); } static void nvt_enable_wake(struct nvt_dev *nvt) { unsigned long flags; nvt_efm_enable(nvt); nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI); nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE); nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2); nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE); nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN); nvt_efm_disable(nvt); spin_lock_irqsave(&nvt->lock, flags); nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN | CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV | CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL, CIR_WAKE_IRCON); nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS); nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN); spin_unlock_irqrestore(&nvt->lock, flags); } #if 0 /* Currently unused */ /* rx carrier detect only works in learning mode, must be called w/lock */ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt) { u32 count, carrier, duration = 0; int i; count = nvt_cir_reg_read(nvt, CIR_FCCL) | nvt_cir_reg_read(nvt, CIR_FCCH) << 8; for (i = 0; i < nvt->pkts; i++) { if (nvt->buf[i] & BUF_PULSE_BIT) duration += nvt->buf[i] & BUF_LEN_MASK; } duration *= SAMPLE_PERIOD; if (!count || !duration) { dev_notice(nvt_get_dev(nvt), "Unable to determine carrier! 
(c:%u, d:%u)", count, duration); return 0; } carrier = MS_TO_NS(count) / duration; if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER)) nvt_dbg("WTF? Carrier frequency out of range!"); nvt_dbg("Carrier frequency: %u (count %u, duration %u)", carrier, count, duration); return carrier; } #endif static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev, struct rc_scancode_filter *sc_filter) { u8 buf_val; int i, ret, count; unsigned int val; struct ir_raw_event *raw; u8 wake_buf[WAKEUP_MAX_SIZE]; bool complete; /* Require mask to be set */ if (!sc_filter->mask) return 0; raw = kmalloc_array(WAKEUP_MAX_SIZE, sizeof(*raw), GFP_KERNEL); if (!raw) return -ENOMEM; ret = ir_raw_encode_scancode(dev->wakeup_protocol, sc_filter->data, raw, WAKEUP_MAX_SIZE); complete = (ret != -ENOBUFS); if (!complete) ret = WAKEUP_MAX_SIZE; else if (ret < 0) goto out_raw; /* Inspect the ir samples */ for (i = 0, count = 0; i < ret && count < WAKEUP_MAX_SIZE; ++i) { val = raw[i].duration / SAMPLE_PERIOD; /* Split too large values into several smaller ones */ while (val > 0 && count < WAKEUP_MAX_SIZE) { /* Skip last value for better comparison tolerance */ if (complete && i == ret - 1 && val < BUF_LEN_MASK) break; /* Clamp values to BUF_LEN_MASK at most */ buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val; wake_buf[count] = buf_val; val -= buf_val; if ((raw[i]).pulse) wake_buf[count] |= BUF_PULSE_BIT; count++; } } nvt_write_wakeup_codes(dev, wake_buf, count); ret = 0; out_raw: kfree(raw); return ret; } /* dump contents of the last rx buffer we got from the hw rx fifo */ static void nvt_dump_rx_buf(struct nvt_dev *nvt) { int i; printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts); for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++) printk(KERN_CONT "0x%02x ", nvt->buf[i]); printk(KERN_CONT "\n"); } /* * Process raw data in rx driver buffer, store it in raw IR event kfifo, * trigger decode when appropriate. * * We get IR data samples one byte at a time. If the msb is set, its a pulse, * otherwise its a space. The lower 7 bits are the count of SAMPLE_PERIOD * (default 50us) intervals for that pulse/space. A discrete signal is * followed by a series of 0x7f packets, then either 0x7<something> or 0x80 * to signal more IR coming (repeats) or end of IR, respectively. We store * sample data in the raw event kfifo until we see 0x7<something> (except f) * or 0x80, at which time, we trigger a decode operation. */ static void nvt_process_rx_ir_data(struct nvt_dev *nvt) { struct ir_raw_event rawir = {}; u8 sample; int i; nvt_dbg_verbose("%s firing", __func__); if (debug) nvt_dump_rx_buf(nvt); nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts); for (i = 0; i < nvt->pkts; i++) { sample = nvt->buf[i]; rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); rawir.duration = (sample & BUF_LEN_MASK) * SAMPLE_PERIOD; nvt_dbg("Storing %s with duration %d", rawir.pulse ? 
"pulse" : "space", rawir.duration); ir_raw_event_store_with_filter(nvt->rdev, &rawir); } nvt->pkts = 0; nvt_dbg("Calling ir_raw_event_handle\n"); ir_raw_event_handle(nvt->rdev); nvt_dbg_verbose("%s done", __func__); } static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt) { dev_warn(nvt_get_dev(nvt), "RX FIFO overrun detected, flushing data!"); nvt->pkts = 0; nvt_clear_cir_fifo(nvt); ir_raw_event_overflow(nvt->rdev); } /* copy data from hardware rx fifo into driver buffer */ static void nvt_get_rx_ir_data(struct nvt_dev *nvt) { u8 fifocount; int i; /* Get count of how many bytes to read from RX FIFO */ fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT); nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount); /* Read fifocount bytes from CIR Sample RX FIFO register */ for (i = 0; i < fifocount; i++) nvt->buf[i] = nvt_cir_reg_read(nvt, CIR_SRXFIFO); nvt->pkts = fifocount; nvt_dbg("%s: pkts now %d", __func__, nvt->pkts); nvt_process_rx_ir_data(nvt); } static void nvt_cir_log_irqs(u8 status, u8 iren) { nvt_dbg("IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s", status, iren, status & CIR_IRSTS_RDR ? " RDR" : "", status & CIR_IRSTS_RTR ? " RTR" : "", status & CIR_IRSTS_PE ? " PE" : "", status & CIR_IRSTS_RFO ? " RFO" : "", status & CIR_IRSTS_TE ? " TE" : "", status & CIR_IRSTS_TTR ? " TTR" : "", status & CIR_IRSTS_TFU ? " TFU" : "", status & CIR_IRSTS_GH ? " GH" : "", status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE | CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR | CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : ""); } /* interrupt service routine for incoming and outgoing CIR data */ static irqreturn_t nvt_cir_isr(int irq, void *data) { struct nvt_dev *nvt = data; u8 status, iren; nvt_dbg_verbose("%s firing", __func__); spin_lock(&nvt->lock); /* * Get IR Status register contents. Write 1 to ack/clear * * bit: reg name - description * 7: CIR_IRSTS_RDR - RX Data Ready * 6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach * 5: CIR_IRSTS_PE - Packet End * 4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set) * 3: CIR_IRSTS_TE - TX FIFO Empty * 2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach * 1: CIR_IRSTS_TFU - TX FIFO Underrun * 0: CIR_IRSTS_GH - Min Length Detected */ status = nvt_cir_reg_read(nvt, CIR_IRSTS); iren = nvt_cir_reg_read(nvt, CIR_IREN); /* At least NCT6779D creates a spurious interrupt when the * logical device is being disabled. 
*/ if (status == 0xff && iren == 0xff) { spin_unlock(&nvt->lock); nvt_dbg_verbose("Spurious interrupt detected"); return IRQ_HANDLED; } /* IRQ may be shared with CIR WAKE, therefore check for each * status bit whether the related interrupt source is enabled */ if (!(status & iren)) { spin_unlock(&nvt->lock); nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__); return IRQ_NONE; } /* ack/clear all irq flags we've got */ nvt_cir_reg_write(nvt, status, CIR_IRSTS); nvt_cir_reg_write(nvt, 0, CIR_IRSTS); nvt_cir_log_irqs(status, iren); if (status & CIR_IRSTS_RFO) nvt_handle_rx_fifo_overrun(nvt); else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE)) nvt_get_rx_ir_data(nvt); spin_unlock(&nvt->lock); nvt_dbg_verbose("%s done", __func__); return IRQ_HANDLED; } static void nvt_enable_cir(struct nvt_dev *nvt) { unsigned long flags; /* enable the CIR logical device */ nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR); spin_lock_irqsave(&nvt->lock, flags); /* * Enable TX and RX, specify carrier on = low, off = high, and set * sample period (currently 50us) */ nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN | CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL, CIR_IRCON); /* clear all pending interrupts */ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS); /* enable interrupts */ nvt_set_cir_iren(nvt); spin_unlock_irqrestore(&nvt->lock, flags); } static void nvt_disable_cir(struct nvt_dev *nvt) { unsigned long flags; spin_lock_irqsave(&nvt->lock, flags); /* disable CIR interrupts */ nvt_cir_reg_write(nvt, 0, CIR_IREN); /* clear any and all pending interrupts */ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS); /* clear all function enable flags */ nvt_cir_reg_write(nvt, 0, CIR_IRCON); /* clear hardware rx and tx fifos */ nvt_clear_cir_fifo(nvt); nvt_clear_tx_fifo(nvt); spin_unlock_irqrestore(&nvt->lock, flags); /* disable the CIR logical device */ nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR); } static int nvt_open(struct rc_dev *dev) { struct nvt_dev *nvt = dev->priv; nvt_enable_cir(nvt); return 0; } static void nvt_close(struct rc_dev *dev) { struct nvt_dev *nvt = dev->priv; nvt_disable_cir(nvt); } /* Allocate memory, probe hardware, and initialize everything */ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) { struct nvt_dev *nvt; struct rc_dev *rdev; int ret; nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL); if (!nvt) return -ENOMEM; /* input device for IR remote */ nvt->rdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW); if (!nvt->rdev) return -ENOMEM; rdev = nvt->rdev; /* activate pnp device */ ret = pnp_activate_dev(pdev); if (ret) { dev_err(&pdev->dev, "Could not activate PNP device!\n"); return ret; } /* validate pnp resources */ if (!pnp_port_valid(pdev, 0) || pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) { dev_err(&pdev->dev, "IR PNP Port not valid!\n"); return -EINVAL; } if (!pnp_irq_valid(pdev, 0)) { dev_err(&pdev->dev, "PNP IRQ not valid!\n"); return -EINVAL; } if (!pnp_port_valid(pdev, 1) || pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) { dev_err(&pdev->dev, "Wake PNP Port not valid!\n"); return -EINVAL; } nvt->cir_addr = pnp_port_start(pdev, 0); nvt->cir_irq = pnp_irq(pdev, 0); nvt->cir_wake_addr = pnp_port_start(pdev, 1); nvt->cr_efir = CR_EFIR; nvt->cr_efdr = CR_EFDR; spin_lock_init(&nvt->lock); pnp_set_drvdata(pdev, nvt); ret = nvt_hw_detect(nvt); if (ret) return ret; /* Initialize CIR & CIR Wake Logical Devices */ nvt_efm_enable(nvt); nvt_cir_ldev_init(nvt); nvt_cir_wake_ldev_init(nvt); nvt_efm_disable(nvt); /* * Initialize CIR & CIR Wake Config Registers * and 
enable logical devices */ nvt_cir_regs_init(nvt); nvt_cir_wake_regs_init(nvt); /* Set up the rc device */ rdev->priv = nvt; rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rdev->allowed_wakeup_protocols = RC_PROTO_BIT_ALL_IR_ENCODER; rdev->encode_wakeup = true; rdev->open = nvt_open; rdev->close = nvt_close; rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter; rdev->device_name = "Nuvoton w836x7hg Infrared Remote Transceiver"; rdev->input_phys = "nuvoton/cir0"; rdev->input_id.bustype = BUS_HOST; rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2; rdev->input_id.product = nvt->chip_major; rdev->input_id.version = nvt->chip_minor; rdev->driver_name = NVT_DRIVER_NAME; rdev->map_name = RC_MAP_RC6_MCE; rdev->timeout = MS_TO_US(100); /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */ rdev->rx_resolution = CIR_SAMPLE_PERIOD; #if 0 rdev->min_timeout = XYZ; rdev->max_timeout = XYZ; #endif ret = devm_rc_register_device(&pdev->dev, rdev); if (ret) return ret; /* now claim resources */ if (!devm_request_region(&pdev->dev, nvt->cir_addr, CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) return -EBUSY; ret = devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr, IRQF_SHARED, NVT_DRIVER_NAME, nvt); if (ret) return ret; if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr, CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake")) return -EBUSY; ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data); if (ret) return ret; device_init_wakeup(&pdev->dev, true); dev_notice(&pdev->dev, "driver has been successfully loaded\n"); if (debug) { cir_dump_regs(nvt); cir_wake_dump_regs(nvt); } return 0; } static void nvt_remove(struct pnp_dev *pdev) { struct nvt_dev *nvt = pnp_get_drvdata(pdev); device_remove_file(&nvt->rdev->dev, &dev_attr_wakeup_data); nvt_disable_cir(nvt); /* enable CIR Wake (for IR power-on) */ nvt_enable_wake(nvt); } static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state) { struct nvt_dev *nvt = pnp_get_drvdata(pdev); nvt_dbg("%s called", __func__); mutex_lock(&nvt->rdev->lock); if (nvt->rdev->users) nvt_disable_cir(nvt); mutex_unlock(&nvt->rdev->lock); /* make sure wake is enabled */ nvt_enable_wake(nvt); return 0; } static int nvt_resume(struct pnp_dev *pdev) { struct nvt_dev *nvt = pnp_get_drvdata(pdev); nvt_dbg("%s called", __func__); nvt_cir_regs_init(nvt); nvt_cir_wake_regs_init(nvt); mutex_lock(&nvt->rdev->lock); if (nvt->rdev->users) nvt_enable_cir(nvt); mutex_unlock(&nvt->rdev->lock); return 0; } static void nvt_shutdown(struct pnp_dev *pdev) { struct nvt_dev *nvt = pnp_get_drvdata(pdev); nvt_enable_wake(nvt); } static const struct pnp_device_id nvt_ids[] = { { "WEC0530", 0 }, /* CIR */ { "NTN0530", 0 }, /* CIR for new chip's pnp id*/ { "", 0 }, }; static struct pnp_driver nvt_driver = { .name = NVT_DRIVER_NAME, .id_table = nvt_ids, .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, .probe = nvt_probe, .remove = nvt_remove, .suspend = nvt_suspend, .resume = nvt_resume, .shutdown = nvt_shutdown, }; module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable debugging output"); MODULE_DEVICE_TABLE(pnp, nvt_ids); MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver"); MODULE_AUTHOR("Jarod Wilson <[email protected]>"); MODULE_LICENSE("GPL"); module_pnp_driver(nvt_driver);
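/*
 * Illustrative usage (shell), not part of this driver: the wakeup_data
 * attribute created in nvt_probe() exposes the CIR wake FIFO as pulse/space
 * durations in microseconds ("rc0" is an example index):
 *
 *	cat /sys/class/rc/rc0/wakeup_data
 *	echo "500 500 500 500" > /sys/class/rc/rc0/wakeup_data
 *
 * wakeup_data_store() rounds each value to the 50 us sample period, rejects
 * values above 127 periods, and marks even-indexed entries as pulses.
 */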
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1993 Hamish Macdonald * Copyright (C) 1999 D. Jeff Dionne * Copyright (C) 2001 Georges Menie, Ken Desmet * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/init.h> #include <asm/machdep.h> #include <asm/MC68VZ328.h> #include "m68328.h" #include "screen.h" /***************************************************************************/ /* Init Dragon Engine II hardware */ /***************************************************************************/ static void dragen2_reset(void) { local_irq_disable(); #ifdef CONFIG_INIT_LCD PBDATA |= 0x20; /* disable CCFL light */ PKDATA |= 0x4; /* disable LCD controller */ LCKCON = 0; #endif __asm__ __volatile__( "reset\n\t" "moveal #0x04000000, %a0\n\t" "moveal 0(%a0), %sp\n\t" "moveal 4(%a0), %a0\n\t" "jmp (%a0)" ); } void __init init_dragen2(char *command, int size) { mach_reset = dragen2_reset; #ifdef CONFIG_DIRECT_IO_ACCESS SCR = 0x10; /* allow user access to internal registers */ #endif /* CSGB Init */ CSGBB = 0x4000; CSB = 0x1a1; /* CS8900 init */ /* PK3: hardware sleep function pin, active low */ PKSEL |= PK(3); /* select pin as I/O */ PKDIR |= PK(3); /* select pin as output */ PKDATA |= PK(3); /* set pin high */ /* PF5: hardware reset function pin, active high */ PFSEL |= PF(5); /* select pin as I/O */ PFDIR |= PF(5); /* select pin as output */ PFDATA &= ~PF(5); /* set pin low */ /* cs8900 hardware reset */ PFDATA |= PF(5); { int i; for (i = 0; i < 32000; ++i); } PFDATA &= ~PF(5); /* INT1 enable (cs8900 IRQ) */ PDPOL &= ~PD(1); /* active high signal */ PDIQEG &= ~PD(1); PDIRQEN |= PD(1); /* IRQ enabled */ #ifdef CONFIG_INIT_LCD /* initialize LCD controller */ LSSA = (long) screen_bits; LVPW = 0x14; LXMAX = 0x140; LYMAX = 0xef; LRRA = 0; LPXCD = 3; LPICF = 0x08; LPOLCF = 0; LCKCON = 0x80; PCPDEN = 0xff; PCSEL = 0; /* Enable LCD controller */ PKDIR |= 0x4; PKSEL |= 0x4; PKDATA &= ~0x4; /* Enable CCFL backlighting circuit */ PBDIR |= 0x20; PBSEL |= 0x20; PBDATA &= ~0x20; /* contrast control register */ PFDIR |= 0x1; PFSEL &= ~0x1; PWMR = 0x037F; #endif }
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 Impinj, Inc.
 *
 * Author: Andrey Smirnov <[email protected]>
 */

#ifndef DT_BINDING_RESET_IMX7_H
#define DT_BINDING_RESET_IMX7_H

#define IMX7_RESET_A7_CORE_POR_RESET0	0
#define IMX7_RESET_A7_CORE_POR_RESET1	1
#define IMX7_RESET_A7_CORE_RESET0	2
#define IMX7_RESET_A7_CORE_RESET1	3
#define IMX7_RESET_A7_DBG_RESET0	4
#define IMX7_RESET_A7_DBG_RESET1	5
#define IMX7_RESET_A7_ETM_RESET0	6
#define IMX7_RESET_A7_ETM_RESET1	7
#define IMX7_RESET_A7_SOC_DBG_RESET	8
#define IMX7_RESET_A7_L2RESET		9
#define IMX7_RESET_SW_M4C_RST		10
#define IMX7_RESET_SW_M4P_RST		11
#define IMX7_RESET_EIM_RST		12
#define IMX7_RESET_HSICPHY_PORT_RST	13
#define IMX7_RESET_USBPHY1_POR		14
#define IMX7_RESET_USBPHY1_PORT_RST	15
#define IMX7_RESET_USBPHY2_POR		16
#define IMX7_RESET_USBPHY2_PORT_RST	17
#define IMX7_RESET_MIPI_PHY_MRST	18
#define IMX7_RESET_MIPI_PHY_SRST	19

/*
 * IMX7_RESET_PCIEPHY is a logical reset line combining PCIEPHY_BTN
 * and PCIEPHY_G_RST
 */
#define IMX7_RESET_PCIEPHY		20
#define IMX7_RESET_PCIEPHY_PERST	21

/*
 * IMX7_RESET_PCIE_CTRL_APPS_EN is not strictly a reset line, but it
 * can be used to inhibit the PCIe LTSSM, so, in a way, it can be thought
 * of as one
 */
#define IMX7_RESET_PCIE_CTRL_APPS_EN	22
#define IMX7_RESET_DDRC_PRST		23
#define IMX7_RESET_DDRC_CORE_RST	24
#define IMX7_RESET_PCIE_CTRL_APPS_TURNOFF	25

#define IMX7_RESET_NUM			26

#endif
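/*
 * Illustrative usage (DTS), not part of this header: consumers reference
 * these indices through the "resets" property of the reset controller.
 * The reset-names below are an assumption for illustration, loosely
 * following the i.MX7 PCIe binding:
 *
 *	resets = <&src IMX7_RESET_PCIEPHY>,
 *		 <&src IMX7_RESET_PCIE_CTRL_APPS_EN>;
 *	reset-names = "pciephy", "apps";
 */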
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018 Marvell * * Authors: * Evan Wang <[email protected]> * Miquèl Raynal <[email protected]> * Pali Rohár <[email protected]> * Marek Behún <[email protected]> * * Structure inspired from phy-mvebu-cp110-comphy.c written by Antoine Tenart. * Comphy code from ARM Trusted Firmware ported by Pali Rohár <[email protected]> * and Marek Behún <[email protected]>. */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/phy.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #define PLL_SET_DELAY_US 600 #define COMPHY_PLL_SLEEP 1000 #define COMPHY_PLL_TIMEOUT 150000 /* Comphy lane2 indirect access register offset */ #define COMPHY_LANE2_INDIR_ADDR 0x0 #define COMPHY_LANE2_INDIR_DATA 0x4 /* SATA and USB3 PHY offset compared to SATA PHY */ #define COMPHY_LANE2_REGS_BASE 0x200 /* * When accessing common PHY lane registers directly, we need to shift by 1, * since the registers are 16-bit. */ #define COMPHY_LANE_REG_DIRECT(reg) (((reg) & 0x7FF) << 1) /* COMPHY registers */ #define COMPHY_POWER_PLL_CTRL 0x01 #define PU_IVREF_BIT BIT(15) #define PU_PLL_BIT BIT(14) #define PU_RX_BIT BIT(13) #define PU_TX_BIT BIT(12) #define PU_TX_INTP_BIT BIT(11) #define PU_DFE_BIT BIT(10) #define RESET_DTL_RX_BIT BIT(9) #define PLL_LOCK_BIT BIT(8) #define REF_FREF_SEL_MASK GENMASK(4, 0) #define REF_FREF_SEL_SERDES_25MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x1) #define REF_FREF_SEL_SERDES_40MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x3) #define REF_FREF_SEL_SERDES_50MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x4) #define REF_FREF_SEL_PCIE_USB3_25MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x2) #define REF_FREF_SEL_PCIE_USB3_40MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x3) #define COMPHY_MODE_MASK GENMASK(7, 5) #define COMPHY_MODE_SATA FIELD_PREP(COMPHY_MODE_MASK, 0x0) #define COMPHY_MODE_PCIE FIELD_PREP(COMPHY_MODE_MASK, 0x3) #define COMPHY_MODE_SERDES FIELD_PREP(COMPHY_MODE_MASK, 0x4) #define COMPHY_MODE_USB3 FIELD_PREP(COMPHY_MODE_MASK, 0x5) #define COMPHY_KVCO_CAL_CTRL 0x02 #define USE_MAX_PLL_RATE_BIT BIT(12) #define SPEED_PLL_MASK GENMASK(7, 2) #define SPEED_PLL_VALUE_16 FIELD_PREP(SPEED_PLL_MASK, 0x10) #define COMPHY_DIG_LOOPBACK_EN 0x23 #define SEL_DATA_WIDTH_MASK GENMASK(11, 10) #define DATA_WIDTH_10BIT FIELD_PREP(SEL_DATA_WIDTH_MASK, 0x0) #define DATA_WIDTH_20BIT FIELD_PREP(SEL_DATA_WIDTH_MASK, 0x1) #define DATA_WIDTH_40BIT FIELD_PREP(SEL_DATA_WIDTH_MASK, 0x2) #define PLL_READY_TX_BIT BIT(4) #define COMPHY_SYNC_PATTERN 0x24 #define TXD_INVERT_BIT BIT(10) #define RXD_INVERT_BIT BIT(11) #define COMPHY_SYNC_MASK_GEN 0x25 #define PHY_GEN_MAX_MASK GENMASK(11, 10) #define PHY_GEN_MAX_USB3_5G FIELD_PREP(PHY_GEN_MAX_MASK, 0x1) #define COMPHY_ISOLATION_CTRL 0x26 #define PHY_ISOLATE_MODE BIT(15) #define COMPHY_GEN2_SET2 0x3e #define GS2_TX_SSC_AMP_MASK GENMASK(15, 9) #define GS2_TX_SSC_AMP_4128 FIELD_PREP(GS2_TX_SSC_AMP_MASK, 0x20) #define GS2_VREG_RXTX_MAS_ISET_MASK GENMASK(8, 7) #define GS2_VREG_RXTX_MAS_ISET_60U FIELD_PREP(GS2_VREG_RXTX_MAS_ISET_MASK,\ 0x0) #define GS2_VREG_RXTX_MAS_ISET_80U FIELD_PREP(GS2_VREG_RXTX_MAS_ISET_MASK,\ 0x1) #define GS2_VREG_RXTX_MAS_ISET_100U FIELD_PREP(GS2_VREG_RXTX_MAS_ISET_MASK,\ 0x2) #define GS2_VREG_RXTX_MAS_ISET_120U FIELD_PREP(GS2_VREG_RXTX_MAS_ISET_MASK,\ 0x3) #define GS2_RSVD_6_0_MASK GENMASK(6, 0) #define COMPHY_GEN3_SET2 0x3f #define COMPHY_IDLE_SYNC_EN 0x48 #define IDLE_SYNC_EN BIT(12) 
#define COMPHY_MISC_CTRL0 0x4F #define CLK100M_125M_EN BIT(4) #define TXDCLK_2X_SEL BIT(6) #define CLK500M_EN BIT(7) #define PHY_REF_CLK_SEL BIT(10) #define COMPHY_SFT_RESET 0x52 #define SFT_RST BIT(9) #define SFT_RST_NO_REG BIT(10) #define COMPHY_MISC_CTRL1 0x73 #define SEL_BITS_PCIE_FORCE BIT(15) #define COMPHY_GEN2_SET3 0x112 #define GS3_FFE_CAP_SEL_MASK GENMASK(3, 0) #define GS3_FFE_CAP_SEL_VALUE FIELD_PREP(GS3_FFE_CAP_SEL_MASK, 0xF) /* PIPE registers */ #define COMPHY_PIPE_LANE_CFG0 0x180 #define PRD_TXDEEMPH0_MASK BIT(0) #define PRD_TXMARGIN_MASK GENMASK(3, 1) #define PRD_TXSWING_MASK BIT(4) #define CFG_TX_ALIGN_POS_MASK GENMASK(8, 5) #define COMPHY_PIPE_LANE_CFG1 0x181 #define PRD_TXDEEMPH1_MASK BIT(15) #define USE_MAX_PLL_RATE_EN BIT(9) #define TX_DET_RX_MODE BIT(6) #define GEN2_TX_DATA_DLY_MASK GENMASK(4, 3) #define GEN2_TX_DATA_DLY_DEFT FIELD_PREP(GEN2_TX_DATA_DLY_MASK, 2) #define TX_ELEC_IDLE_MODE_EN BIT(0) #define COMPHY_PIPE_LANE_STAT1 0x183 #define TXDCLK_PCLK_EN BIT(0) #define COMPHY_PIPE_LANE_CFG4 0x188 #define SPREAD_SPECTRUM_CLK_EN BIT(7) #define COMPHY_PIPE_RST_CLK_CTRL 0x1C1 #define PIPE_SOFT_RESET BIT(0) #define PIPE_REG_RESET BIT(1) #define MODE_CORE_CLK_FREQ_SEL BIT(9) #define MODE_PIPE_WIDTH_32 BIT(3) #define MODE_REFDIV_MASK GENMASK(5, 4) #define MODE_REFDIV_BY_4 FIELD_PREP(MODE_REFDIV_MASK, 0x2) #define COMPHY_PIPE_TEST_MODE_CTRL 0x1C2 #define MODE_MARGIN_OVERRIDE BIT(2) #define COMPHY_PIPE_CLK_SRC_LO 0x1C3 #define MODE_CLK_SRC BIT(0) #define BUNDLE_PERIOD_SEL BIT(1) #define BUNDLE_PERIOD_SCALE_MASK GENMASK(3, 2) #define BUNDLE_SAMPLE_CTRL BIT(4) #define PLL_READY_DLY_MASK GENMASK(7, 5) #define CFG_SEL_20B BIT(15) #define COMPHY_PIPE_PWR_MGM_TIM1 0x1D0 #define CFG_PM_OSCCLK_WAIT_MASK GENMASK(15, 12) #define CFG_PM_RXDEN_WAIT_MASK GENMASK(11, 8) #define CFG_PM_RXDEN_WAIT_1_UNIT FIELD_PREP(CFG_PM_RXDEN_WAIT_MASK, 0x1) #define CFG_PM_RXDLOZ_WAIT_MASK GENMASK(7, 0) #define CFG_PM_RXDLOZ_WAIT_7_UNIT FIELD_PREP(CFG_PM_RXDLOZ_WAIT_MASK, 0x7) #define CFG_PM_RXDLOZ_WAIT_12_UNIT FIELD_PREP(CFG_PM_RXDLOZ_WAIT_MASK, 0xC) /* * This register is not from PHY lane register space. It only exists in the * indirect register space, before the actual PHY lane 2 registers. So the * offset is absolute, not relative to COMPHY_LANE2_REGS_BASE. * It is used only for SATA PHY initialization. 
*/ #define COMPHY_RESERVED_REG 0x0E #define PHYCTRL_FRM_PIN_BIT BIT(13) /* South Bridge PHY Configuration Registers */ #define COMPHY_PHY_REG(lane, reg) (((1 - (lane)) * 0x28) + ((reg) & 0x3f)) /* * lane0: USB3/GbE1 PHY Configuration 1 * lane1: PCIe/GbE0 PHY Configuration 1 * (used only by SGMII code) */ #define COMPHY_PHY_CFG1 0x0 #define PIN_PU_IVREF_BIT BIT(1) #define PIN_RESET_CORE_BIT BIT(11) #define PIN_RESET_COMPHY_BIT BIT(12) #define PIN_PU_PLL_BIT BIT(16) #define PIN_PU_RX_BIT BIT(17) #define PIN_PU_TX_BIT BIT(18) #define PIN_TX_IDLE_BIT BIT(19) #define GEN_RX_SEL_MASK GENMASK(25, 22) #define GEN_RX_SEL_VALUE(val) FIELD_PREP(GEN_RX_SEL_MASK, (val)) #define GEN_TX_SEL_MASK GENMASK(29, 26) #define GEN_TX_SEL_VALUE(val) FIELD_PREP(GEN_TX_SEL_MASK, (val)) #define SERDES_SPEED_1_25_G 0x6 #define SERDES_SPEED_3_125_G 0x8 #define PHY_RX_INIT_BIT BIT(30) /* * lane0: USB3/GbE1 PHY Status 1 * lane1: PCIe/GbE0 PHY Status 1 * (used only by SGMII code) */ #define COMPHY_PHY_STAT1 0x18 #define PHY_RX_INIT_DONE_BIT BIT(0) #define PHY_PLL_READY_RX_BIT BIT(2) #define PHY_PLL_READY_TX_BIT BIT(3) /* PHY Selector */ #define COMPHY_SELECTOR_PHY_REG 0xFC /* bit0: 0: Lane1 is GbE0; 1: Lane1 is PCIe */ #define COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT BIT(0) /* bit4: 0: Lane0 is GbE1; 1: Lane0 is USB3 */ #define COMPHY_SELECTOR_USB3_GBE1_SEL_BIT BIT(4) /* bit8: 0: Lane0 is USB3 instead of GbE1, Lane2 is SATA; 1: Lane2 is USB3 */ #define COMPHY_SELECTOR_USB3_PHY_SEL_BIT BIT(8) struct mvebu_a3700_comphy_conf { unsigned int lane; enum phy_mode mode; int submode; }; #define MVEBU_A3700_COMPHY_CONF(_lane, _mode, _smode) \ { \ .lane = _lane, \ .mode = _mode, \ .submode = _smode, \ } #define MVEBU_A3700_COMPHY_CONF_GEN(_lane, _mode) \ MVEBU_A3700_COMPHY_CONF(_lane, _mode, PHY_INTERFACE_MODE_NA) #define MVEBU_A3700_COMPHY_CONF_ETH(_lane, _smode) \ MVEBU_A3700_COMPHY_CONF(_lane, PHY_MODE_ETHERNET, _smode) static const struct mvebu_a3700_comphy_conf mvebu_a3700_comphy_modes[] = { /* lane 0 */ MVEBU_A3700_COMPHY_CONF_GEN(0, PHY_MODE_USB_HOST_SS), MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_SGMII), MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_1000BASEX), MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_2500BASEX), /* lane 1 */ MVEBU_A3700_COMPHY_CONF_GEN(1, PHY_MODE_PCIE), MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_SGMII), MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_1000BASEX), MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_2500BASEX), /* lane 2 */ MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_SATA), MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_USB_HOST_SS), }; struct mvebu_a3700_comphy_priv { void __iomem *comphy_regs; void __iomem *lane0_phy_regs; /* USB3 and GbE1 */ void __iomem *lane1_phy_regs; /* PCIe and GbE0 */ void __iomem *lane2_phy_indirect; /* SATA and USB3 */ spinlock_t lock; /* for PHY selector access */ bool xtal_is_40m; }; struct mvebu_a3700_comphy_lane { struct mvebu_a3700_comphy_priv *priv; struct device *dev; unsigned int id; enum phy_mode mode; int submode; bool invert_tx; bool invert_rx; }; struct gbe_phy_init_data_fix { u16 addr; u16 value; }; /* Changes to 40M1G25 mode data required for running 40M3G125 init mode */ static struct gbe_phy_init_data_fix gbe_phy_init_fix[] = { { 0x005, 0x07CC }, { 0x015, 0x0000 }, { 0x01B, 0x0000 }, { 0x01D, 0x0000 }, { 0x01E, 0x0000 }, { 0x01F, 0x0000 }, { 0x020, 0x0000 }, { 0x021, 0x0030 }, { 0x026, 0x0888 }, { 0x04D, 0x0152 }, { 0x04F, 0xA020 }, { 0x050, 0x07CC }, { 0x053, 0xE9CA }, { 0x055, 0xBD97 }, { 0x071, 0x3015 }, { 0x076, 0x03AA }, { 0x07C, 
0x0FDF }, { 0x0C2, 0x3030 }, { 0x0C3, 0x8000 }, { 0x0E2, 0x5550 }, { 0x0E3, 0x12A4 }, { 0x0E4, 0x7D00 }, { 0x0E6, 0x0C83 }, { 0x101, 0xFCC0 }, { 0x104, 0x0C10 } }; /* 40M1G25 mode init data */ static u16 gbe_phy_init[512] = { /* 0 1 2 3 4 5 6 7 */ /*-----------------------------------------------------------*/ /* 8 9 A B C D E F */ 0x3110, 0xFD83, 0x6430, 0x412F, 0x82C0, 0x06FA, 0x4500, 0x6D26, /* 00 */ 0xAFC0, 0x8000, 0xC000, 0x0000, 0x2000, 0x49CC, 0x0BC9, 0x2A52, /* 08 */ 0x0BD2, 0x0CDE, 0x13D2, 0x0CE8, 0x1149, 0x10E0, 0x0000, 0x0000, /* 10 */ 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x4134, 0x0D2D, 0xFFFF, /* 18 */ 0xFFE0, 0x4030, 0x1016, 0x0030, 0x0000, 0x0800, 0x0866, 0x0000, /* 20 */ 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, /* 28 */ 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 30 */ 0x0000, 0x0000, 0x000F, 0x6A62, 0x1988, 0x3100, 0x3100, 0x3100, /* 38 */ 0x3100, 0xA708, 0x2430, 0x0830, 0x1030, 0x4610, 0xFF00, 0xFF00, /* 40 */ 0x0060, 0x1000, 0x0400, 0x0040, 0x00F0, 0x0155, 0x1100, 0xA02A, /* 48 */ 0x06FA, 0x0080, 0xB008, 0xE3ED, 0x5002, 0xB592, 0x7A80, 0x0001, /* 50 */ 0x020A, 0x8820, 0x6014, 0x8054, 0xACAA, 0xFC88, 0x2A02, 0x45CF, /* 58 */ 0x000F, 0x1817, 0x2860, 0x064F, 0x0000, 0x0204, 0x1800, 0x6000, /* 60 */ 0x810F, 0x4F23, 0x4000, 0x4498, 0x0850, 0x0000, 0x000E, 0x1002, /* 68 */ 0x9D3A, 0x3009, 0xD066, 0x0491, 0x0001, 0x6AB0, 0x0399, 0x3780, /* 70 */ 0x0040, 0x5AC0, 0x4A80, 0x0000, 0x01DF, 0x0000, 0x0007, 0x0000, /* 78 */ 0x2D54, 0x00A1, 0x4000, 0x0100, 0xA20A, 0x0000, 0x0000, 0x0000, /* 80 */ 0x0000, 0x0000, 0x0000, 0x7400, 0x0E81, 0x1000, 0x1242, 0x0210, /* 88 */ 0x80DF, 0x0F1F, 0x2F3F, 0x4F5F, 0x6F7F, 0x0F1F, 0x2F3F, 0x4F5F, /* 90 */ 0x6F7F, 0x4BAD, 0x0000, 0x0000, 0x0800, 0x0000, 0x2400, 0xB651, /* 98 */ 0xC9E0, 0x4247, 0x0A24, 0x0000, 0xAF19, 0x1004, 0x0000, 0x0000, /* A0 */ 0x0000, 0x0013, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* A8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* B0 */ 0x0000, 0x0000, 0x0000, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000, /* B8 */ 0x0000, 0x0000, 0x3010, 0xFA00, 0x0000, 0x0000, 0x0000, 0x0003, /* C0 */ 0x1618, 0x8200, 0x8000, 0x0400, 0x050F, 0x0000, 0x0000, 0x0000, /* C8 */ 0x4C93, 0x0000, 0x1000, 0x1120, 0x0010, 0x1242, 0x1242, 0x1E00, /* D0 */ 0x0000, 0x0000, 0x0000, 0x00F8, 0x0000, 0x0041, 0x0800, 0x0000, /* D8 */ 0x82A0, 0x572E, 0x2490, 0x14A9, 0x4E00, 0x0000, 0x0803, 0x0541, /* E0 */ 0x0C15, 0x0000, 0x0000, 0x0400, 0x2626, 0x0000, 0x0000, 0x4200, /* E8 */ 0x0000, 0xAA55, 0x1020, 0x0000, 0x0000, 0x5010, 0x0000, 0x0000, /* F0 */ 0x0000, 0x0000, 0x5000, 0x0000, 0x0000, 0x0000, 0x02F2, 0x0000, /* F8 */ 0x101F, 0xFDC0, 0x4000, 0x8010, 0x0110, 0x0006, 0x0000, 0x0000, /*100 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*108 */ 0x04CF, 0x0000, 0x04CF, 0x0000, 0x04CF, 0x0000, 0x04C6, 0x0000, /*110 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*118 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*120 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*128 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*130 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*138 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*140 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*148 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*150 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*158 */ 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*160 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*168 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*170 */ 0x0000, 0x0000, 0x0000, 0x00F0, 0x08A2, 0x3112, 0x0A14, 0x0000, /*178 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*180 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*188 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*190 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*198 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1A0 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1A8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1B0 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1B8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1C0 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1C8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1D0 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1D8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1E0 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1E8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1F0 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 /*1F8 */ }; static inline void comphy_reg_set(void __iomem *addr, u32 data, u32 mask) { u32 val; val = readl(addr); val = (val & ~mask) | (data & mask); writel(val, addr); } static inline void comphy_reg_set16(void __iomem *addr, u16 data, u16 mask) { u16 val; val = readw(addr); val = (val & ~mask) | (data & mask); writew(val, addr); } /* Used for accessing lane 2 registers (SATA/USB3 PHY) */ static void comphy_set_indirect(struct mvebu_a3700_comphy_priv *priv, u32 offset, u16 data, u16 mask) { writel(offset, priv->lane2_phy_indirect + COMPHY_LANE2_INDIR_ADDR); comphy_reg_set(priv->lane2_phy_indirect + COMPHY_LANE2_INDIR_DATA, data, mask); } static void comphy_lane_reg_set(struct mvebu_a3700_comphy_lane *lane, u16 reg, u16 data, u16 mask) { if (lane->id == 2) { /* lane 2 PHY registers are accessed indirectly */ comphy_set_indirect(lane->priv, reg + COMPHY_LANE2_REGS_BASE, data, mask); } else { void __iomem *base = lane->id == 1 ? lane->priv->lane1_phy_regs : lane->priv->lane0_phy_regs; comphy_reg_set16(base + COMPHY_LANE_REG_DIRECT(reg), data, mask); } } static int comphy_lane_reg_poll(struct mvebu_a3700_comphy_lane *lane, u16 reg, u16 bits, ulong sleep_us, ulong timeout_us) { int ret; if (lane->id == 2) { u32 data; /* lane 2 PHY registers are accessed indirectly */ writel(reg + COMPHY_LANE2_REGS_BASE, lane->priv->lane2_phy_indirect + COMPHY_LANE2_INDIR_ADDR); ret = readl_poll_timeout(lane->priv->lane2_phy_indirect + COMPHY_LANE2_INDIR_DATA, data, (data & bits) == bits, sleep_us, timeout_us); } else { void __iomem *base = lane->id == 1 ? 
lane->priv->lane1_phy_regs : lane->priv->lane0_phy_regs; u16 data; ret = readw_poll_timeout(base + COMPHY_LANE_REG_DIRECT(reg), data, (data & bits) == bits, sleep_us, timeout_us); } return ret; }
static void comphy_periph_reg_set(struct mvebu_a3700_comphy_lane *lane, u8 reg, u32 data, u32 mask) { comphy_reg_set(lane->priv->comphy_regs + COMPHY_PHY_REG(lane->id, reg), data, mask); }
static int comphy_periph_reg_poll(struct mvebu_a3700_comphy_lane *lane, u8 reg, u32 bits, ulong sleep_us, ulong timeout_us) { u32 data; return readl_poll_timeout(lane->priv->comphy_regs + COMPHY_PHY_REG(lane->id, reg), data, (data & bits) == bits, sleep_us, timeout_us); }
/* Configure the PHY selector for the corresponding mode */ static int mvebu_a3700_comphy_set_phy_selector(struct mvebu_a3700_comphy_lane *lane) { u32 old, new, clr = 0, set = 0; unsigned long flags; switch (lane->mode) { case PHY_MODE_SATA: /* SATA must be in Lane2 */ if (lane->id == 2) clr = COMPHY_SELECTOR_USB3_PHY_SEL_BIT; else goto error; break; case PHY_MODE_ETHERNET: if (lane->id == 0) clr = COMPHY_SELECTOR_USB3_GBE1_SEL_BIT; else if (lane->id == 1) clr = COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT; else goto error; break; case PHY_MODE_USB_HOST_SS: if (lane->id == 2) set = COMPHY_SELECTOR_USB3_PHY_SEL_BIT; else if (lane->id == 0) set = COMPHY_SELECTOR_USB3_GBE1_SEL_BIT; else goto error; break; case PHY_MODE_PCIE: /* PCIE must be in Lane1 */ if (lane->id == 1) set = COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT; else goto error; break; default: goto error; } spin_lock_irqsave(&lane->priv->lock, flags); old = readl(lane->priv->comphy_regs + COMPHY_SELECTOR_PHY_REG); new = (old & ~clr) | set; writel(new, lane->priv->comphy_regs + COMPHY_SELECTOR_PHY_REG); spin_unlock_irqrestore(&lane->priv->lock, flags); dev_dbg(lane->dev, "COMPHY[%d] mode[%d] changed PHY selector 0x%08x -> 0x%08x\n", lane->id, lane->mode, old, new); return 0; error: dev_err(lane->dev, "COMPHY[%d] mode[%d] is invalid\n", lane->id, lane->mode); return -EINVAL; }
static int mvebu_a3700_comphy_sata_power_on(struct mvebu_a3700_comphy_lane *lane) { u32 mask, data, ref_clk; int ret; /* Configure the PHY selector for SATA */ ret = mvebu_a3700_comphy_set_phy_selector(lane); if (ret) return ret; /* Clear PHY isolation mode to make it work in normal mode */ comphy_lane_reg_set(lane, COMPHY_ISOLATION_CTRL, 0x0, PHY_ISOLATE_MODE); /* 0. Check the Polarity invert bits */ data = 0x0; if (lane->invert_tx) data |= TXD_INVERT_BIT; if (lane->invert_rx) data |= RXD_INVERT_BIT; mask = TXD_INVERT_BIT | RXD_INVERT_BIT; comphy_lane_reg_set(lane, COMPHY_SYNC_PATTERN, data, mask); /* 1. Select 40-bit data width */ comphy_lane_reg_set(lane, COMPHY_DIG_LOOPBACK_EN, DATA_WIDTH_40BIT, SEL_DATA_WIDTH_MASK); /* 2. Select the reference clock (25M) and the PHY mode (SATA) */ if (lane->priv->xtal_is_40m) ref_clk = REF_FREF_SEL_SERDES_40MHZ; else ref_clk = REF_FREF_SEL_SERDES_25MHZ; data = ref_clk | COMPHY_MODE_SATA; mask = REF_FREF_SEL_MASK | COMPHY_MODE_MASK; comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); /* 3. Use maximum PLL rate (no power save) */ comphy_lane_reg_set(lane, COMPHY_KVCO_CAL_CTRL, USE_MAX_PLL_RATE_BIT, USE_MAX_PLL_RATE_BIT); /* 4. Reset reserved bit */ comphy_set_indirect(lane->priv, COMPHY_RESERVED_REG, 0x0, PHYCTRL_FRM_PIN_BIT); /* 5. Set vendor-specific configuration (it is done in the SATA driver) */ /* XXX: in U-Boot the sequence below was executed at this point; in Linux * it is not.
Now it is done only in U-Boot before this comphy * initialization - tests show that it works OK, but it is left here * for reference in case of any future problems. * reg_set(MVEBU_REGS_BASE + 0xe00a0, 0, 0xffffffff); * reg_set(MVEBU_REGS_BASE + 0xe00a4, BIT(6), BIT(6)); */ /* Wait for > 55 us to allow the PLL to be enabled */ udelay(PLL_SET_DELAY_US); /* Poll the PLL ready status */ ret = comphy_lane_reg_poll(lane, COMPHY_DIG_LOOPBACK_EN, PLL_READY_TX_BIT, COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); if (ret) dev_err(lane->dev, "Failed to lock SATA PLL\n"); return ret; }
static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane, bool is_1gbps) { int addr, fix_idx; u16 val; fix_idx = 0; for (addr = 0; addr < ARRAY_SIZE(gbe_phy_init); addr++) { /* * All PHY register values are defined in full for the 3.125 Gbps * SERDES speed. The values required for 1.25 Gbps are almost * the same, and only a few registers need to be "fixed" in * comparison to the 3.125 Gbps values. These register values are * stored in the "gbe_phy_init_fix" array. */ if (!is_1gbps && fix_idx < ARRAY_SIZE(gbe_phy_init_fix) && gbe_phy_init_fix[fix_idx].addr == addr) { /* Use the new value */ val = gbe_phy_init_fix[fix_idx].value; fix_idx++; } else { val = gbe_phy_init[addr]; } comphy_lane_reg_set(lane, addr, val, 0xFFFF); } }
static int mvebu_a3700_comphy_ethernet_power_on(struct mvebu_a3700_comphy_lane *lane) { u32 mask, data, speed_sel; int ret; /* Set the selector */ ret = mvebu_a3700_comphy_set_phy_selector(lane); if (ret) return ret; /* * 1. Reset the PHY by setting the PHY input port PIN_RESET=1. * 2. Set the PHY input ports PIN_TX_IDLE=1, PIN_PU_IVREF=1 to keep * the PHY TXP/TXN output in the idle state during PHY initialization. * 3. Set the PHY input ports PIN_PU_PLL=0, PIN_PU_RX=0, PIN_PU_TX=0. */ data = PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT | PIN_RESET_COMPHY_BIT; mask = data | PIN_RESET_CORE_BIT | PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT | PHY_RX_INIT_BIT; comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); /* 4. Release the reset to the PHY by setting PIN_RESET=0. */ data = 0x0; mask = PIN_RESET_COMPHY_BIT; comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); /* * 5. Set PIN_PHY_GEN_TX[3:0] and PIN_PHY_GEN_RX[3:0] to decide the COMPHY * bit rate */ switch (lane->submode) { case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_1000BASEX: /* SGMII 1G, SerDes speed 1.25G */ speed_sel = SERDES_SPEED_1_25_G; break; case PHY_INTERFACE_MODE_2500BASEX: /* 2500Base-X, SerDes speed 3.125G */ speed_sel = SERDES_SPEED_3_125_G; break; default: /* Other rates are not supported */ dev_err(lane->dev, "unsupported phy speed %d on comphy lane%d\n", lane->submode, lane->id); return -EINVAL; } data = GEN_RX_SEL_VALUE(speed_sel) | GEN_TX_SEL_VALUE(speed_sel); mask = GEN_RX_SEL_MASK | GEN_TX_SEL_MASK; comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); /* * 6. Wait 10 ms for the bandgap and reference clocks to stabilize; then * start SW programming. */ mdelay(10); /* 7. Program the COMPHY register PHY_MODE */ data = COMPHY_MODE_SERDES; mask = COMPHY_MODE_MASK; comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); /* * 8. Set the COMPHY register REFCLK_SEL to select the correct REFCLK * source */ data = 0x0; mask = PHY_REF_CLK_SEL; comphy_lane_reg_set(lane, COMPHY_MISC_CTRL0, data, mask); /* * 9. Set the correct reference clock frequency in the COMPHY register * REF_FREF_SEL. */ if (lane->priv->xtal_is_40m) data = REF_FREF_SEL_SERDES_50MHZ; else data = REF_FREF_SEL_SERDES_25MHZ; mask = REF_FREF_SEL_MASK; comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); /* * 10.
Program the COMPHY register PHY_GEN_MAX[1:0]. * This step is mentioned in the flow received from the verification team. * However, the PHY_GEN_MAX value is only meaningful for other interfaces * (not SERDES). For instance, it selects the SATA speed 1.5/3/6 Gbps or * the PCIe speed 2.5/5 Gbps. */ /* * 11. Program the COMPHY register SEL_BITS to set the correct parallel data * bus width */ data = DATA_WIDTH_10BIT; mask = SEL_DATA_WIDTH_MASK; comphy_lane_reg_set(lane, COMPHY_DIG_LOOPBACK_EN, data, mask); /* * 12. Since the DFE function needs to be enabled in every mode, * the COMPHY register DFE_UPDATE_EN[5:0] shall be programmed to 0x3F * for the real chip during COMPHY power on. * The value of DFE_UPDATE_EN is already 0x3F, because it is the * default value after reset of the PHY. */ /* * 13. Program the COMPHY GEN registers. * These registers should be programmed based on the lab testing results * to achieve optimal performance. Please contact the CEA group to get * the related GEN table during real chip bring-up. We only need to * run through the entire register programming flow defined by * "comphy_gbe_phy_init" when the REF clock is 40 MHz. For a 25 MHz REF * clock the default values stored in the PHY registers are OK. */ dev_dbg(lane->dev, "Running C-DPI phy init %s mode\n", lane->submode == PHY_INTERFACE_MODE_2500BASEX ? "2G5" : "1G"); if (lane->priv->xtal_is_40m) comphy_gbe_phy_init(lane, lane->submode != PHY_INTERFACE_MODE_2500BASEX); /* * 14. Check the PHY Polarity invert bit */ data = 0x0; if (lane->invert_tx) data |= TXD_INVERT_BIT; if (lane->invert_rx) data |= RXD_INVERT_BIT; mask = TXD_INVERT_BIT | RXD_INVERT_BIT; comphy_lane_reg_set(lane, COMPHY_SYNC_PATTERN, data, mask); /* * 15. Set the PHY input ports PIN_PU_PLL, PIN_PU_TX and PIN_PU_RX to 1 to * start the PHY power up sequence. All the PHY register programming should * be done before PIN_PU_PLL=1. There should be no register programming * for normal PHY operation from this point on. */ data = PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT; mask = data; comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); /* * 16. Wait for the PHY power up sequence to finish by checking the output ports * PIN_PLL_READY_TX=1 and PIN_PLL_READY_RX=1. */ ret = comphy_periph_reg_poll(lane, COMPHY_PHY_STAT1, PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT, COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); if (ret) { dev_err(lane->dev, "Failed to lock PLL for SERDES PHY %d\n", lane->id); return ret; } /* * 17. Set the COMPHY input port PIN_TX_IDLE=0 */ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, 0x0, PIN_TX_IDLE_BIT); /* * 18. After valid data appear on the PIN_RXDATA bus, set PIN_RX_INIT=1 to * start RX initialization. PIN_RX_INIT_DONE is cleared to 0 by the PHY; * after RX initialization is done, the COMPHY sets PIN_RX_INIT_DONE to 1. * Set PIN_RX_INIT=0 once PIN_RX_INIT_DONE=1. Please refer to the RX * initialization part for details.
*/ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, PHY_RX_INIT_BIT, PHY_RX_INIT_BIT); ret = comphy_periph_reg_poll(lane, COMPHY_PHY_STAT1, PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT, COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); if (ret) { dev_err(lane->dev, "Failed to lock PLL for SERDES PHY %d\n", lane->id); return ret; } ret = comphy_periph_reg_poll(lane, COMPHY_PHY_STAT1, PHY_RX_INIT_DONE_BIT, COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); if (ret) dev_err(lane->dev, "Failed to init RX of SERDES PHY %d\n", lane->id); return ret; }
static int mvebu_a3700_comphy_usb3_power_on(struct mvebu_a3700_comphy_lane *lane) { u32 mask, data, cfg, ref_clk; int ret; /* Set the PHY selector */ ret = mvebu_a3700_comphy_set_phy_selector(lane); if (ret) return ret; /* COMPHY register reset (cleared automatically) */ comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST); /* * 0. Set PHY OTG Control (0x5d034), bit 4: power up the OTG module. The * register belongs to the UTMI module, so it is set in the UTMI PHY driver. */ /* * 1. Set PRD_TXDEEMPH (3.5 dB de-emph) */ data = PRD_TXDEEMPH0_MASK; mask = PRD_TXDEEMPH0_MASK | PRD_TXMARGIN_MASK | PRD_TXSWING_MASK | CFG_TX_ALIGN_POS_MASK; comphy_lane_reg_set(lane, COMPHY_PIPE_LANE_CFG0, data, mask); /* * 2. Set BIT0: enable the transmitter in high impedance mode * Set BIT[3:4]: delay 2 clock cycles for HiZ off latency * Set BIT6: Tx detect Rx at HiZ mode * Unset BIT15: set to 0 to set the USB3 de-emphasis level to -3.5 dB * together with bit 0 of the COMPHY_PIPE_LANE_CFG0 register */ data = TX_DET_RX_MODE | GEN2_TX_DATA_DLY_DEFT | TX_ELEC_IDLE_MODE_EN; mask = PRD_TXDEEMPH1_MASK | TX_DET_RX_MODE | GEN2_TX_DATA_DLY_MASK | TX_ELEC_IDLE_MODE_EN; comphy_lane_reg_set(lane, COMPHY_PIPE_LANE_CFG1, data, mask); /* * 3. Set Spread Spectrum Clock Enabled */ comphy_lane_reg_set(lane, COMPHY_PIPE_LANE_CFG4, SPREAD_SPECTRUM_CLK_EN, SPREAD_SPECTRUM_CLK_EN); /* * 4. Set Override Margining Controls From the MAC: * use the margining signals from the lane configuration */ comphy_lane_reg_set(lane, COMPHY_PIPE_TEST_MODE_CTRL, MODE_MARGIN_OVERRIDE, 0xFFFF); /* * 5. Set Lane-to-Lane Bundle Clock Sampling Period = per PCLK cycles, * set Mode Clock Source = PCLK is generated from REFCLK */ data = 0x0; mask = MODE_CLK_SRC | BUNDLE_PERIOD_SEL | BUNDLE_PERIOD_SCALE_MASK | BUNDLE_SAMPLE_CTRL | PLL_READY_DLY_MASK; comphy_lane_reg_set(lane, COMPHY_PIPE_CLK_SRC_LO, data, mask); /* * 6. Set the G2 Spread Spectrum Clock Amplitude at 4K */ comphy_lane_reg_set(lane, COMPHY_GEN2_SET2, GS2_TX_SSC_AMP_4128, GS2_TX_SSC_AMP_MASK); /* * 7. Unset the G3 Spread Spectrum Clock Amplitude, * set the G3 TX and RX Register Master Current Select */ data = GS2_VREG_RXTX_MAS_ISET_60U; mask = GS2_TX_SSC_AMP_MASK | GS2_VREG_RXTX_MAS_ISET_MASK | GS2_RSVD_6_0_MASK; comphy_lane_reg_set(lane, COMPHY_GEN3_SET2, data, mask); /* * 8.
Check the crystal jumper setting and program the Power and PLL Control * accordingly; also change the RX wait. */ if (lane->priv->xtal_is_40m) { ref_clk = REF_FREF_SEL_PCIE_USB3_40MHZ; cfg = CFG_PM_RXDLOZ_WAIT_12_UNIT; } else { ref_clk = REF_FREF_SEL_PCIE_USB3_25MHZ; cfg = CFG_PM_RXDLOZ_WAIT_7_UNIT; } data = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT | PU_TX_INTP_BIT | PU_DFE_BIT | COMPHY_MODE_USB3 | ref_clk; mask = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT | PU_TX_INTP_BIT | PU_DFE_BIT | PLL_LOCK_BIT | COMPHY_MODE_MASK | REF_FREF_SEL_MASK; comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); data = CFG_PM_RXDEN_WAIT_1_UNIT | cfg; mask = CFG_PM_OSCCLK_WAIT_MASK | CFG_PM_RXDEN_WAIT_MASK | CFG_PM_RXDLOZ_WAIT_MASK; comphy_lane_reg_set(lane, COMPHY_PIPE_PWR_MGM_TIM1, data, mask); /* * 9. Enable idle sync */ comphy_lane_reg_set(lane, COMPHY_IDLE_SYNC_EN, IDLE_SYNC_EN, IDLE_SYNC_EN); /* * 10. Enable the output of the 500M clock */ comphy_lane_reg_set(lane, COMPHY_MISC_CTRL0, CLK500M_EN, CLK500M_EN); /* * 11. Set 20-bit data width */ comphy_lane_reg_set(lane, COMPHY_DIG_LOOPBACK_EN, DATA_WIDTH_20BIT, 0xFFFF); /* * 12. Override the Speed_PLL value and use the MAC PLL */ data = SPEED_PLL_VALUE_16 | USE_MAX_PLL_RATE_BIT; mask = 0xFFFF; comphy_lane_reg_set(lane, COMPHY_KVCO_CAL_CTRL, data, mask); /* * 13. Check the Polarity invert bit */ data = 0x0; if (lane->invert_tx) data |= TXD_INVERT_BIT; if (lane->invert_rx) data |= RXD_INVERT_BIT; mask = TXD_INVERT_BIT | RXD_INVERT_BIT; comphy_lane_reg_set(lane, COMPHY_SYNC_PATTERN, data, mask); /* * 14. Set the max speed generation to USB3.0 5Gbps */ comphy_lane_reg_set(lane, COMPHY_SYNC_MASK_GEN, PHY_GEN_MAX_USB3_5G, PHY_GEN_MAX_MASK); /* * 15. Set the capacitor value for FFE gain peaking to 0xF */ comphy_lane_reg_set(lane, COMPHY_GEN2_SET3, GS3_FFE_CAP_SEL_VALUE, GS3_FFE_CAP_SEL_MASK); /* * 16. Release the SW reset */ data = MODE_CORE_CLK_FREQ_SEL | MODE_PIPE_WIDTH_32 | MODE_REFDIV_BY_4; mask = 0xFFFF; comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask); /* Wait for > 55 us to allow the PCLK to be enabled */ udelay(PLL_SET_DELAY_US); ret = comphy_lane_reg_poll(lane, COMPHY_PIPE_LANE_STAT1, TXDCLK_PCLK_EN, COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); if (ret) dev_err(lane->dev, "Failed to lock USB3 PLL\n"); return ret; }
static int mvebu_a3700_comphy_pcie_power_on(struct mvebu_a3700_comphy_lane *lane) { u32 mask, data, ref_clk; int ret; /* Configure the PHY selector for PCIe */ ret = mvebu_a3700_comphy_set_phy_selector(lane); if (ret) return ret; /* 1. Enable max PLL. */ comphy_lane_reg_set(lane, COMPHY_PIPE_LANE_CFG1, USE_MAX_PLL_RATE_EN, USE_MAX_PLL_RATE_EN); /* 2. Select the 20 bit SERDES interface. */ comphy_lane_reg_set(lane, COMPHY_PIPE_CLK_SRC_LO, CFG_SEL_20B, CFG_SEL_20B); /* 3. Force the use of the reg setting for PCIe mode */ comphy_lane_reg_set(lane, COMPHY_MISC_CTRL1, SEL_BITS_PCIE_FORCE, SEL_BITS_PCIE_FORCE); /* 4. Change the RX wait */ data = CFG_PM_RXDEN_WAIT_1_UNIT | CFG_PM_RXDLOZ_WAIT_12_UNIT; mask = CFG_PM_OSCCLK_WAIT_MASK | CFG_PM_RXDEN_WAIT_MASK | CFG_PM_RXDLOZ_WAIT_MASK; comphy_lane_reg_set(lane, COMPHY_PIPE_PWR_MGM_TIM1, data, mask); /* 5. Enable idle sync */ comphy_lane_reg_set(lane, COMPHY_IDLE_SYNC_EN, IDLE_SYNC_EN, IDLE_SYNC_EN); /* 6. Enable the output of the 100M/125M/500M clock */ data = CLK500M_EN | TXDCLK_2X_SEL | CLK100M_125M_EN; mask = data; comphy_lane_reg_set(lane, COMPHY_MISC_CTRL0, data, mask); /* * 7. Enable TX (PCIe global register 0xd0074814); it is done in the * PCIe driver */ /* * 8.
Check the crystal jumper setting and program the Power and PLL * Control accordingly. */ if (lane->priv->xtal_is_40m) ref_clk = REF_FREF_SEL_PCIE_USB3_40MHZ; else ref_clk = REF_FREF_SEL_PCIE_USB3_25MHZ; data = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT | PU_TX_INTP_BIT | PU_DFE_BIT | COMPHY_MODE_PCIE | ref_clk; mask = 0xFFFF; comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); /* 9. Override the Speed_PLL value and use the MAC PLL */ comphy_lane_reg_set(lane, COMPHY_KVCO_CAL_CTRL, SPEED_PLL_VALUE_16 | USE_MAX_PLL_RATE_BIT, 0xFFFF); /* 10. Check the Polarity invert bit */ data = 0x0; if (lane->invert_tx) data |= TXD_INVERT_BIT; if (lane->invert_rx) data |= RXD_INVERT_BIT; mask = TXD_INVERT_BIT | RXD_INVERT_BIT; comphy_lane_reg_set(lane, COMPHY_SYNC_PATTERN, data, mask); /* 11. Release the SW reset */ data = MODE_CORE_CLK_FREQ_SEL | MODE_PIPE_WIDTH_32; mask = data | PIPE_SOFT_RESET | MODE_REFDIV_MASK; comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask); /* Wait for > 55 us to allow the PCLK to be enabled */ udelay(PLL_SET_DELAY_US); ret = comphy_lane_reg_poll(lane, COMPHY_PIPE_LANE_STAT1, TXDCLK_PCLK_EN, COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); if (ret) dev_err(lane->dev, "Failed to lock PCIE PLL\n"); return ret; }
static void mvebu_a3700_comphy_sata_power_off(struct mvebu_a3700_comphy_lane *lane) { /* Set PHY isolation mode */ comphy_lane_reg_set(lane, COMPHY_ISOLATION_CTRL, PHY_ISOLATE_MODE, PHY_ISOLATE_MODE); /* Power off the PLL, Tx, Rx */ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, 0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT); }
static void mvebu_a3700_comphy_ethernet_power_off(struct mvebu_a3700_comphy_lane *lane) { u32 mask, data; data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT | PIN_PU_IVREF_BIT | PHY_RX_INIT_BIT; mask = data; comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); }
static void mvebu_a3700_comphy_pcie_power_off(struct mvebu_a3700_comphy_lane *lane) { /* Power off the PLL, Tx, Rx */ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, 0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT); }
static void mvebu_a3700_comphy_usb3_power_off(struct mvebu_a3700_comphy_lane *lane) { /* * The USB3 MAC sets the USB3 PHY to a low state, so we do not * need to power off the USB3 PHY again.
*/ } static bool mvebu_a3700_comphy_check_mode(int lane, enum phy_mode mode, int submode) { int i, n = ARRAY_SIZE(mvebu_a3700_comphy_modes); /* Unused PHY mux value is 0x0 */ if (mode == PHY_MODE_INVALID) return false; for (i = 0; i < n; i++) { if (mvebu_a3700_comphy_modes[i].lane == lane && mvebu_a3700_comphy_modes[i].mode == mode && mvebu_a3700_comphy_modes[i].submode == submode) break; } if (i == n) return false; return true; } static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode, int submode) { struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); if (!mvebu_a3700_comphy_check_mode(lane->id, mode, submode)) { dev_err(lane->dev, "invalid COMPHY mode\n"); return -EINVAL; } /* Mode cannot be changed while the PHY is powered on */ if (phy->power_count && (lane->mode != mode || lane->submode != submode)) return -EBUSY; /* Just remember the mode, ->power_on() will do the real setup */ lane->mode = mode; lane->submode = submode; return 0; } static int mvebu_a3700_comphy_power_on(struct phy *phy) { struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); if (!mvebu_a3700_comphy_check_mode(lane->id, lane->mode, lane->submode)) { dev_err(lane->dev, "invalid COMPHY mode\n"); return -EINVAL; } switch (lane->mode) { case PHY_MODE_USB_HOST_SS: dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id); return mvebu_a3700_comphy_usb3_power_on(lane); case PHY_MODE_SATA: dev_dbg(lane->dev, "set lane %d to SATA mode\n", lane->id); return mvebu_a3700_comphy_sata_power_on(lane); case PHY_MODE_ETHERNET: dev_dbg(lane->dev, "set lane %d to Ethernet mode\n", lane->id); return mvebu_a3700_comphy_ethernet_power_on(lane); case PHY_MODE_PCIE: dev_dbg(lane->dev, "set lane %d to PCIe mode\n", lane->id); return mvebu_a3700_comphy_pcie_power_on(lane); default: dev_err(lane->dev, "unsupported PHY mode (%d)\n", lane->mode); return -EOPNOTSUPP; } } static int mvebu_a3700_comphy_power_off(struct phy *phy) { struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); switch (lane->id) { case 0: mvebu_a3700_comphy_usb3_power_off(lane); mvebu_a3700_comphy_ethernet_power_off(lane); return 0; case 1: mvebu_a3700_comphy_pcie_power_off(lane); mvebu_a3700_comphy_ethernet_power_off(lane); return 0; case 2: mvebu_a3700_comphy_usb3_power_off(lane); mvebu_a3700_comphy_sata_power_off(lane); return 0; default: dev_err(lane->dev, "invalid COMPHY mode\n"); return -EINVAL; } } static const struct phy_ops mvebu_a3700_comphy_ops = { .power_on = mvebu_a3700_comphy_power_on, .power_off = mvebu_a3700_comphy_power_off, .set_mode = mvebu_a3700_comphy_set_mode, .owner = THIS_MODULE, }; static struct phy *mvebu_a3700_comphy_xlate(struct device *dev, const struct of_phandle_args *args) { struct mvebu_a3700_comphy_lane *lane; unsigned int port; struct phy *phy; phy = of_phy_simple_xlate(dev, args); if (IS_ERR(phy)) return phy; lane = phy_get_drvdata(phy); port = args->args[0]; if (port != 0 && (port != 1 || lane->id != 0)) { dev_err(lane->dev, "invalid port number %u\n", port); return ERR_PTR(-EINVAL); } lane->invert_tx = args->args[1] & BIT(0); lane->invert_rx = args->args[1] & BIT(1); return phy; } static int mvebu_a3700_comphy_probe(struct platform_device *pdev) { struct mvebu_a3700_comphy_priv *priv; struct phy_provider *provider; struct device_node *child; struct resource *res; struct clk *clk; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; spin_lock_init(&priv->lock); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "comphy"); priv->comphy_regs = 
devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->comphy_regs)) return PTR_ERR(priv->comphy_regs); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lane1_pcie_gbe"); priv->lane1_phy_regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->lane1_phy_regs)) return PTR_ERR(priv->lane1_phy_regs); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lane0_usb3_gbe"); priv->lane0_phy_regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->lane0_phy_regs)) return PTR_ERR(priv->lane0_phy_regs); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lane2_sata_usb3"); priv->lane2_phy_indirect = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->lane2_phy_indirect)) return PTR_ERR(priv->lane2_phy_indirect); /* * The driver needs to know whether the reference xtal clock is 40MHz * or 25MHz. Old DT bindings do not have the xtal clk present, so do * not fail here; expect that the default 25MHz reference clock is used. */ clk = clk_get(&pdev->dev, "xtal"); if (IS_ERR(clk)) { if (PTR_ERR(clk) == -EPROBE_DEFER) return -EPROBE_DEFER; dev_warn(&pdev->dev, "missing 'xtal' clk (%ld)\n", PTR_ERR(clk)); } else { ret = clk_prepare_enable(clk); if (ret) { dev_warn(&pdev->dev, "enabling xtal clk failed (%d)\n", ret); } else { if (clk_get_rate(clk) == 40000000) priv->xtal_is_40m = true; clk_disable_unprepare(clk); } clk_put(clk); } dev_set_drvdata(&pdev->dev, priv); for_each_available_child_of_node(pdev->dev.of_node, child) { struct mvebu_a3700_comphy_lane *lane; struct phy *phy; int ret; u32 lane_id; ret = of_property_read_u32(child, "reg", &lane_id); if (ret < 0) { dev_err(&pdev->dev, "missing 'reg' property (%d)\n", ret); continue; } if (lane_id >= 3) { dev_err(&pdev->dev, "invalid 'reg' property\n"); continue; } lane = devm_kzalloc(&pdev->dev, sizeof(*lane), GFP_KERNEL); if (!lane) { of_node_put(child); return -ENOMEM; } phy = devm_phy_create(&pdev->dev, child, &mvebu_a3700_comphy_ops); if (IS_ERR(phy)) { of_node_put(child); return PTR_ERR(phy); } lane->priv = priv; lane->dev = &pdev->dev; lane->mode = PHY_MODE_INVALID; lane->submode = PHY_INTERFACE_MODE_NA; lane->id = lane_id; lane->invert_tx = false; lane->invert_rx = false; phy_set_drvdata(phy, lane); /* * To avoid relying on the bootloader/firmware configuration, * power off all comphys. */ mvebu_a3700_comphy_power_off(phy); } provider = devm_of_phy_provider_register(&pdev->dev, mvebu_a3700_comphy_xlate); return PTR_ERR_OR_ZERO(provider); }
static const struct of_device_id mvebu_a3700_comphy_of_match_table[] = { { .compatible = "marvell,comphy-a3700" }, { }, }; MODULE_DEVICE_TABLE(of, mvebu_a3700_comphy_of_match_table); static struct platform_driver mvebu_a3700_comphy_driver = { .probe = mvebu_a3700_comphy_probe, .driver = { .name = "mvebu-a3700-comphy", .of_match_table = mvebu_a3700_comphy_of_match_table, }, }; module_platform_driver(mvebu_a3700_comphy_driver); MODULE_AUTHOR("Miquèl Raynal <[email protected]>"); MODULE_AUTHOR("Pali Rohár <[email protected]>"); MODULE_AUTHOR("Marek Behún <[email protected]>"); MODULE_DESCRIPTION("Common PHY driver for A3700"); MODULE_LICENSE("GPL v2");
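/*
 * Editor's note - minimal consumer-side sketch, not part of the driver
 * above. It shows how the ->set_mode()/->power_on() split in
 * mvebu_a3700_comphy_ops is exercised through the generic PHY API:
 * set_mode() only validates and records the requested mode, and
 * power_on() then performs the actual lane setup. The "comphy" phandle
 * name and the function name are hypothetical.
 */
#include <linux/phy/phy.h>

static int example_attach_sata_comphy(struct device *dev)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_get(dev, "comphy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* Dispatches to mvebu_a3700_comphy_set_mode() */
	ret = phy_set_mode_ext(phy, PHY_MODE_SATA, 0);
	if (ret)
		return ret;

	/* Dispatches to mvebu_a3700_comphy_sata_power_on() */
	return phy_power_on(phy);
}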
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2024 Josua Mayer <[email protected]> * * DTS for SolidRun CN9130 Clearfog Base. * */ /dts-v1/; #include <dt-bindings/input/input.h> #include <dt-bindings/leds/common.h> #include "cn9130.dtsi" #include "cn9130-sr-som.dtsi" #include "cn9130-cf.dtsi" / { model = "SolidRun CN9130 Clearfog Base"; compatible = "solidrun,cn9130-clearfog-base", "solidrun,cn9130-sr-som", "marvell,cn9130"; gpio-keys { compatible = "gpio-keys"; pinctrl-0 = <&rear_button_pins>; pinctrl-names = "default"; button-0 { /* The rear SW3 button */ label = "Rear Button"; gpios = <&cp0_gpio1 31 GPIO_ACTIVE_LOW>; linux,can-disable; linux,code = <BTN_0>; }; }; rfkill-m2-gnss { compatible = "rfkill-gpio"; label = "m.2 GNSS"; radio-type = "gps"; /* rfkill-gpio inverts internally */ shutdown-gpios = <&expander0 9 GPIO_ACTIVE_HIGH>; }; /* M.2 is B-keyed, so w-disable is for WWAN */ rfkill-m2-wwan { compatible = "rfkill-gpio"; label = "m.2 WWAN"; radio-type = "wwan"; /* rfkill-gpio inverts internally */ shutdown-gpios = <&expander0 8 GPIO_ACTIVE_HIGH>; }; }; /* SRDS #3 - SGMII 1GE */ &cp0_eth1 { phy = <&phy1>; phys = <&cp0_comphy3 1>; phy-mode = "sgmii"; status = "okay"; }; &cp0_eth2_phy { /* * Configure LEDs default behaviour: * - LED[0]: link/activity: On/blink (green) * - LED[1]: link is 100/1000Mbps: On (yellow) * - LED[2]: high impedance (floating) */ marvell,reg-init = <3 16 0xf000 0x0a61>; leds { #address-cells = <1>; #size-cells = <0>; led@0 { reg = <0>; color = <LED_COLOR_ID_GREEN>; function = LED_FUNCTION_WAN; default-state = "keep"; }; led@1 { reg = <1>; color = <LED_COLOR_ID_YELLOW>; function = LED_FUNCTION_WAN; default-state = "keep"; }; }; }; &cp0_gpio1 { sim-select-hog { gpio-hog; gpios = <27 GPIO_ACTIVE_HIGH>; output-high; line-name = "sim-select"; }; }; &cp0_mdio { phy1: ethernet-phy@1 { reg = <1>; /* * Configure LEDs default behaviour: * - LED[0]: link/activity: On/blink (green) * - LED[1]: link is 100/1000Mbps: On (yellow) * - LED[2]: high impedance (floating) * * Configure LEDs electrical polarity * - on-state: low * - off-state: high (not hi-z, to avoid residual glow) */ marvell,reg-init = <3 16 0xf000 0x0a61>, <3 17 0x003f 0x000a>; leds { #address-cells = <1>; #size-cells = <0>; led@0 { reg = <0>; color = <LED_COLOR_ID_GREEN>; function = LED_FUNCTION_LAN; default-state = "keep"; }; led@1 { reg = <1>; color = <LED_COLOR_ID_YELLOW>; function = LED_FUNCTION_LAN; default-state = "keep"; }; }; }; }; &cp0_pinctrl { pinctrl-0 = <&sim_select_pins>; pinctrl-names = "default"; rear_button_pins: cp0-rear-button-pins { marvell,pins = "mpp31"; marvell,function = "gpio"; }; sim_select_pins: cp0-sim-select-pins { marvell,pins = "mpp27"; marvell,function = "gpio"; }; }; /* * SRDS #4 - USB 3.0 host on M.2 connector * USB-2.0 Host on Type-A connector */ &cp0_usb3_1 { phys = <&cp0_comphy4 1>, <&cp0_utmi1>; phy-names = "comphy", "utmi"; dr_mode = "host"; status = "okay"; }; &expander0 { m2-full-card-power-off-hog { gpio-hog; gpios = <2 GPIO_ACTIVE_LOW>; output-low; line-name = "m2-full-card-power-off"; }; m2-reset-hog { gpio-hog; gpios = <10 GPIO_ACTIVE_LOW>; output-low; line-name = "m2-reset"; }; };
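/*
 * Editor's note on the marvell,reg-init values above: each entry is a
 * <page reg mask value> quadruple. The Marvell PHY driver reads the
 * register on the given page, ANDs it with the mask, ORs in the value
 * and writes it back, so <3 16 0xf000 0x0a61> rewrites the low twelve
 * bits of page 3, register 16 (LED control) while preserving the top
 * four bits.
 */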
/* SPDX-License-Identifier: GPL-2.0 */ /* * mt7986-reg.h -- MediaTek 7986 audio driver reg definition * * Copyright (c) 2023 MediaTek Inc. * Authors: Vic Wu <[email protected]> * Maso Huang <[email protected]> */ #ifndef _MT7986_REG_H_ #define _MT7986_REG_H_ #define AUDIO_TOP_CON2 0x0008 #define AUDIO_TOP_CON4 0x0010 #define AUDIO_ENGEN_CON0 0x0014 #define AFE_IRQ_MCU_EN 0x0100 #define AFE_IRQ_MCU_STATUS 0x0120 #define AFE_IRQ_MCU_CLR 0x0128 #define AFE_IRQ0_MCU_CFG0 0x0140 #define AFE_IRQ0_MCU_CFG1 0x0144 #define AFE_IRQ1_MCU_CFG0 0x0148 #define AFE_IRQ1_MCU_CFG1 0x014c #define AFE_IRQ2_MCU_CFG0 0x0150 #define AFE_IRQ2_MCU_CFG1 0x0154 #define ETDM_IN5_CON0 0x13f0 #define ETDM_IN5_CON1 0x13f4 #define ETDM_IN5_CON2 0x13f8 #define ETDM_IN5_CON3 0x13fc #define ETDM_IN5_CON4 0x1400 #define ETDM_OUT5_CON0 0x1570 #define ETDM_OUT5_CON4 0x1580 #define ETDM_OUT5_CON5 0x1584 #define ETDM_4_7_COWORK_CON0 0x15e0 #define ETDM_4_7_COWORK_CON1 0x15e4 #define AFE_CONN018_1 0x1b44 #define AFE_CONN018_4 0x1b50 #define AFE_CONN019_1 0x1b64 #define AFE_CONN019_4 0x1b70 #define AFE_CONN124_1 0x2884 #define AFE_CONN124_4 0x2890 #define AFE_CONN125_1 0x28a4 #define AFE_CONN125_4 0x28b0 #define AFE_CONN_RS_0 0x3920 #define AFE_CONN_RS_3 0x392c #define AFE_CONN_16BIT_0 0x3960 #define AFE_CONN_16BIT_3 0x396c #define AFE_CONN_24BIT_0 0x3980 #define AFE_CONN_24BIT_3 0x398c #define AFE_MEMIF_CON0 0x3d98 #define AFE_MEMIF_RD_MON 0x3da0 #define AFE_MEMIF_WR_MON 0x3da4 #define AFE_DL0_BASE_MSB 0x3e40 #define AFE_DL0_BASE 0x3e44 #define AFE_DL0_CUR_MSB 0x3e48 #define AFE_DL0_CUR 0x3e4c #define AFE_DL0_END_MSB 0x3e50 #define AFE_DL0_END 0x3e54 #define AFE_DL0_RCH_MON 0x3e58 #define AFE_DL0_LCH_MON 0x3e5c #define AFE_DL0_CON0 0x3e60 #define AFE_VUL0_BASE_MSB 0x4220 #define AFE_VUL0_BASE 0x4224 #define AFE_VUL0_CUR_MSB 0x4228 #define AFE_VUL0_CUR 0x422c #define AFE_VUL0_END_MSB 0x4230 #define AFE_VUL0_END 0x4234 #define AFE_VUL0_CON0 0x4238 #define AFE_MAX_REGISTER AFE_VUL0_CON0 #define AFE_IRQ_STATUS_BITS 0x7 #define AFE_IRQ_CNT_SHIFT 0 #define AFE_IRQ_CNT_MASK 0xffffff /* AUDIO_TOP_CON2 */ #define CLK_OUT5_PDN BIT(14) #define CLK_OUT5_PDN_MASK BIT(14) #define CLK_IN5_PDN BIT(7) #define CLK_IN5_PDN_MASK BIT(7) /* AUDIO_TOP_CON4 */ #define PDN_APLL_TUNER2 BIT(12) #define PDN_APLL_TUNER2_MASK BIT(12) /* AUDIO_ENGEN_CON0 */ #define AUD_APLL2_EN BIT(3) #define AUD_APLL2_EN_MASK BIT(3) #define AUD_26M_EN BIT(0) #define AUD_26M_EN_MASK BIT(0) /* AFE_DL0_CON0 */ #define DL0_ON_SFT 28 #define DL0_ON_MASK 0x1 #define DL0_ON_MASK_SFT BIT(28) #define DL0_MINLEN_SFT 20 #define DL0_MINLEN_MASK 0xf #define DL0_MINLEN_MASK_SFT (0xf << 20) #define DL0_MODE_SFT 8 #define DL0_MODE_MASK 0x1f #define DL0_MODE_MASK_SFT (0x1f << 8) #define DL0_PBUF_SIZE_SFT 5 #define DL0_PBUF_SIZE_MASK 0x3 #define DL0_PBUF_SIZE_MASK_SFT (0x3 << 5) #define DL0_MONO_SFT 4 #define DL0_MONO_MASK 0x1 #define DL0_MONO_MASK_SFT BIT(4) #define DL0_HALIGN_SFT 2 #define DL0_HALIGN_MASK 0x1 #define DL0_HALIGN_MASK_SFT BIT(2) #define DL0_HD_MODE_SFT 0 #define DL0_HD_MODE_MASK 0x3 #define DL0_HD_MODE_MASK_SFT (0x3 << 0) /* AFE_VUL0_CON0 */ #define VUL0_ON_SFT 28 #define VUL0_ON_MASK 0x1 #define VUL0_ON_MASK_SFT BIT(28) #define VUL0_MODE_SFT 8 #define VUL0_MODE_MASK 0x1f #define VUL0_MODE_MASK_SFT (0x1f << 8) #define VUL0_MONO_SFT 4 #define VUL0_MONO_MASK 0x1 #define VUL0_MONO_MASK_SFT BIT(4) #define VUL0_HALIGN_SFT 2 #define VUL0_HALIGN_MASK 0x1 #define VUL0_HALIGN_MASK_SFT BIT(2) #define VUL0_HD_MODE_SFT 0 #define VUL0_HD_MODE_MASK 0x3 #define VUL0_HD_MODE_MASK_SFT (0x3 << 
0) /* AFE_IRQ_MCU_CON */ #define IRQ_MCU_MODE_SFT 4 #define IRQ_MCU_MODE_MASK 0x1f #define IRQ_MCU_MODE_MASK_SFT (0x1f << 4) #define IRQ_MCU_ON_SFT 0 #define IRQ_MCU_ON_MASK 0x1 #define IRQ_MCU_ON_MASK_SFT BIT(0) #define IRQ0_MCU_CLR_SFT 0 #define IRQ0_MCU_CLR_MASK 0x1 #define IRQ0_MCU_CLR_MASK_SFT BIT(0) #define IRQ1_MCU_CLR_SFT 1 #define IRQ1_MCU_CLR_MASK 0x1 #define IRQ1_MCU_CLR_MASK_SFT BIT(1) #define IRQ2_MCU_CLR_SFT 2 #define IRQ2_MCU_CLR_MASK 0x1 #define IRQ2_MCU_CLR_MASK_SFT BIT(2) /* ETDM_IN5_CON2 */ #define IN_CLK_SRC(x) ((x) << 10) #define IN_CLK_SRC_SFT 10 #define IN_CLK_SRC_MASK GENMASK(12, 10) /* ETDM_IN5_CON3 */ #define IN_SEL_FS(x) ((x) << 26) #define IN_SEL_FS_SFT 26 #define IN_SEL_FS_MASK GENMASK(30, 26) /* ETDM_IN5_CON4 */ #define IN_RELATCH(x) ((x) << 20) #define IN_RELATCH_SFT 20 #define IN_RELATCH_MASK GENMASK(24, 20) #define IN_CLK_INV BIT(18) #define IN_CLK_INV_MASK BIT(18) /* ETDM_IN5_CON0 & ETDM_OUT5_CON0 */ #define RELATCH_SRC_MASK GENMASK(30, 28) #define ETDM_CH_NUM_MASK GENMASK(27, 23) #define ETDM_WRD_LEN_MASK GENMASK(20, 16) #define ETDM_BIT_LEN_MASK GENMASK(15, 11) #define ETDM_FMT_MASK GENMASK(8, 6) #define ETDM_SYNC BIT(1) #define ETDM_SYNC_MASK BIT(1) #define ETDM_EN BIT(0) #define ETDM_EN_MASK BIT(0) /* ETDM_OUT5_CON4 */ #define OUT_RELATCH(x) ((x) << 24) #define OUT_RELATCH_SFT 24 #define OUT_RELATCH_MASK GENMASK(28, 24) #define OUT_CLK_SRC(x) ((x) << 6) #define OUT_CLK_SRC_SFT 6 #define OUT_CLK_SRC_MASK GENMASK(8, 6) #define OUT_SEL_FS(x) (x) #define OUT_SEL_FS_SFT 0 #define OUT_SEL_FS_MASK GENMASK(4, 0) /* ETDM_OUT5_CON5 */ #define ETDM_CLK_DIV BIT(12) #define ETDM_CLK_DIV_MASK BIT(12) #define OUT_CLK_INV BIT(9) #define OUT_CLK_INV_MASK BIT(9) /* ETDM_4_7_COWORK_CON0 */ #define OUT_SEL(x) ((x) << 12) #define OUT_SEL_SFT 12 #define OUT_SEL_MASK GENMASK(15, 12) #endif
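/*
 * Editor's note - usage sketch for the *_SFT/*_MASK/*_MASK_SFT triplets
 * above; it is not part of the header. A field is programmed by shifting
 * the value by its *_SFT and masking with *_MASK_SFT, the usual regmap
 * pattern in the MediaTek AFE drivers. The function name and the
 * rate-mode value 10 below are illustrative only.
 */
#include <linux/regmap.h>
#include <linux/types.h>

static void example_start_dl0(struct regmap *regmap, bool mono)
{
	/* Select the sample-rate mode for the DL0 memif */
	regmap_update_bits(regmap, AFE_DL0_CON0, DL0_MODE_MASK_SFT,
			   10 << DL0_MODE_SFT);

	/* Mono or stereo buffer layout */
	regmap_update_bits(regmap, AFE_DL0_CON0, DL0_MONO_MASK_SFT,
			   (mono ? 1 : 0) << DL0_MONO_SFT);

	/* Finally switch the DL0 memif on */
	regmap_update_bits(regmap, AFE_DL0_CON0, DL0_ON_MASK_SFT,
			   1 << DL0_ON_SFT);
}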
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2013 Freescale Semiconductor, Inc. * * Author: Fabio Estevam <[email protected]> */ /dts-v1/; #include "imx6q.dtsi" #include "imx6qdl-wandboard-revc1.dtsi" / { model = "Wandboard i.MX6 Quad Board"; compatible = "wand,imx6q-wandboard", "fsl,imx6q"; memory@10000000 { device_type = "memory"; reg = <0x10000000 0x80000000>; }; }; &sata { status = "okay"; };
// SPDX-License-Identifier: GPL-2.0 #ifndef _DECOMPRESSOR_H #define _DECOMPRESSOR_H /* The linker tells us where the image is. */ extern unsigned char __image_begin[], __image_end[]; /* debug interfaces */ #ifdef CONFIG_DEBUG_ZBOOT extern void putc(char c); extern void puts(const char *s); extern void puthex(unsigned long long val); #else #define putc(s) do {} while (0) #define puts(s) do {} while (0) #define puthex(val) do {} while (0) #endif extern char __appended_dtb[]; void error(char *x); void decompress_kernel(unsigned long boot_heap_start); #endif
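/*
 * Editor's note - usage sketch, not part of the header. With
 * CONFIG_DEBUG_ZBOOT disabled the putc()/puts()/puthex() calls above
 * compile to empty statements, so decompressor code can call them
 * unconditionally; the function below is an illustrative example.
 */
static inline void example_report_heap(unsigned long boot_heap_start)
{
	puts("boot heap at 0x");
	puthex(boot_heap_start);
	puts("\n");
}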
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 Daniel Tang <[email protected]> */ /dts-v1/; #include <dt-bindings/input/input.h> /include/ "nspire-classic.dtsi" &keypad { linux,keymap = < MATRIX_KEY(0, 0, 0x1c) MATRIX_KEY(0, 1, 0x1c) MATRIX_KEY(0, 4, 0x39) MATRIX_KEY(0, 5, 0x2c) MATRIX_KEY(0, 6, 0x15) MATRIX_KEY(0, 7, 0x0b) MATRIX_KEY(0, 8, 0x0f) MATRIX_KEY(1, 0, 0x2d) MATRIX_KEY(1, 1, 0x11) MATRIX_KEY(1, 2, 0x2f) MATRIX_KEY(1, 3, 0x04) MATRIX_KEY(1, 4, 0x16) MATRIX_KEY(1, 5, 0x14) MATRIX_KEY(1, 6, 0x1f) MATRIX_KEY(1, 7, 0x02) MATRIX_KEY(1, 10, 0x6a) MATRIX_KEY(2, 0, 0x13) MATRIX_KEY(2, 1, 0x10) MATRIX_KEY(2, 2, 0x19) MATRIX_KEY(2, 3, 0x07) MATRIX_KEY(2, 4, 0x18) MATRIX_KEY(2, 5, 0x31) MATRIX_KEY(2, 6, 0x32) MATRIX_KEY(2, 7, 0x05) MATRIX_KEY(2, 8, 0x28) MATRIX_KEY(2, 9, 0x6c) MATRIX_KEY(3, 0, 0x26) MATRIX_KEY(3, 1, 0x25) MATRIX_KEY(3, 2, 0x24) MATRIX_KEY(3, 3, 0x0a) MATRIX_KEY(3, 4, 0x17) MATRIX_KEY(3, 5, 0x23) MATRIX_KEY(3, 6, 0x22) MATRIX_KEY(3, 7, 0x08) MATRIX_KEY(3, 8, 0x35) MATRIX_KEY(3, 9, 0x69) MATRIX_KEY(4, 0, 0x21) MATRIX_KEY(4, 1, 0x12) MATRIX_KEY(4, 2, 0x20) MATRIX_KEY(4, 4, 0x2e) MATRIX_KEY(4, 5, 0x30) MATRIX_KEY(4, 6, 0x1e) MATRIX_KEY(4, 7, 0x0d) MATRIX_KEY(4, 8, 0x37) MATRIX_KEY(4, 9, 0x67) MATRIX_KEY(5, 1, 0x38) MATRIX_KEY(5, 2, 0x0c) MATRIX_KEY(5, 3, 0x1b) MATRIX_KEY(5, 4, 0x34) MATRIX_KEY(5, 5, 0x1a) MATRIX_KEY(5, 6, 0x06) MATRIX_KEY(5, 8, 0x27) MATRIX_KEY(5, 9, 0x0e) MATRIX_KEY(5, 10, 0x6f) MATRIX_KEY(6, 0, 0x2b) MATRIX_KEY(6, 2, 0x4e) MATRIX_KEY(6, 3, 0x68) MATRIX_KEY(6, 4, 0x03) MATRIX_KEY(6, 5, 0x6d) MATRIX_KEY(6, 6, 0x09) MATRIX_KEY(6, 7, 0x01) MATRIX_KEY(6, 9, 0x0f) MATRIX_KEY(7, 8, 0x2a) MATRIX_KEY(7, 9, 0x1d) MATRIX_KEY(7, 10, 0x33) >; }; / { model = "TI-NSPIRE Touchpad"; compatible = "ti,nspire-tp"; };
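/*
 * Editor's note: each MATRIX_KEY(row, column, code) entry above packs its
 * arguments into a single keymap cell; the third argument is a raw Linux
 * input keycode from input-event-codes.h, e.g. 0x1c is KEY_ENTER (28) and
 * 0x39 is KEY_SPACE (57).
 */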
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright IBM Corp. 2004 * * Author: Martin Schwidefsky <[email protected]> */ #ifndef _S390_CPUTIME_H #define _S390_CPUTIME_H #include <linux/types.h> #include <asm/timex.h> /* * Convert cputime to nanoseconds. */ #define cputime_to_nsecs(cputime) tod_to_ns(cputime) void account_idle_time_irq(void); #endif /* _S390_CPUTIME_H */
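/*
 * Editor's note - usage sketch, not part of the header. The cputime value
 * is a CPU-timer delta in TOD-clock format, which tod_to_ns() scales to
 * nanoseconds; since the s390 CPU timer counts down, the elapsed time is
 * "before - after". The function name is illustrative only.
 */
static inline u64 example_cputime_delta_ns(u64 timer_before, u64 timer_after)
{
	return cputime_to_nsecs(timer_before - timer_after);
}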
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ /* * Copyright (c) 2021, The Linux Foundation. All rights reserved. * Copyright (c) 2022, Linaro Limited */ #ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6375_H #define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6375_H /* Clocks */ #define DISP_CC_PLL0 0 #define DISP_CC_MDSS_AHB_CLK 1 #define DISP_CC_MDSS_AHB_CLK_SRC 2 #define DISP_CC_MDSS_BYTE0_CLK 3 #define DISP_CC_MDSS_BYTE0_CLK_SRC 4 #define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5 #define DISP_CC_MDSS_BYTE0_INTF_CLK 6 #define DISP_CC_MDSS_ESC0_CLK 7 #define DISP_CC_MDSS_ESC0_CLK_SRC 8 #define DISP_CC_MDSS_MDP_CLK 9 #define DISP_CC_MDSS_MDP_CLK_SRC 10 #define DISP_CC_MDSS_MDP_LUT_CLK 11 #define DISP_CC_MDSS_NON_GDSC_AHB_CLK 12 #define DISP_CC_MDSS_PCLK0_CLK 13 #define DISP_CC_MDSS_PCLK0_CLK_SRC 14 #define DISP_CC_MDSS_ROT_CLK 15 #define DISP_CC_MDSS_ROT_CLK_SRC 16 #define DISP_CC_MDSS_RSCC_AHB_CLK 17 #define DISP_CC_MDSS_RSCC_VSYNC_CLK 18 #define DISP_CC_MDSS_VSYNC_CLK 19 #define DISP_CC_MDSS_VSYNC_CLK_SRC 20 #define DISP_CC_SLEEP_CLK 21 #define DISP_CC_XO_CLK 22 /* Resets */ #define DISP_CC_MDSS_CORE_BCR 0 #define DISP_CC_MDSS_RSCC_BCR 1 /* GDSCs */ #define MDSS_GDSC 0 #endif
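/*
 * Editor's note: devicetree consumers reference these indices through the
 * clock controller node, e.g. "clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>;",
 * where "dispcc" is a hypothetical label for the SM6375 display clock
 * controller. The reset and GDSC indices are used the same way in
 * "resets" and "power-domains" properties.
 */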
// SPDX-License-Identifier: GPL-2.0 /* * Intel Sunrisepoint PCH pinctrl/GPIO driver * * Copyright (C) 2015, Intel Corporation * Authors: Mathias Nyman <[email protected]> * Mika Westerberg <[email protected]> */ #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pinctrl/pinctrl.h> #include "pinctrl-intel.h" #define SPT_H_PAD_OWN 0x020 #define SPT_H_PADCFGLOCK 0x090 #define SPT_H_HOSTSW_OWN 0x0d0 #define SPT_H_GPI_IS 0x100 #define SPT_H_GPI_IE 0x120 #define SPT_LP_PAD_OWN 0x020 #define SPT_LP_PADCFGLOCK 0x0a0 #define SPT_LP_HOSTSW_OWN 0x0d0 #define SPT_LP_GPI_IS 0x100 #define SPT_LP_GPI_IE 0x120 #define SPT_H_GPP(r, s, e, g) \ { \ .reg_num = (r), \ .base = (s), \ .size = ((e) - (s) + 1), \ .gpio_base = (g), \ } #define SPT_H_COMMUNITY(b, s, e, g) \ INTEL_COMMUNITY_GPPS(b, s, e, g, SPT_H) #define SPT_LP_COMMUNITY(b, s, e) \ INTEL_COMMUNITY_SIZE(b, s, e, 24, 4, SPT_LP) /* Sunrisepoint-LP */ static const struct pinctrl_pin_desc sptlp_pins[] = { /* GPP_A */ PINCTRL_PIN(0, "RCINB"), PINCTRL_PIN(1, "LAD_0"), PINCTRL_PIN(2, "LAD_1"), PINCTRL_PIN(3, "LAD_2"), PINCTRL_PIN(4, "LAD_3"), PINCTRL_PIN(5, "LFRAMEB"), PINCTRL_PIN(6, "SERIQ"), PINCTRL_PIN(7, "PIRQAB"), PINCTRL_PIN(8, "CLKRUNB"), PINCTRL_PIN(9, "CLKOUT_LPC_0"), PINCTRL_PIN(10, "CLKOUT_LPC_1"), PINCTRL_PIN(11, "PMEB"), PINCTRL_PIN(12, "BM_BUSYB"), PINCTRL_PIN(13, "SUSWARNB_SUS_PWRDNACK"), PINCTRL_PIN(14, "SUS_STATB"), PINCTRL_PIN(15, "SUSACKB"), PINCTRL_PIN(16, "SD_1P8_SEL"), PINCTRL_PIN(17, "SD_PWR_EN_B"), PINCTRL_PIN(18, "ISH_GP_0"), PINCTRL_PIN(19, "ISH_GP_1"), PINCTRL_PIN(20, "ISH_GP_2"), PINCTRL_PIN(21, "ISH_GP_3"), PINCTRL_PIN(22, "ISH_GP_4"), PINCTRL_PIN(23, "ISH_GP_5"), /* GPP_B */ PINCTRL_PIN(24, "CORE_VID_0"), PINCTRL_PIN(25, "CORE_VID_1"), PINCTRL_PIN(26, "VRALERTB"), PINCTRL_PIN(27, "CPU_GP_2"), PINCTRL_PIN(28, "CPU_GP_3"), PINCTRL_PIN(29, "SRCCLKREQB_0"), PINCTRL_PIN(30, "SRCCLKREQB_1"), PINCTRL_PIN(31, "SRCCLKREQB_2"), PINCTRL_PIN(32, "SRCCLKREQB_3"), PINCTRL_PIN(33, "SRCCLKREQB_4"), PINCTRL_PIN(34, "SRCCLKREQB_5"), PINCTRL_PIN(35, "EXT_PWR_GATEB"), PINCTRL_PIN(36, "SLP_S0B"), PINCTRL_PIN(37, "PLTRSTB"), PINCTRL_PIN(38, "SPKR"), PINCTRL_PIN(39, "GSPI0_CSB"), PINCTRL_PIN(40, "GSPI0_CLK"), PINCTRL_PIN(41, "GSPI0_MISO"), PINCTRL_PIN(42, "GSPI0_MOSI"), PINCTRL_PIN(43, "GSPI1_CSB"), PINCTRL_PIN(44, "GSPI1_CLK"), PINCTRL_PIN(45, "GSPI1_MISO"), PINCTRL_PIN(46, "GSPI1_MOSI"), PINCTRL_PIN(47, "SML1ALERTB"), /* GPP_C */ PINCTRL_PIN(48, "SMBCLK"), PINCTRL_PIN(49, "SMBDATA"), PINCTRL_PIN(50, "SMBALERTB"), PINCTRL_PIN(51, "SML0CLK"), PINCTRL_PIN(52, "SML0DATA"), PINCTRL_PIN(53, "SML0ALERTB"), PINCTRL_PIN(54, "SML1CLK"), PINCTRL_PIN(55, "SML1DATA"), PINCTRL_PIN(56, "UART0_RXD"), PINCTRL_PIN(57, "UART0_TXD"), PINCTRL_PIN(58, "UART0_RTSB"), PINCTRL_PIN(59, "UART0_CTSB"), PINCTRL_PIN(60, "UART1_RXD"), PINCTRL_PIN(61, "UART1_TXD"), PINCTRL_PIN(62, "UART1_RTSB"), PINCTRL_PIN(63, "UART1_CTSB"), PINCTRL_PIN(64, "I2C0_SDA"), PINCTRL_PIN(65, "I2C0_SCL"), PINCTRL_PIN(66, "I2C1_SDA"), PINCTRL_PIN(67, "I2C1_SCL"), PINCTRL_PIN(68, "UART2_RXD"), PINCTRL_PIN(69, "UART2_TXD"), PINCTRL_PIN(70, "UART2_RTSB"), PINCTRL_PIN(71, "UART2_CTSB"), /* GPP_D */ PINCTRL_PIN(72, "SPI1_CSB"), PINCTRL_PIN(73, "SPI1_CLK"), PINCTRL_PIN(74, "SPI1_MISO_IO_1"), PINCTRL_PIN(75, "SPI1_MOSI_IO_0"), PINCTRL_PIN(76, "FLASHTRIG"), PINCTRL_PIN(77, "ISH_I2C0_SDA"), PINCTRL_PIN(78, "ISH_I2C0_SCL"), PINCTRL_PIN(79, "ISH_I2C1_SDA"), PINCTRL_PIN(80, "ISH_I2C1_SCL"), PINCTRL_PIN(81, "ISH_SPI_CSB"), 
PINCTRL_PIN(82, "ISH_SPI_CLK"), PINCTRL_PIN(83, "ISH_SPI_MISO"), PINCTRL_PIN(84, "ISH_SPI_MOSI"), PINCTRL_PIN(85, "ISH_UART0_RXD"), PINCTRL_PIN(86, "ISH_UART0_TXD"), PINCTRL_PIN(87, "ISH_UART0_RTSB"), PINCTRL_PIN(88, "ISH_UART0_CTSB"), PINCTRL_PIN(89, "DMIC_CLK_1"), PINCTRL_PIN(90, "DMIC_DATA_1"), PINCTRL_PIN(91, "DMIC_CLK_0"), PINCTRL_PIN(92, "DMIC_DATA_0"), PINCTRL_PIN(93, "SPI1_IO_2"), PINCTRL_PIN(94, "SPI1_IO_3"), PINCTRL_PIN(95, "SSP_MCLK"), /* GPP_E */ PINCTRL_PIN(96, "SATAXPCIE_0"), PINCTRL_PIN(97, "SATAXPCIE_1"), PINCTRL_PIN(98, "SATAXPCIE_2"), PINCTRL_PIN(99, "CPU_GP_0"), PINCTRL_PIN(100, "SATA_DEVSLP_0"), PINCTRL_PIN(101, "SATA_DEVSLP_1"), PINCTRL_PIN(102, "SATA_DEVSLP_2"), PINCTRL_PIN(103, "CPU_GP_1"), PINCTRL_PIN(104, "SATA_LEDB"), PINCTRL_PIN(105, "USB2_OCB_0"), PINCTRL_PIN(106, "USB2_OCB_1"), PINCTRL_PIN(107, "USB2_OCB_2"), PINCTRL_PIN(108, "USB2_OCB_3"), PINCTRL_PIN(109, "DDSP_HPD_0"), PINCTRL_PIN(110, "DDSP_HPD_1"), PINCTRL_PIN(111, "DDSP_HPD_2"), PINCTRL_PIN(112, "DDSP_HPD_3"), PINCTRL_PIN(113, "EDP_HPD"), PINCTRL_PIN(114, "DDPB_CTRLCLK"), PINCTRL_PIN(115, "DDPB_CTRLDATA"), PINCTRL_PIN(116, "DDPC_CTRLCLK"), PINCTRL_PIN(117, "DDPC_CTRLDATA"), PINCTRL_PIN(118, "DDPD_CTRLCLK"), PINCTRL_PIN(119, "DDPD_CTRLDATA"), /* GPP_F */ PINCTRL_PIN(120, "SSP2_SCLK"), PINCTRL_PIN(121, "SSP2_SFRM"), PINCTRL_PIN(122, "SSP2_TXD"), PINCTRL_PIN(123, "SSP2_RXD"), PINCTRL_PIN(124, "I2C2_SDA"), PINCTRL_PIN(125, "I2C2_SCL"), PINCTRL_PIN(126, "I2C3_SDA"), PINCTRL_PIN(127, "I2C3_SCL"), PINCTRL_PIN(128, "I2C4_SDA"), PINCTRL_PIN(129, "I2C4_SCL"), PINCTRL_PIN(130, "I2C5_SDA"), PINCTRL_PIN(131, "I2C5_SCL"), PINCTRL_PIN(132, "EMMC_CMD"), PINCTRL_PIN(133, "EMMC_DATA_0"), PINCTRL_PIN(134, "EMMC_DATA_1"), PINCTRL_PIN(135, "EMMC_DATA_2"), PINCTRL_PIN(136, "EMMC_DATA_3"), PINCTRL_PIN(137, "EMMC_DATA_4"), PINCTRL_PIN(138, "EMMC_DATA_5"), PINCTRL_PIN(139, "EMMC_DATA_6"), PINCTRL_PIN(140, "EMMC_DATA_7"), PINCTRL_PIN(141, "EMMC_RCLK"), PINCTRL_PIN(142, "EMMC_CLK"), PINCTRL_PIN(143, "GPP_F_23"), /* GPP_G */ PINCTRL_PIN(144, "SD_CMD"), PINCTRL_PIN(145, "SD_DATA_0"), PINCTRL_PIN(146, "SD_DATA_1"), PINCTRL_PIN(147, "SD_DATA_2"), PINCTRL_PIN(148, "SD_DATA_3"), PINCTRL_PIN(149, "SD_CDB"), PINCTRL_PIN(150, "SD_CLK"), PINCTRL_PIN(151, "SD_WP"), }; static const unsigned sptlp_spi0_pins[] = { 39, 40, 41, 42 }; static const unsigned sptlp_spi1_pins[] = { 43, 44, 45, 46 }; static const unsigned sptlp_uart0_pins[] = { 56, 57, 58, 59 }; static const unsigned sptlp_uart1_pins[] = { 60, 61, 62, 63 }; static const unsigned sptlp_uart2_pins[] = { 68, 69, 71, 71 }; static const unsigned sptlp_i2c0_pins[] = { 64, 65 }; static const unsigned sptlp_i2c1_pins[] = { 66, 67 }; static const unsigned sptlp_i2c2_pins[] = { 124, 125 }; static const unsigned sptlp_i2c3_pins[] = { 126, 127 }; static const unsigned sptlp_i2c4_pins[] = { 128, 129 }; static const unsigned sptlp_i2c4b_pins[] = { 85, 86 }; static const unsigned sptlp_i2c5_pins[] = { 130, 131 }; static const unsigned sptlp_ssp2_pins[] = { 120, 121, 122, 123 }; static const unsigned sptlp_emmc_pins[] = { 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, }; static const unsigned sptlp_sd_pins[] = { 144, 145, 146, 147, 148, 149, 150, 151, }; static const struct intel_pingroup sptlp_groups[] = { PIN_GROUP("spi0_grp", sptlp_spi0_pins, 1), PIN_GROUP("spi1_grp", sptlp_spi1_pins, 1), PIN_GROUP("uart0_grp", sptlp_uart0_pins, 1), PIN_GROUP("uart1_grp", sptlp_uart1_pins, 1), PIN_GROUP("uart2_grp", sptlp_uart2_pins, 1), PIN_GROUP("i2c0_grp", sptlp_i2c0_pins, 1), PIN_GROUP("i2c1_grp", 
sptlp_i2c1_pins, 1), PIN_GROUP("i2c2_grp", sptlp_i2c2_pins, 1), PIN_GROUP("i2c3_grp", sptlp_i2c3_pins, 1), PIN_GROUP("i2c4_grp", sptlp_i2c4_pins, 1), PIN_GROUP("i2c4b_grp", sptlp_i2c4b_pins, 3), PIN_GROUP("i2c5_grp", sptlp_i2c5_pins, 1), PIN_GROUP("ssp2_grp", sptlp_ssp2_pins, 1), PIN_GROUP("emmc_grp", sptlp_emmc_pins, 1), PIN_GROUP("sd_grp", sptlp_sd_pins, 1), };
static const char * const sptlp_spi0_groups[] = { "spi0_grp" }; static const char * const sptlp_spi1_groups[] = { "spi1_grp" }; static const char * const sptlp_uart0_groups[] = { "uart0_grp" }; static const char * const sptlp_uart1_groups[] = { "uart1_grp" }; static const char * const sptlp_uart2_groups[] = { "uart2_grp" }; static const char * const sptlp_i2c0_groups[] = { "i2c0_grp" }; static const char * const sptlp_i2c1_groups[] = { "i2c1_grp" }; static const char * const sptlp_i2c2_groups[] = { "i2c2_grp" }; static const char * const sptlp_i2c3_groups[] = { "i2c3_grp" }; static const char * const sptlp_i2c4_groups[] = { "i2c4_grp", "i2c4b_grp" }; static const char * const sptlp_i2c5_groups[] = { "i2c5_grp" }; static const char * const sptlp_ssp2_groups[] = { "ssp2_grp" }; static const char * const sptlp_emmc_groups[] = { "emmc_grp" }; static const char * const sptlp_sd_groups[] = { "sd_grp" };
static const struct intel_function sptlp_functions[] = { FUNCTION("spi0", sptlp_spi0_groups), FUNCTION("spi1", sptlp_spi1_groups), FUNCTION("uart0", sptlp_uart0_groups), FUNCTION("uart1", sptlp_uart1_groups), FUNCTION("uart2", sptlp_uart2_groups), FUNCTION("i2c0", sptlp_i2c0_groups), FUNCTION("i2c1", sptlp_i2c1_groups), FUNCTION("i2c2", sptlp_i2c2_groups), FUNCTION("i2c3", sptlp_i2c3_groups), FUNCTION("i2c4", sptlp_i2c4_groups), FUNCTION("i2c5", sptlp_i2c5_groups), FUNCTION("ssp2", sptlp_ssp2_groups), FUNCTION("emmc", sptlp_emmc_groups), FUNCTION("sd", sptlp_sd_groups), };
static const struct intel_community sptlp_communities[] = { SPT_LP_COMMUNITY(0, 0, 47), SPT_LP_COMMUNITY(1, 48, 119), SPT_LP_COMMUNITY(2, 120, 151), };
static const struct intel_pinctrl_soc_data sptlp_soc_data = { .pins = sptlp_pins, .npins = ARRAY_SIZE(sptlp_pins), .groups = sptlp_groups, .ngroups = ARRAY_SIZE(sptlp_groups), .functions = sptlp_functions, .nfunctions = ARRAY_SIZE(sptlp_functions), .communities = sptlp_communities, .ncommunities = ARRAY_SIZE(sptlp_communities), };
/* Sunrisepoint-H */ static const struct pinctrl_pin_desc spth_pins[] = { /* GPP_A */ PINCTRL_PIN(0, "RCINB"), PINCTRL_PIN(1, "LAD_0"), PINCTRL_PIN(2, "LAD_1"), PINCTRL_PIN(3, "LAD_2"), PINCTRL_PIN(4, "LAD_3"), PINCTRL_PIN(5, "LFRAMEB"), PINCTRL_PIN(6, "SERIQ"), PINCTRL_PIN(7, "PIRQAB"), PINCTRL_PIN(8, "CLKRUNB"), PINCTRL_PIN(9, "CLKOUT_LPC_0"), PINCTRL_PIN(10, "CLKOUT_LPC_1"), PINCTRL_PIN(11, "PMEB"), PINCTRL_PIN(12, "BM_BUSYB"), PINCTRL_PIN(13, "SUSWARNB_SUS_PWRDNACK"), PINCTRL_PIN(14, "SUS_STATB"), PINCTRL_PIN(15, "SUSACKB"), PINCTRL_PIN(16, "CLKOUT_48"), PINCTRL_PIN(17, "ISH_GP_7"), PINCTRL_PIN(18, "ISH_GP_0"), PINCTRL_PIN(19, "ISH_GP_1"), PINCTRL_PIN(20, "ISH_GP_2"), PINCTRL_PIN(21, "ISH_GP_3"), PINCTRL_PIN(22, "ISH_GP_4"), PINCTRL_PIN(23, "ISH_GP_5"), /* GPP_B */ PINCTRL_PIN(24, "CORE_VID_0"), PINCTRL_PIN(25, "CORE_VID_1"), PINCTRL_PIN(26, "VRALERTB"), PINCTRL_PIN(27, "CPU_GP_2"), PINCTRL_PIN(28, "CPU_GP_3"), PINCTRL_PIN(29, "SRCCLKREQB_0"), PINCTRL_PIN(30, "SRCCLKREQB_1"), PINCTRL_PIN(31, "SRCCLKREQB_2"), PINCTRL_PIN(32, "SRCCLKREQB_3"), PINCTRL_PIN(33, "SRCCLKREQB_4"), PINCTRL_PIN(34, "SRCCLKREQB_5"), PINCTRL_PIN(35, "EXT_PWR_GATEB"), PINCTRL_PIN(36, "SLP_S0B"), PINCTRL_PIN(37,
"PLTRSTB"), PINCTRL_PIN(38, "SPKR"), PINCTRL_PIN(39, "GSPI0_CSB"), PINCTRL_PIN(40, "GSPI0_CLK"), PINCTRL_PIN(41, "GSPI0_MISO"), PINCTRL_PIN(42, "GSPI0_MOSI"), PINCTRL_PIN(43, "GSPI1_CSB"), PINCTRL_PIN(44, "GSPI1_CLK"), PINCTRL_PIN(45, "GSPI1_MISO"), PINCTRL_PIN(46, "GSPI1_MOSI"), PINCTRL_PIN(47, "SML1ALERTB"), /* GPP_C */ PINCTRL_PIN(48, "SMBCLK"), PINCTRL_PIN(49, "SMBDATA"), PINCTRL_PIN(50, "SMBALERTB"), PINCTRL_PIN(51, "SML0CLK"), PINCTRL_PIN(52, "SML0DATA"), PINCTRL_PIN(53, "SML0ALERTB"), PINCTRL_PIN(54, "SML1CLK"), PINCTRL_PIN(55, "SML1DATA"), PINCTRL_PIN(56, "UART0_RXD"), PINCTRL_PIN(57, "UART0_TXD"), PINCTRL_PIN(58, "UART0_RTSB"), PINCTRL_PIN(59, "UART0_CTSB"), PINCTRL_PIN(60, "UART1_RXD"), PINCTRL_PIN(61, "UART1_TXD"), PINCTRL_PIN(62, "UART1_RTSB"), PINCTRL_PIN(63, "UART1_CTSB"), PINCTRL_PIN(64, "I2C0_SDA"), PINCTRL_PIN(65, "I2C0_SCL"), PINCTRL_PIN(66, "I2C1_SDA"), PINCTRL_PIN(67, "I2C1_SCL"), PINCTRL_PIN(68, "UART2_RXD"), PINCTRL_PIN(69, "UART2_TXD"), PINCTRL_PIN(70, "UART2_RTSB"), PINCTRL_PIN(71, "UART2_CTSB"), /* GPP_D */ PINCTRL_PIN(72, "SPI1_CSB"), PINCTRL_PIN(73, "SPI1_CLK"), PINCTRL_PIN(74, "SPI1_MISO_IO_1"), PINCTRL_PIN(75, "SPI1_MOSI_IO_0"), PINCTRL_PIN(76, "ISH_I2C2_SDA"), PINCTRL_PIN(77, "SSP0_SFRM"), PINCTRL_PIN(78, "SSP0_TXD"), PINCTRL_PIN(79, "SSP0_RXD"), PINCTRL_PIN(80, "SSP0_SCLK"), PINCTRL_PIN(81, "ISH_SPI_CSB"), PINCTRL_PIN(82, "ISH_SPI_CLK"), PINCTRL_PIN(83, "ISH_SPI_MISO"), PINCTRL_PIN(84, "ISH_SPI_MOSI"), PINCTRL_PIN(85, "ISH_UART0_RXD"), PINCTRL_PIN(86, "ISH_UART0_TXD"), PINCTRL_PIN(87, "ISH_UART0_RTSB"), PINCTRL_PIN(88, "ISH_UART0_CTSB"), PINCTRL_PIN(89, "DMIC_CLK_1"), PINCTRL_PIN(90, "DMIC_DATA_1"), PINCTRL_PIN(91, "DMIC_CLK_0"), PINCTRL_PIN(92, "DMIC_DATA_0"), PINCTRL_PIN(93, "SPI1_IO_2"), PINCTRL_PIN(94, "SPI1_IO_3"), PINCTRL_PIN(95, "ISH_I2C2_SCL"), /* GPP_E */ PINCTRL_PIN(96, "SATAXPCIE_0"), PINCTRL_PIN(97, "SATAXPCIE_1"), PINCTRL_PIN(98, "SATAXPCIE_2"), PINCTRL_PIN(99, "CPU_GP_0"), PINCTRL_PIN(100, "SATA_DEVSLP_0"), PINCTRL_PIN(101, "SATA_DEVSLP_1"), PINCTRL_PIN(102, "SATA_DEVSLP_2"), PINCTRL_PIN(103, "CPU_GP_1"), PINCTRL_PIN(104, "SATA_LEDB"), PINCTRL_PIN(105, "USB2_OCB_0"), PINCTRL_PIN(106, "USB2_OCB_1"), PINCTRL_PIN(107, "USB2_OCB_2"), PINCTRL_PIN(108, "USB2_OCB_3"), /* GPP_F */ PINCTRL_PIN(109, "SATAXPCIE_3"), PINCTRL_PIN(110, "SATAXPCIE_4"), PINCTRL_PIN(111, "SATAXPCIE_5"), PINCTRL_PIN(112, "SATAXPCIE_6"), PINCTRL_PIN(113, "SATAXPCIE_7"), PINCTRL_PIN(114, "SATA_DEVSLP_3"), PINCTRL_PIN(115, "SATA_DEVSLP_4"), PINCTRL_PIN(116, "SATA_DEVSLP_5"), PINCTRL_PIN(117, "SATA_DEVSLP_6"), PINCTRL_PIN(118, "SATA_DEVSLP_7"), PINCTRL_PIN(119, "SATA_SCLOCK"), PINCTRL_PIN(120, "SATA_SLOAD"), PINCTRL_PIN(121, "SATA_SDATAOUT1"), PINCTRL_PIN(122, "SATA_SDATAOUT0"), PINCTRL_PIN(123, "GPP_F_14"), PINCTRL_PIN(124, "USB_OCB_4"), PINCTRL_PIN(125, "USB_OCB_5"), PINCTRL_PIN(126, "USB_OCB_6"), PINCTRL_PIN(127, "USB_OCB_7"), PINCTRL_PIN(128, "L_VDDEN"), PINCTRL_PIN(129, "L_BKLTEN"), PINCTRL_PIN(130, "L_BKLTCTL"), PINCTRL_PIN(131, "GPP_F_22"), PINCTRL_PIN(132, "GPP_F_23"), /* GPP_G */ PINCTRL_PIN(133, "FAN_TACH_0"), PINCTRL_PIN(134, "FAN_TACH_1"), PINCTRL_PIN(135, "FAN_TACH_2"), PINCTRL_PIN(136, "FAN_TACH_3"), PINCTRL_PIN(137, "FAN_TACH_4"), PINCTRL_PIN(138, "FAN_TACH_5"), PINCTRL_PIN(139, "FAN_TACH_6"), PINCTRL_PIN(140, "FAN_TACH_7"), PINCTRL_PIN(141, "FAN_PWM_0"), PINCTRL_PIN(142, "FAN_PWM_1"), PINCTRL_PIN(143, "FAN_PWM_2"), PINCTRL_PIN(144, "FAN_PWM_3"), PINCTRL_PIN(145, "GSXDOUT"), PINCTRL_PIN(146, "GSXSLOAD"), PINCTRL_PIN(147, "GSXDIN"), PINCTRL_PIN(148, "GSXRESETB"), 
PINCTRL_PIN(149, "GSXCLK"), PINCTRL_PIN(150, "ADR_COMPLETE"), PINCTRL_PIN(151, "NMIB"), PINCTRL_PIN(152, "SMIB"), PINCTRL_PIN(153, "GPP_G_20"), PINCTRL_PIN(154, "GPP_G_21"), PINCTRL_PIN(155, "GPP_G_22"), PINCTRL_PIN(156, "GPP_G_23"), /* GPP_H */ PINCTRL_PIN(157, "SRCCLKREQB_6"), PINCTRL_PIN(158, "SRCCLKREQB_7"), PINCTRL_PIN(159, "SRCCLKREQB_8"), PINCTRL_PIN(160, "SRCCLKREQB_9"), PINCTRL_PIN(161, "SRCCLKREQB_10"), PINCTRL_PIN(162, "SRCCLKREQB_11"), PINCTRL_PIN(163, "SRCCLKREQB_12"), PINCTRL_PIN(164, "SRCCLKREQB_13"), PINCTRL_PIN(165, "SRCCLKREQB_14"), PINCTRL_PIN(166, "SRCCLKREQB_15"), PINCTRL_PIN(167, "SML2CLK"), PINCTRL_PIN(168, "SML2DATA"), PINCTRL_PIN(169, "SML2ALERTB"), PINCTRL_PIN(170, "SML3CLK"), PINCTRL_PIN(171, "SML3DATA"), PINCTRL_PIN(172, "SML3ALERTB"), PINCTRL_PIN(173, "SML4CLK"), PINCTRL_PIN(174, "SML4DATA"), PINCTRL_PIN(175, "SML4ALERTB"), PINCTRL_PIN(176, "ISH_I2C0_SDA"), PINCTRL_PIN(177, "ISH_I2C0_SCL"), PINCTRL_PIN(178, "ISH_I2C1_SDA"), PINCTRL_PIN(179, "ISH_I2C1_SCL"), PINCTRL_PIN(180, "GPP_H_23"), /* GPP_I */ PINCTRL_PIN(181, "DDSP_HDP_0"), PINCTRL_PIN(182, "DDSP_HDP_1"), PINCTRL_PIN(183, "DDSP_HDP_2"), PINCTRL_PIN(184, "DDSP_HDP_3"), PINCTRL_PIN(185, "EDP_HPD"), PINCTRL_PIN(186, "DDPB_CTRLCLK"), PINCTRL_PIN(187, "DDPB_CTRLDATA"), PINCTRL_PIN(188, "DDPC_CTRLCLK"), PINCTRL_PIN(189, "DDPC_CTRLDATA"), PINCTRL_PIN(190, "DDPD_CTRLCLK"), PINCTRL_PIN(191, "DDPD_CTRLDATA"), }; static const unsigned spth_spi0_pins[] = { 39, 40, 41, 42 }; static const unsigned spth_spi1_pins[] = { 43, 44, 45, 46 }; static const unsigned spth_uart0_pins[] = { 56, 57, 58, 59 }; static const unsigned spth_uart1_pins[] = { 60, 61, 62, 63 }; static const unsigned spth_uart2_pins[] = { 68, 69, 71, 71 }; static const unsigned spth_i2c0_pins[] = { 64, 65 }; static const unsigned spth_i2c1_pins[] = { 66, 67 }; static const unsigned spth_i2c2_pins[] = { 76, 95 }; static const struct intel_pingroup spth_groups[] = { PIN_GROUP("spi0_grp", spth_spi0_pins, 1), PIN_GROUP("spi1_grp", spth_spi1_pins, 1), PIN_GROUP("uart0_grp", spth_uart0_pins, 1), PIN_GROUP("uart1_grp", spth_uart1_pins, 1), PIN_GROUP("uart2_grp", spth_uart2_pins, 1), PIN_GROUP("i2c0_grp", spth_i2c0_pins, 1), PIN_GROUP("i2c1_grp", spth_i2c1_pins, 1), PIN_GROUP("i2c2_grp", spth_i2c2_pins, 2), }; static const char * const spth_spi0_groups[] = { "spi0_grp" }; static const char * const spth_spi1_groups[] = { "spi0_grp" }; static const char * const spth_uart0_groups[] = { "uart0_grp" }; static const char * const spth_uart1_groups[] = { "uart1_grp" }; static const char * const spth_uart2_groups[] = { "uart2_grp" }; static const char * const spth_i2c0_groups[] = { "i2c0_grp" }; static const char * const spth_i2c1_groups[] = { "i2c1_grp" }; static const char * const spth_i2c2_groups[] = { "i2c2_grp" }; static const struct intel_function spth_functions[] = { FUNCTION("spi0", spth_spi0_groups), FUNCTION("spi1", spth_spi1_groups), FUNCTION("uart0", spth_uart0_groups), FUNCTION("uart1", spth_uart1_groups), FUNCTION("uart2", spth_uart2_groups), FUNCTION("i2c0", spth_i2c0_groups), FUNCTION("i2c1", spth_i2c1_groups), FUNCTION("i2c2", spth_i2c2_groups), }; static const struct intel_padgroup spth_community0_gpps[] = { SPT_H_GPP(0, 0, 23, 0), /* GPP_A */ SPT_H_GPP(1, 24, 47, 24), /* GPP_B */ }; static const struct intel_padgroup spth_community1_gpps[] = { SPT_H_GPP(0, 48, 71, 48), /* GPP_C */ SPT_H_GPP(1, 72, 95, 72), /* GPP_D */ SPT_H_GPP(2, 96, 108, 96), /* GPP_E */ SPT_H_GPP(3, 109, 132, 120), /* GPP_F */ SPT_H_GPP(4, 133, 156, 144), /* GPP_G */ SPT_H_GPP(5, 157, 
180, 168), /* GPP_H */ }; static const struct intel_padgroup spth_community3_gpps[] = { SPT_H_GPP(0, 181, 191, 192), /* GPP_I */ }; static const struct intel_community spth_communities[] = { SPT_H_COMMUNITY(0, 0, 47, spth_community0_gpps), SPT_H_COMMUNITY(1, 48, 180, spth_community1_gpps), SPT_H_COMMUNITY(2, 181, 191, spth_community3_gpps), }; static const struct intel_pinctrl_soc_data spth_soc_data = { .pins = spth_pins, .npins = ARRAY_SIZE(spth_pins), .groups = spth_groups, .ngroups = ARRAY_SIZE(spth_groups), .functions = spth_functions, .nfunctions = ARRAY_SIZE(spth_functions), .communities = spth_communities, .ncommunities = ARRAY_SIZE(spth_communities), }; static const struct acpi_device_id spt_pinctrl_acpi_match[] = { { "INT344B", (kernel_ulong_t)&sptlp_soc_data }, { "INT3451", (kernel_ulong_t)&spth_soc_data }, { "INT345D", (kernel_ulong_t)&spth_soc_data }, { } }; MODULE_DEVICE_TABLE(acpi, spt_pinctrl_acpi_match); static struct platform_driver spt_pinctrl_driver = { .probe = intel_pinctrl_probe_by_hid, .driver = { .name = "sunrisepoint-pinctrl", .acpi_match_table = spt_pinctrl_acpi_match, .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops), }, }; static int __init spt_pinctrl_init(void) { return platform_driver_register(&spt_pinctrl_driver); } subsys_initcall(spt_pinctrl_init); static void __exit spt_pinctrl_exit(void) { platform_driver_unregister(&spt_pinctrl_driver); } module_exit(spt_pinctrl_exit); MODULE_AUTHOR("Mathias Nyman <[email protected]>"); MODULE_AUTHOR("Mika Westerberg <[email protected]>"); MODULE_DESCRIPTION("Intel Sunrisepoint PCH pinctrl/GPIO driver"); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS("PINCTRL_INTEL");
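/*
 * Editor's note - consumer-side sketch, not part of the driver above.
 * Leaf drivers do not call the pinctrl driver directly; they select one
 * of the FUNCTION()/PIN_GROUP() combinations defined above through the
 * generic pinctrl consumer API (in practice the driver core usually
 * applies the "default" state automatically before probe). The function
 * name below is hypothetical.
 */
#include <linux/pinctrl/consumer.h>

static int example_apply_default_pins(struct device *dev)
{
	struct pinctrl *p;
	struct pinctrl_state *s;

	p = devm_pinctrl_get(dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
	if (IS_ERR(s))
		return PTR_ERR(s);

	/* Muxes e.g. the "uart2" function onto "uart2_grp" */
	return pinctrl_select_state(p, s);
}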
// SPDX-License-Identifier: GPL-2.0 /* * Sophgo SG2042 Clock Generator Driver * * Copyright (C) 2024 Sophgo Technology Inc. * Copyright (C) 2024 Chen Wang <[email protected]> */ #include <linux/array_size.h> #include <linux/bits.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/platform_device.h> #include <asm/div64.h> #include <dt-bindings/clock/sophgo,sg2042-clkgen.h> #include "clk-sg2042.h" /* Registers defined in SYS_CTRL */ #define R_PLL_BEGIN 0xC0 #define R_PLL_STAT (0xC0 - R_PLL_BEGIN) #define R_PLL_CLKEN_CONTROL (0xC4 - R_PLL_BEGIN) #define R_MPLL_CONTROL (0xE8 - R_PLL_BEGIN) #define R_FPLL_CONTROL (0xF4 - R_PLL_BEGIN) #define R_DPLL0_CONTROL (0xF8 - R_PLL_BEGIN) #define R_DPLL1_CONTROL (0xFC - R_PLL_BEGIN) /* Registers defined in CLOCK */ #define R_CLKENREG0 0x00 #define R_CLKENREG1 0x04 #define R_CLKSELREG0 0x20 #define R_CLKDIVREG0 0x40 #define R_CLKDIVREG1 0x44 #define R_CLKDIVREG2 0x48 #define R_CLKDIVREG3 0x4C #define R_CLKDIVREG4 0x50 #define R_CLKDIVREG5 0x54 #define R_CLKDIVREG6 0x58 #define R_CLKDIVREG7 0x5C #define R_CLKDIVREG8 0x60 #define R_CLKDIVREG9 0x64 #define R_CLKDIVREG10 0x68 #define R_CLKDIVREG11 0x6C #define R_CLKDIVREG12 0x70 #define R_CLKDIVREG13 0x74 #define R_CLKDIVREG14 0x78 #define R_CLKDIVREG15 0x7C #define R_CLKDIVREG16 0x80 #define R_CLKDIVREG17 0x84 #define R_CLKDIVREG18 0x88 #define R_CLKDIVREG19 0x8C #define R_CLKDIVREG20 0x90 #define R_CLKDIVREG21 0x94 #define R_CLKDIVREG22 0x98 #define R_CLKDIVREG23 0x9C #define R_CLKDIVREG24 0xA0 #define R_CLKDIVREG25 0xA4 #define R_CLKDIVREG26 0xA8 #define R_CLKDIVREG27 0xAC #define R_CLKDIVREG28 0xB0 #define R_CLKDIVREG29 0xB4 #define R_CLKDIVREG30 0xB8 /* All the following shift values are the same for all DIV registers */ #define SHIFT_DIV_RESET_CTRL 0 #define SHIFT_DIV_FACTOR_SEL 3 #define SHIFT_DIV_FACTOR 16 /** * struct sg2042_divider_clock - Divider clock * @hw: clk_hw for initialization * @id: used to map clk_onecell_data * @reg: used for readl/writel. * **NOTE**: DIV registers are ALL in CLOCK! * @lock: spinlock to protect register access; modifications of the * frequency can only be served one at a time * @offset_ctrl: offset of divider control registers * @shift: shift of "Clock Divider Factor" in divider control register * @width: width of "Clock Divider Factor" in divider control register * @div_flags: private flags for this clock, not framework-specific ones * @initval: In the divider control register, we can configure whether * to use the value of "Clock Divider Factor" or just use * the initial value pre-configured by IC. BIT[3] controls * this; by default (value is 0) the initial value * is used. * **NOTE** that we cannot read back the initial value (the * default value at power-on), and the default value of "Clock Divider * Factor" is zero, which looks like a hardware design flaw * that should have been kept in sync with the initial value. So in * software we have to add a configuration item (initval) * to manually configure this value and use it when BIT[3] * is zero.
*/ struct sg2042_divider_clock { struct clk_hw hw; unsigned int id; void __iomem *reg; /* protect register access */ spinlock_t *lock; u32 offset_ctrl; u8 shift; u8 width; u8 div_flags; u32 initval; }; #define to_sg2042_clk_divider(_hw) \ container_of(_hw, struct sg2042_divider_clock, hw) /** * struct sg2042_gate_clock - Gate clock * @hw: clk_hw for initialization * @id: used to map clk_onecell_data * @offset_enable: offset of gate enable registers * @bit_idx: which bit in the register controls gating of this clock */ struct sg2042_gate_clock { struct clk_hw hw; unsigned int id; u32 offset_enable; u8 bit_idx; }; /** * struct sg2042_mux_clock - Mux clock * @hw: clk_hw for initialization * @id: used to map clk_onecell_data * @offset_select: offset of mux selection registers * **NOTE**: MUX registers are ALL in CLOCK! * @shift: shift of "Clock Select" in mux selection register * @width: width of "Clock Select" in mux selection register * @clk_nb: used for notification * @original_index: set by notifier callback */ struct sg2042_mux_clock { struct clk_hw hw; unsigned int id; u32 offset_select; u8 shift; u8 width; struct notifier_block clk_nb; u8 original_index; }; #define to_sg2042_mux_nb(_nb) container_of(_nb, struct sg2042_mux_clock, clk_nb) static unsigned long sg2042_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct sg2042_divider_clock *divider = to_sg2042_clk_divider(hw); unsigned long ret_rate; u32 val; if (!(readl(divider->reg) & BIT(SHIFT_DIV_FACTOR_SEL))) { val = divider->initval; } else { val = readl(divider->reg) >> divider->shift; val &= clk_div_mask(divider->width); } ret_rate = divider_recalc_rate(hw, parent_rate, val, NULL, divider->div_flags, divider->width); pr_debug("--> %s: divider_recalc_rate: ret_rate = %ld\n", clk_hw_get_name(hw), ret_rate); return ret_rate; } static long sg2042_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { struct sg2042_divider_clock *divider = to_sg2042_clk_divider(hw); unsigned long ret_rate; u32 bestdiv; /* if read only, just return current value */ if (divider->div_flags & CLK_DIVIDER_READ_ONLY) { if (!(readl(divider->reg) & BIT(SHIFT_DIV_FACTOR_SEL))) { bestdiv = divider->initval; } else { bestdiv = readl(divider->reg) >> divider->shift; bestdiv &= clk_div_mask(divider->width); } ret_rate = DIV_ROUND_UP_ULL((u64)*prate, bestdiv); } else { ret_rate = divider_round_rate(hw, rate, prate, NULL, divider->width, divider->div_flags); } pr_debug("--> %s: divider_round_rate: val = %ld\n", clk_hw_get_name(hw), ret_rate); return ret_rate; } static int sg2042_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct sg2042_divider_clock *divider = to_sg2042_clk_divider(hw); unsigned long flags = 0; u32 val, val2, value; value = divider_get_val(rate, parent_rate, NULL, divider->width, divider->div_flags); if (divider->lock) spin_lock_irqsave(divider->lock, flags); else __acquire(divider->lock); /* * The sequence of clock frequency modification is: * Assert to reset divider. * Modify the value of Clock Divide Factor (and High Wide if needed). * De-assert to restore divided clock with new frequency. 
*/ val = readl(divider->reg); /* assert */ val &= ~BIT(SHIFT_DIV_RESET_CTRL); writel(val, divider->reg); if (divider->div_flags & CLK_DIVIDER_HIWORD_MASK) { val = clk_div_mask(divider->width) << (divider->shift + 16); } else { val = readl(divider->reg); val &= ~(clk_div_mask(divider->width) << divider->shift); } val |= value << divider->shift; val |= BIT(SHIFT_DIV_FACTOR_SEL); writel(val, divider->reg); val2 = val; /* de-assert */ val |= BIT(SHIFT_DIV_RESET_CTRL); writel(val, divider->reg); if (divider->lock) spin_unlock_irqrestore(divider->lock, flags); else __release(divider->lock); pr_debug("--> %s: divider_set_rate: register val = 0x%x\n", clk_hw_get_name(hw), val2); return 0; } static const struct clk_ops sg2042_clk_divider_ops = { .recalc_rate = sg2042_clk_divider_recalc_rate, .round_rate = sg2042_clk_divider_round_rate, .set_rate = sg2042_clk_divider_set_rate, }; static const struct clk_ops sg2042_clk_divider_ro_ops = { .recalc_rate = sg2042_clk_divider_recalc_rate, .round_rate = sg2042_clk_divider_round_rate, }; /* * Clock initialization macro naming rules: * FW: use CLK_HW_INIT_FW_NAME * HW: use CLK_HW_INIT_HW * HWS: use CLK_HW_INIT_HWS * RO: means Read-Only */ #define SG2042_DIV_FW(_id, _name, _parent, \ _r_ctrl, _shift, _width, \ _div_flag, _initval) { \ .id = _id, \ .hw.init = CLK_HW_INIT_FW_NAME( \ _name, \ _parent, \ &sg2042_clk_divider_ops, \ 0), \ .offset_ctrl = _r_ctrl, \ .shift = _shift, \ .width = _width, \ .div_flags = _div_flag, \ .initval = _initval, \ } #define SG2042_DIV_FW_RO(_id, _name, _parent, \ _r_ctrl, _shift, _width, \ _div_flag, _initval) { \ .id = _id, \ .hw.init = CLK_HW_INIT_FW_NAME( \ _name, \ _parent, \ &sg2042_clk_divider_ro_ops, \ 0), \ .offset_ctrl = _r_ctrl, \ .shift = _shift, \ .width = _width, \ .div_flags = (_div_flag) | CLK_DIVIDER_READ_ONLY, \ .initval = _initval, \ } #define SG2042_DIV_HW(_id, _name, _parent, \ _r_ctrl, _shift, _width, \ _div_flag, _initval) { \ .id = _id, \ .hw.init = CLK_HW_INIT_HW( \ _name, \ _parent, \ &sg2042_clk_divider_ops, \ 0), \ .offset_ctrl = _r_ctrl, \ .shift = _shift, \ .width = _width, \ .div_flags = _div_flag, \ .initval = _initval, \ } #define SG2042_DIV_HW_RO(_id, _name, _parent, \ _r_ctrl, _shift, _width, \ _div_flag, _initval) { \ .id = _id, \ .hw.init = CLK_HW_INIT_HW( \ _name, \ _parent, \ &sg2042_clk_divider_ro_ops, \ 0), \ .offset_ctrl = _r_ctrl, \ .shift = _shift, \ .width = _width, \ .div_flags = (_div_flag) | CLK_DIVIDER_READ_ONLY, \ .initval = _initval, \ } #define SG2042_DIV_HWS(_id, _name, _parent, \ _r_ctrl, _shift, _width, \ _div_flag, _initval) { \ .id = _id, \ .hw.init = CLK_HW_INIT_HWS( \ _name, \ _parent, \ &sg2042_clk_divider_ops, \ 0), \ .offset_ctrl = _r_ctrl, \ .shift = _shift, \ .width = _width, \ .div_flags = _div_flag, \ .initval = _initval, \ } #define SG2042_DIV_HWS_RO(_id, _name, _parent, \ _r_ctrl, _shift, _width, \ _div_flag, _initval) { \ .id = _id, \ .hw.init = CLK_HW_INIT_HWS( \ _name, \ _parent, \ &sg2042_clk_divider_ro_ops, \ 0), \ .offset_ctrl = _r_ctrl, \ .shift = _shift, \ .width = _width, \ .div_flags = (_div_flag) | CLK_DIVIDER_READ_ONLY, \ .initval = _initval, \ } #define SG2042_GATE_HWS(_id, _name, _parent, _flags, \ _r_enable, _bit_idx) { \ .id = _id, \ .hw.init = CLK_HW_INIT_HWS( \ _name, \ _parent, \ NULL, \ _flags), \ .offset_enable = _r_enable, \ .bit_idx = _bit_idx, \ } #define SG2042_GATE_HW(_id, _name, _parent, _flags, \ _r_enable, _bit_idx) { \ .id = _id, \ .hw.init = CLK_HW_INIT_HW( \ _name, \ _parent, \ NULL, \ _flags), \ .offset_enable = _r_enable, \ 
.bit_idx = _bit_idx, \ } #define SG2042_GATE_FW(_id, _name, _parent, _flags, \ _r_enable, _bit_idx) { \ .id = _id, \ .hw.init = CLK_HW_INIT_FW_NAME( \ _name, \ _parent, \ NULL, \ _flags), \ .offset_enable = _r_enable, \ .bit_idx = _bit_idx, \ } #define SG2042_MUX(_id, _name, _parents, _flags, _r_select, _shift, _width) { \ .id = _id, \ .hw.init = CLK_HW_INIT_PARENTS_HW( \ _name, \ _parents, \ NULL, \ _flags), \ .offset_select = _r_select, \ .shift = _shift, \ .width = _width, \ } /* * Clock items in the array are sorted according to the clock-tree diagram, * from top to bottom, from upstream to downstream. Read TRM for details. */ /* updated during probe/registration */ static const struct clk_hw *clk_gate_ddr01_div0[] = { NULL }; static const struct clk_hw *clk_gate_ddr01_div1[] = { NULL }; static const struct clk_hw *clk_gate_ddr23_div0[] = { NULL }; static const struct clk_hw *clk_gate_ddr23_div1[] = { NULL }; static const struct clk_hw *clk_gate_rp_cpu_normal_div0[] = { NULL }; static const struct clk_hw *clk_gate_rp_cpu_normal_div1[] = { NULL }; static const struct clk_hw *clk_gate_axi_ddr_div0[] = { NULL }; static const struct clk_hw *clk_gate_axi_ddr_div1[] = { NULL }; static const struct sg2042_gate_clock sg2042_gate_clks_level_1[] = { SG2042_GATE_FW(GATE_CLK_DDR01_DIV0, "clk_gate_ddr01_div0", "dpll0", CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, R_CLKDIVREG27, 4), SG2042_GATE_FW(GATE_CLK_DDR01_DIV1, "clk_gate_ddr01_div1", "fpll", CLK_IS_CRITICAL, R_CLKDIVREG28, 4), SG2042_GATE_FW(GATE_CLK_DDR23_DIV0, "clk_gate_ddr23_div0", "dpll1", CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, R_CLKDIVREG29, 4), SG2042_GATE_FW(GATE_CLK_DDR23_DIV1, "clk_gate_ddr23_div1", "fpll", CLK_IS_CRITICAL, R_CLKDIVREG30, 4), SG2042_GATE_FW(GATE_CLK_RP_CPU_NORMAL_DIV0, "clk_gate_rp_cpu_normal_div0", "mpll", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, R_CLKDIVREG0, 4), SG2042_GATE_FW(GATE_CLK_RP_CPU_NORMAL_DIV1, "clk_gate_rp_cpu_normal_div1", "fpll", CLK_IS_CRITICAL, R_CLKDIVREG1, 4), SG2042_GATE_FW(GATE_CLK_AXI_DDR_DIV0, "clk_gate_axi_ddr_div0", "mpll", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, R_CLKDIVREG25, 4), SG2042_GATE_FW(GATE_CLK_AXI_DDR_DIV1, "clk_gate_axi_ddr_div1", "fpll", CLK_IS_CRITICAL, R_CLKDIVREG26, 4), }; #define DEF_DIVFLAG (CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO) static struct sg2042_divider_clock sg2042_div_clks_level_1[] = { SG2042_DIV_HWS_RO(DIV_CLK_DPLL0_DDR01_0, "clk_div_ddr01_0", clk_gate_ddr01_div0, R_CLKDIVREG27, 16, 5, DEF_DIVFLAG, 1), SG2042_DIV_HWS_RO(DIV_CLK_FPLL_DDR01_1, "clk_div_ddr01_1", clk_gate_ddr01_div1, R_CLKDIVREG28, 16, 5, DEF_DIVFLAG, 1), SG2042_DIV_HWS_RO(DIV_CLK_DPLL1_DDR23_0, "clk_div_ddr23_0", clk_gate_ddr23_div0, R_CLKDIVREG29, 16, 5, DEF_DIVFLAG, 1), SG2042_DIV_HWS_RO(DIV_CLK_FPLL_DDR23_1, "clk_div_ddr23_1", clk_gate_ddr23_div1, R_CLKDIVREG30, 16, 5, DEF_DIVFLAG, 1), SG2042_DIV_HWS(DIV_CLK_MPLL_RP_CPU_NORMAL_0, "clk_div_rp_cpu_normal_0", clk_gate_rp_cpu_normal_div0, R_CLKDIVREG0, 16, 5, DEF_DIVFLAG, 1), SG2042_DIV_HWS(DIV_CLK_FPLL_RP_CPU_NORMAL_1, "clk_div_rp_cpu_normal_1", clk_gate_rp_cpu_normal_div1, R_CLKDIVREG1, 16, 5, DEF_DIVFLAG, 1), SG2042_DIV_HWS(DIV_CLK_MPLL_AXI_DDR_0, "clk_div_axi_ddr_0", clk_gate_axi_ddr_div0, R_CLKDIVREG25, 16, 5, DEF_DIVFLAG, 2), SG2042_DIV_HWS(DIV_CLK_FPLL_AXI_DDR_1, "clk_div_axi_ddr_1", clk_gate_axi_ddr_div1, R_CLKDIVREG26, 16, 5, DEF_DIVFLAG, 1), }; /* * Note: regarding names for mux clock, "0/1" or "div0/div1" means the * first/second parent input source, not the register value. 
* For example: * "clk_div_ddr01_0" is the name of Clock divider 0 control of DDR01, and * "clk_gate_ddr01_div0" is the gate clock in front of the "clk_div_ddr01_0", * they are both controlled by register CLKDIVREG27; * "clk_div_ddr01_1" is the name of Clock divider 1 control of DDR01, and * "clk_gate_ddr01_div1" is the gate clock in front of the "clk_div_ddr01_1", * they are both controlled by register CLKDIVREG28; * As for the register value of the mux selection, take the Clock Select for DDR01's clock * as an example, see CLKSELREG0, bit[2]. * 1: Select in_dpll0_clk as clock source, corresponding to the parent input * source from "clk_div_ddr01_0". * 0: Select in_fpll_clk as clock source, corresponding to the parent input * source from "clk_div_ddr01_1". * So we need a table to define the array of register values corresponding to * the parent index and tell CCF about this when registering the mux clocks. */ static const u32 sg2042_mux_table[] = {1, 0}; /* Aliases just for easy reading */ #define clk_div_ddr01_0 (&sg2042_div_clks_level_1[0].hw) #define clk_div_ddr01_1 (&sg2042_div_clks_level_1[1].hw) #define clk_div_ddr23_0 (&sg2042_div_clks_level_1[2].hw) #define clk_div_ddr23_1 (&sg2042_div_clks_level_1[3].hw) #define clk_div_rp_cpu_normal_0 (&sg2042_div_clks_level_1[4].hw) #define clk_div_rp_cpu_normal_1 (&sg2042_div_clks_level_1[5].hw) #define clk_div_axi_ddr_0 (&sg2042_div_clks_level_1[6].hw) #define clk_div_axi_ddr_1 (&sg2042_div_clks_level_1[7].hw) static const struct clk_hw *clk_mux_ddr01_p[] = { clk_div_ddr01_0, clk_div_ddr01_1, }; static const struct clk_hw *clk_mux_ddr23_p[] = { clk_div_ddr23_0, clk_div_ddr23_1, }; static const struct clk_hw *clk_mux_rp_cpu_normal_p[] = { clk_div_rp_cpu_normal_0, clk_div_rp_cpu_normal_1, }; static const struct clk_hw *clk_mux_axi_ddr_p[] = { clk_div_axi_ddr_0, clk_div_axi_ddr_1, }; /* Mux clocks to be updated during probe/registration */ static const struct clk_hw *clk_mux_ddr01[] = { NULL }; static const struct clk_hw *clk_mux_ddr23[] = { NULL }; static const struct clk_hw *clk_mux_rp_cpu_normal[] = { NULL }; static const struct clk_hw *clk_mux_axi_ddr[] = { NULL }; static struct sg2042_mux_clock sg2042_mux_clks[] = { SG2042_MUX(MUX_CLK_DDR01, "clk_mux_ddr01", clk_mux_ddr01_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT | CLK_MUX_READ_ONLY, R_CLKSELREG0, 2, 1), SG2042_MUX(MUX_CLK_DDR23, "clk_mux_ddr23", clk_mux_ddr23_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT | CLK_MUX_READ_ONLY, R_CLKSELREG0, 3, 1), SG2042_MUX(MUX_CLK_RP_CPU_NORMAL, "clk_mux_rp_cpu_normal", clk_mux_rp_cpu_normal_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, R_CLKSELREG0, 0, 1), SG2042_MUX(MUX_CLK_AXI_DDR, "clk_mux_axi_ddr", clk_mux_axi_ddr_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, R_CLKSELREG0, 1, 1), }; /* Aliases just for easy reading */ #define clk_div_top_rp_cmn_div2 (&sg2042_div_clks_level_2[0].hw) #define clk_div_50m_a53 (&sg2042_div_clks_level_2[1].hw) #define clk_div_timer1 (&sg2042_div_clks_level_2[2].hw) #define clk_div_timer2 (&sg2042_div_clks_level_2[3].hw) #define clk_div_timer3 (&sg2042_div_clks_level_2[4].hw) #define clk_div_timer4 (&sg2042_div_clks_level_2[5].hw) #define clk_div_timer5 (&sg2042_div_clks_level_2[6].hw) #define clk_div_timer6 (&sg2042_div_clks_level_2[7].hw) #define clk_div_timer7 (&sg2042_div_clks_level_2[8].hw) #define clk_div_timer8 (&sg2042_div_clks_level_2[9].hw) #define clk_div_uart_500m (&sg2042_div_clks_level_2[10].hw) #define clk_div_ahb_lpc (&sg2042_div_clks_level_2[11].hw) #define clk_div_efuse (&sg2042_div_clks_level_2[12].hw)
#define clk_div_tx_eth0 (&sg2042_div_clks_level_2[13].hw) #define clk_div_ptp_ref_i_eth0 (&sg2042_div_clks_level_2[14].hw) #define clk_div_ref_eth0 (&sg2042_div_clks_level_2[15].hw) #define clk_div_emmc (&sg2042_div_clks_level_2[16].hw) #define clk_div_sd (&sg2042_div_clks_level_2[17].hw) #define clk_div_top_axi0 (&sg2042_div_clks_level_2[18].hw) #define clk_div_100k_emmc (&sg2042_div_clks_level_2[19].hw) #define clk_div_100k_sd (&sg2042_div_clks_level_2[20].hw) #define clk_div_gpio_db (&sg2042_div_clks_level_2[21].hw) #define clk_div_top_axi_hsperi (&sg2042_div_clks_level_2[22].hw) static struct sg2042_divider_clock sg2042_div_clks_level_2[] = { SG2042_DIV_HWS(DIV_CLK_FPLL_TOP_RP_CMN_DIV2, "clk_div_top_rp_cmn_div2", clk_mux_rp_cpu_normal, R_CLKDIVREG3, 16, 16, DEF_DIVFLAG, 2), SG2042_DIV_FW(DIV_CLK_FPLL_50M_A53, "clk_div_50m_a53", "fpll", R_CLKDIVREG2, 16, 8, DEF_DIVFLAG, 20), /* downstream of div_50m_a53 */ SG2042_DIV_HW(DIV_CLK_FPLL_DIV_TIMER1, "clk_div_timer1", clk_div_50m_a53, R_CLKDIVREG6, 16, 16, DEF_DIVFLAG, 1), SG2042_DIV_HW(DIV_CLK_FPLL_DIV_TIMER2, "clk_div_timer2", clk_div_50m_a53, R_CLKDIVREG7, 16, 16, DEF_DIVFLAG, 1), SG2042_DIV_HW(DIV_CLK_FPLL_DIV_TIMER3, "clk_div_timer3", clk_div_50m_a53, R_CLKDIVREG8, 16, 16, DEF_DIVFLAG, 1), SG2042_DIV_HW(DIV_CLK_FPLL_DIV_TIMER4, "clk_div_timer4", clk_div_50m_a53, R_CLKDIVREG9, 16, 16, DEF_DIVFLAG, 1), SG2042_DIV_HW(DIV_CLK_FPLL_DIV_TIMER5, "clk_div_timer5", clk_div_50m_a53, R_CLKDIVREG10, 16, 16, DEF_DIVFLAG, 1), SG2042_DIV_HW(DIV_CLK_FPLL_DIV_TIMER6, "clk_div_timer6", clk_div_50m_a53, R_CLKDIVREG11, 16, 16, DEF_DIVFLAG, 1), SG2042_DIV_HW(DIV_CLK_FPLL_DIV_TIMER7, "clk_div_timer7", clk_div_50m_a53, R_CLKDIVREG12, 16, 16, DEF_DIVFLAG, 1), SG2042_DIV_HW(DIV_CLK_FPLL_DIV_TIMER8, "clk_div_timer8", clk_div_50m_a53, R_CLKDIVREG13, 16, 16, DEF_DIVFLAG, 1), /* * Set clk_div_uart_500m as RO, because the width of CLKDIVREG4 is too * narrow for us to produce 115200. Use UART internal divider directly. 
*/ SG2042_DIV_FW_RO(DIV_CLK_FPLL_UART_500M, "clk_div_uart_500m", "fpll", R_CLKDIVREG4, 16, 7, DEF_DIVFLAG, 2), SG2042_DIV_FW(DIV_CLK_FPLL_AHB_LPC, "clk_div_ahb_lpc", "fpll", R_CLKDIVREG5, 16, 16, DEF_DIVFLAG, 5), SG2042_DIV_FW(DIV_CLK_FPLL_EFUSE, "clk_div_efuse", "fpll", R_CLKDIVREG14, 16, 7, DEF_DIVFLAG, 40), SG2042_DIV_FW(DIV_CLK_FPLL_TX_ETH0, "clk_div_tx_eth0", "fpll", R_CLKDIVREG16, 16, 11, DEF_DIVFLAG, 8), SG2042_DIV_FW(DIV_CLK_FPLL_PTP_REF_I_ETH0, "clk_div_ptp_ref_i_eth0", "fpll", R_CLKDIVREG17, 16, 8, DEF_DIVFLAG, 20), SG2042_DIV_FW(DIV_CLK_FPLL_REF_ETH0, "clk_div_ref_eth0", "fpll", R_CLKDIVREG18, 16, 8, DEF_DIVFLAG, 40), SG2042_DIV_FW(DIV_CLK_FPLL_EMMC, "clk_div_emmc", "fpll", R_CLKDIVREG19, 16, 5, DEF_DIVFLAG, 10), SG2042_DIV_FW(DIV_CLK_FPLL_SD, "clk_div_sd", "fpll", R_CLKDIVREG21, 16, 5, DEF_DIVFLAG, 10), SG2042_DIV_FW(DIV_CLK_FPLL_TOP_AXI0, "clk_div_top_axi0", "fpll", R_CLKDIVREG23, 16, 5, DEF_DIVFLAG, 10), /* downstream of div_top_axi0 */ SG2042_DIV_HW(DIV_CLK_FPLL_100K_EMMC, "clk_div_100k_emmc", clk_div_top_axi0, R_CLKDIVREG20, 16, 16, DEF_DIVFLAG, 1000), SG2042_DIV_HW(DIV_CLK_FPLL_100K_SD, "clk_div_100k_sd", clk_div_top_axi0, R_CLKDIVREG22, 16, 16, DEF_DIVFLAG, 1000), SG2042_DIV_HW(DIV_CLK_FPLL_GPIO_DB, "clk_div_gpio_db", clk_div_top_axi0, R_CLKDIVREG15, 16, 16, DEF_DIVFLAG, 1000), SG2042_DIV_FW(DIV_CLK_FPLL_TOP_AXI_HSPERI, "clk_div_top_axi_hsperi", "fpll", R_CLKDIVREG24, 16, 5, DEF_DIVFLAG, 4), }; /* Gate clocks to be updated during probe/registration */ static const struct clk_hw *clk_gate_rp_cpu_normal[] = { NULL }; static const struct clk_hw *clk_gate_top_rp_cmn_div2[] = { NULL }; static const struct sg2042_gate_clock sg2042_gate_clks_level_2[] = { SG2042_GATE_HWS(GATE_CLK_DDR01, "clk_gate_ddr01", clk_mux_ddr01, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, R_CLKENREG1, 14), SG2042_GATE_HWS(GATE_CLK_DDR23, "clk_gate_ddr23", clk_mux_ddr23, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, R_CLKENREG1, 15), SG2042_GATE_HWS(GATE_CLK_RP_CPU_NORMAL, "clk_gate_rp_cpu_normal", clk_mux_rp_cpu_normal, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, R_CLKENREG0, 0), SG2042_GATE_HWS(GATE_CLK_AXI_DDR, "clk_gate_axi_ddr", clk_mux_axi_ddr, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, R_CLKENREG1, 13), /* the above are gate clocks directly downstream of the muxes */ /* downstream of clk_div_top_rp_cmn_div2 */ SG2042_GATE_HW(GATE_CLK_TOP_RP_CMN_DIV2, "clk_gate_top_rp_cmn_div2", clk_div_top_rp_cmn_div2, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, R_CLKENREG0, 2), SG2042_GATE_HWS(GATE_CLK_HSDMA, "clk_gate_hsdma", clk_gate_top_rp_cmn_div2, CLK_SET_RATE_PARENT, R_CLKENREG1, 10), /* * downstream of clk_gate_rp_cpu_normal * * FIXME: there should be one fixed 1/2 DIV between clk_gate_rp_cpu_normal * and clk_gate_axi_pcie0/clk_gate_axi_pcie1. * But the 1/2 DIV is fixed and no configurable register is exported, so * when reading from these two clocks, the rate values are still the * same as that of clk_gate_rp_cpu_normal, which is not correct. * This only affects the value read.
*/ SG2042_GATE_HWS(GATE_CLK_AXI_PCIE0, "clk_gate_axi_pcie0", clk_gate_rp_cpu_normal, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, R_CLKENREG1, 8), SG2042_GATE_HWS(GATE_CLK_AXI_PCIE1, "clk_gate_axi_pcie1", clk_gate_rp_cpu_normal, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, R_CLKENREG1, 9), /* downstream of div_50m_a53 */ SG2042_GATE_HW(GATE_CLK_A53_50M, "clk_gate_a53_50m", clk_div_50m_a53, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, R_CLKENREG0, 1), SG2042_GATE_HW(GATE_CLK_TIMER1, "clk_gate_timer1", clk_div_timer1, CLK_SET_RATE_PARENT, R_CLKENREG0, 12), SG2042_GATE_HW(GATE_CLK_TIMER2, "clk_gate_timer2", clk_div_timer2, CLK_SET_RATE_PARENT, R_CLKENREG0, 13), SG2042_GATE_HW(GATE_CLK_TIMER3, "clk_gate_timer3", clk_div_timer3, CLK_SET_RATE_PARENT, R_CLKENREG0, 14), SG2042_GATE_HW(GATE_CLK_TIMER4, "clk_gate_timer4", clk_div_timer4, CLK_SET_RATE_PARENT, R_CLKENREG0, 15), SG2042_GATE_HW(GATE_CLK_TIMER5, "clk_gate_timer5", clk_div_timer5, CLK_SET_RATE_PARENT, R_CLKENREG0, 16), SG2042_GATE_HW(GATE_CLK_TIMER6, "clk_gate_timer6", clk_div_timer6, CLK_SET_RATE_PARENT, R_CLKENREG0, 17), SG2042_GATE_HW(GATE_CLK_TIMER7, "clk_gate_timer7", clk_div_timer7, CLK_SET_RATE_PARENT, R_CLKENREG0, 18), SG2042_GATE_HW(GATE_CLK_TIMER8, "clk_gate_timer8", clk_div_timer8, CLK_SET_RATE_PARENT, R_CLKENREG0, 19), /* gate clocks downstream from div clocks one-to-one */ SG2042_GATE_HW(GATE_CLK_UART_500M, "clk_gate_uart_500m", clk_div_uart_500m, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, R_CLKENREG0, 4), SG2042_GATE_HW(GATE_CLK_AHB_LPC, "clk_gate_ahb_lpc", clk_div_ahb_lpc, CLK_SET_RATE_PARENT, R_CLKENREG0, 7), SG2042_GATE_HW(GATE_CLK_EFUSE, "clk_gate_efuse", clk_div_efuse, CLK_SET_RATE_PARENT, R_CLKENREG0, 20), SG2042_GATE_HW(GATE_CLK_TX_ETH0, "clk_gate_tx_eth0", clk_div_tx_eth0, CLK_SET_RATE_PARENT, R_CLKENREG0, 30), SG2042_GATE_HW(GATE_CLK_PTP_REF_I_ETH0, "clk_gate_ptp_ref_i_eth0", clk_div_ptp_ref_i_eth0, CLK_SET_RATE_PARENT, R_CLKENREG1, 0), SG2042_GATE_HW(GATE_CLK_REF_ETH0, "clk_gate_ref_eth0", clk_div_ref_eth0, CLK_SET_RATE_PARENT, R_CLKENREG1, 1), SG2042_GATE_HW(GATE_CLK_EMMC_100M, "clk_gate_emmc", clk_div_emmc, CLK_SET_RATE_PARENT, R_CLKENREG1, 3), SG2042_GATE_HW(GATE_CLK_SD_100M, "clk_gate_sd", clk_div_sd, CLK_SET_RATE_PARENT, R_CLKENREG1, 6), /* downstream of clk_div_top_axi0 */ SG2042_GATE_HW(GATE_CLK_AHB_ROM, "clk_gate_ahb_rom", clk_div_top_axi0, 0, R_CLKENREG0, 8), SG2042_GATE_HW(GATE_CLK_AHB_SF, "clk_gate_ahb_sf", clk_div_top_axi0, 0, R_CLKENREG0, 9), SG2042_GATE_HW(GATE_CLK_AXI_SRAM, "clk_gate_axi_sram", clk_div_top_axi0, CLK_IGNORE_UNUSED, R_CLKENREG0, 10), SG2042_GATE_HW(GATE_CLK_APB_TIMER, "clk_gate_apb_timer", clk_div_top_axi0, CLK_IGNORE_UNUSED, R_CLKENREG0, 11), SG2042_GATE_HW(GATE_CLK_APB_EFUSE, "clk_gate_apb_efuse", clk_div_top_axi0, 0, R_CLKENREG0, 21), SG2042_GATE_HW(GATE_CLK_APB_GPIO, "clk_gate_apb_gpio", clk_div_top_axi0, 0, R_CLKENREG0, 22), SG2042_GATE_HW(GATE_CLK_APB_GPIO_INTR, "clk_gate_apb_gpio_intr", clk_div_top_axi0, CLK_IS_CRITICAL, R_CLKENREG0, 23), SG2042_GATE_HW(GATE_CLK_APB_I2C, "clk_gate_apb_i2c", clk_div_top_axi0, 0, R_CLKENREG0, 26), SG2042_GATE_HW(GATE_CLK_APB_WDT, "clk_gate_apb_wdt", clk_div_top_axi0, 0, R_CLKENREG0, 27), SG2042_GATE_HW(GATE_CLK_APB_PWM, "clk_gate_apb_pwm", clk_div_top_axi0, 0, R_CLKENREG0, 28), SG2042_GATE_HW(GATE_CLK_APB_RTC, "clk_gate_apb_rtc", clk_div_top_axi0, 0, R_CLKENREG0, 29), SG2042_GATE_HW(GATE_CLK_TOP_AXI0, "clk_gate_top_axi0", clk_div_top_axi0, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, R_CLKENREG1, 11), /* downstream of DIV clocks which are sourced from 
clk_div_top_axi0 */ SG2042_GATE_HW(GATE_CLK_GPIO_DB, "clk_gate_gpio_db", clk_div_gpio_db, CLK_SET_RATE_PARENT, R_CLKENREG0, 24), SG2042_GATE_HW(GATE_CLK_100K_EMMC, "clk_gate_100k_emmc", clk_div_100k_emmc, CLK_SET_RATE_PARENT, R_CLKENREG1, 4), SG2042_GATE_HW(GATE_CLK_100K_SD, "clk_gate_100k_sd", clk_div_100k_sd, CLK_SET_RATE_PARENT, R_CLKENREG1, 7), /* downstream of clk_div_top_axi_hsperi */ SG2042_GATE_HW(GATE_CLK_SYSDMA_AXI, "clk_gate_sysdma_axi", clk_div_top_axi_hsperi, CLK_SET_RATE_PARENT, R_CLKENREG0, 3), SG2042_GATE_HW(GATE_CLK_APB_UART, "clk_gate_apb_uart", clk_div_top_axi_hsperi, CLK_SET_RATE_PARENT, R_CLKENREG0, 5), SG2042_GATE_HW(GATE_CLK_AXI_DBG_I2C, "clk_gate_axi_dbg_i2c", clk_div_top_axi_hsperi, CLK_SET_RATE_PARENT, R_CLKENREG0, 6), SG2042_GATE_HW(GATE_CLK_APB_SPI, "clk_gate_apb_spi", clk_div_top_axi_hsperi, CLK_SET_RATE_PARENT, R_CLKENREG0, 25), SG2042_GATE_HW(GATE_CLK_AXI_ETH0, "clk_gate_axi_eth0", clk_div_top_axi_hsperi, CLK_SET_RATE_PARENT, R_CLKENREG0, 31), SG2042_GATE_HW(GATE_CLK_AXI_EMMC, "clk_gate_axi_emmc", clk_div_top_axi_hsperi, CLK_SET_RATE_PARENT, R_CLKENREG1, 2), SG2042_GATE_HW(GATE_CLK_AXI_SD, "clk_gate_axi_sd", clk_div_top_axi_hsperi, CLK_SET_RATE_PARENT, R_CLKENREG1, 5), SG2042_GATE_HW(GATE_CLK_TOP_AXI_HSPERI, "clk_gate_top_axi_hsperi", clk_div_top_axi_hsperi, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, R_CLKENREG1, 12), }; static DEFINE_SPINLOCK(sg2042_clk_lock); static int sg2042_clk_register_divs(struct device *dev, struct sg2042_clk_data *clk_data, struct sg2042_divider_clock div_clks[], int num_div_clks) { struct sg2042_divider_clock *div; struct clk_hw *hw; int i, ret = 0; for (i = 0; i < num_div_clks; i++) { div = &div_clks[i]; if (div->div_flags & CLK_DIVIDER_HIWORD_MASK) { if (div->width + div->shift > 16) { pr_warn("divider value exceeds LOWORD field\n"); ret = -EINVAL; break; } } div->reg = clk_data->iobase + div->offset_ctrl; div->lock = &sg2042_clk_lock; hw = &div->hw; ret = devm_clk_hw_register(dev, hw); if (ret) { pr_err("failed to register clock %s\n", div->hw.init->name); break; } clk_data->onecell_data.hws[div->id] = hw; } return ret; } static int sg2042_clk_register_gates(struct device *dev, struct sg2042_clk_data *clk_data, const struct sg2042_gate_clock gate_clks[], int num_gate_clks) { const struct sg2042_gate_clock *gate; struct clk_hw *hw; int i, ret = 0; for (i = 0; i < num_gate_clks; i++) { gate = &gate_clks[i]; hw = __devm_clk_hw_register_gate (dev, NULL, gate->hw.init->name, NULL, gate->hw.init->parent_hws[0], NULL, gate->hw.init->flags, clk_data->iobase + gate->offset_enable, gate->bit_idx, 0, &sg2042_clk_lock); if (IS_ERR(hw)) { pr_err("failed to register clock %s\n", gate->hw.init->name); ret = PTR_ERR(hw); break; } clk_data->onecell_data.hws[gate->id] = hw; /* Updated some clocks which take the role of parent */ switch (gate->id) { case GATE_CLK_RP_CPU_NORMAL: *clk_gate_rp_cpu_normal = hw; break; case GATE_CLK_TOP_RP_CMN_DIV2: *clk_gate_top_rp_cmn_div2 = hw; break; } } return ret; } static int sg2042_clk_register_gates_fw(struct device *dev, struct sg2042_clk_data *clk_data, const struct sg2042_gate_clock gate_clks[], int num_gate_clks) { const struct sg2042_gate_clock *gate; struct clk_hw *hw; int i, ret = 0; for (i = 0; i < num_gate_clks; i++) { gate = &gate_clks[i]; hw = devm_clk_hw_register_gate_parent_data (dev, gate->hw.init->name, gate->hw.init->parent_data, gate->hw.init->flags, clk_data->iobase + gate->offset_enable, gate->bit_idx, 0, &sg2042_clk_lock); if (IS_ERR(hw)) { pr_err("failed to register clock %s\n", 
gate->hw.init->name); ret = PTR_ERR(hw); break; } clk_data->onecell_data.hws[gate->id] = hw; /* Update some clocks which take the role of parent */ switch (gate->id) { case GATE_CLK_DDR01_DIV0: *clk_gate_ddr01_div0 = hw; break; case GATE_CLK_DDR01_DIV1: *clk_gate_ddr01_div1 = hw; break; case GATE_CLK_DDR23_DIV0: *clk_gate_ddr23_div0 = hw; break; case GATE_CLK_DDR23_DIV1: *clk_gate_ddr23_div1 = hw; break; case GATE_CLK_RP_CPU_NORMAL_DIV0: *clk_gate_rp_cpu_normal_div0 = hw; break; case GATE_CLK_RP_CPU_NORMAL_DIV1: *clk_gate_rp_cpu_normal_div1 = hw; break; case GATE_CLK_AXI_DDR_DIV0: *clk_gate_axi_ddr_div0 = hw; break; case GATE_CLK_AXI_DDR_DIV1: *clk_gate_axi_ddr_div1 = hw; break; } } return ret; } static int sg2042_mux_notifier_cb(struct notifier_block *nb, unsigned long event, void *data) { struct sg2042_mux_clock *mux = to_sg2042_mux_nb(nb); const struct clk_ops *ops = &clk_mux_ops; struct clk_notifier_data *ndata = data; struct clk_hw *hw; int ret = 0; hw = __clk_get_hw(ndata->clk); /* Switch to fpll before changing the rate and restore it after that */ if (event == PRE_RATE_CHANGE) { mux->original_index = ops->get_parent(hw); /* * "1" is the array index of the second parent input source of * the mux. For SG2042, it's fpll for all mux clocks. * "0" is the array index of the first parent input source of * the mux. For SG2042, it's mpll. * FIXME, any good idea to avoid magic number? */ if (mux->original_index == 0) ret = ops->set_parent(hw, 1); } else if (event == POST_RATE_CHANGE) { ret = ops->set_parent(hw, mux->original_index); } return notifier_from_errno(ret); } static int sg2042_clk_register_muxs(struct device *dev, struct sg2042_clk_data *clk_data, struct sg2042_mux_clock mux_clks[], int num_mux_clks) { struct sg2042_mux_clock *mux; struct clk_hw *hw; int i, ret = 0; for (i = 0; i < num_mux_clks; i++) { mux = &mux_clks[i]; hw = __devm_clk_hw_register_mux (dev, NULL, mux->hw.init->name, mux->hw.init->num_parents, NULL, mux->hw.init->parent_hws, NULL, mux->hw.init->flags, clk_data->iobase + mux->offset_select, mux->shift, BIT(mux->width) - 1, 0, sg2042_mux_table, &sg2042_clk_lock); if (IS_ERR(hw)) { pr_err("failed to register clock %s\n", mux->hw.init->name); ret = PTR_ERR(hw); break; } clk_data->onecell_data.hws[mux->id] = hw; /* Update some clocks which take the role of parent */ switch (mux->id) { case MUX_CLK_DDR01: *clk_mux_ddr01 = hw; break; case MUX_CLK_DDR23: *clk_mux_ddr23 = hw; break; case MUX_CLK_RP_CPU_NORMAL: *clk_mux_rp_cpu_normal = hw; break; case MUX_CLK_AXI_DDR: *clk_mux_axi_ddr = hw; break; } /* * FIXME: Theoretically, we should set parent for the * mux, but it seems the hardware has done this for us with the * default value, so we don't set the parent again here.
*/ if (!(mux->hw.init->flags & CLK_MUX_READ_ONLY)) { mux->clk_nb.notifier_call = sg2042_mux_notifier_cb; ret = devm_clk_notifier_register(dev, hw->clk, &mux->clk_nb); if (ret) { pr_err("failed to register clock notifier for %s\n", mux->hw.init->name); break; } } } return ret; } static int sg2042_init_clkdata(struct platform_device *pdev, int num_clks, struct sg2042_clk_data **pp_clk_data) { struct sg2042_clk_data *clk_data = NULL; clk_data = devm_kzalloc(&pdev->dev, struct_size(clk_data, onecell_data.hws, num_clks), GFP_KERNEL); if (!clk_data) return -ENOMEM; clk_data->iobase = devm_platform_ioremap_resource(pdev, 0); if (WARN_ON(IS_ERR(clk_data->iobase))) return PTR_ERR(clk_data->iobase); clk_data->onecell_data.num = num_clks; *pp_clk_data = clk_data; return 0; } static int sg2042_clkgen_probe(struct platform_device *pdev) { struct sg2042_clk_data *clk_data = NULL; int num_clks; int ret; num_clks = ARRAY_SIZE(sg2042_div_clks_level_1) + ARRAY_SIZE(sg2042_div_clks_level_2) + ARRAY_SIZE(sg2042_gate_clks_level_1) + ARRAY_SIZE(sg2042_gate_clks_level_2) + ARRAY_SIZE(sg2042_mux_clks); ret = sg2042_init_clkdata(pdev, num_clks, &clk_data); if (ret) goto error_out; /* level-1 gates */ ret = sg2042_clk_register_gates_fw(&pdev->dev, clk_data, sg2042_gate_clks_level_1, ARRAY_SIZE(sg2042_gate_clks_level_1)); if (ret) goto error_out; /* level-1 div */ ret = sg2042_clk_register_divs(&pdev->dev, clk_data, sg2042_div_clks_level_1, ARRAY_SIZE(sg2042_div_clks_level_1)); if (ret) goto error_out; /* mux */ ret = sg2042_clk_register_muxs(&pdev->dev, clk_data, sg2042_mux_clks, ARRAY_SIZE(sg2042_mux_clks)); if (ret) goto error_out; /* level 2 div */ ret = sg2042_clk_register_divs(&pdev->dev, clk_data, sg2042_div_clks_level_2, ARRAY_SIZE(sg2042_div_clks_level_2)); if (ret) goto error_out; /* level 2 gate */ ret = sg2042_clk_register_gates(&pdev->dev, clk_data, sg2042_gate_clks_level_2, ARRAY_SIZE(sg2042_gate_clks_level_2)); if (ret) goto error_out; return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get, &clk_data->onecell_data); error_out: pr_err("%s failed error number %d\n", __func__, ret); return ret; } static const struct of_device_id sg2042_clkgen_match[] = { { .compatible = "sophgo,sg2042-clkgen" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sg2042_clkgen_match); static struct platform_driver sg2042_clkgen_driver = { .probe = sg2042_clkgen_probe, .driver = { .name = "clk-sophgo-sg2042-clkgen", .of_match_table = sg2042_clkgen_match, .suppress_bind_attrs = true, }, }; module_platform_driver(sg2042_clkgen_driver); MODULE_AUTHOR("Chen Wang"); MODULE_DESCRIPTION("Sophgo SG2042 clock generator driver"); MODULE_LICENSE("GPL");
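/*
 * Illustrative sketch (not part of the original sources): the divider
 * decode performed by sg2042_clk_divider_recalc_rate() above, redone as a
 * self-contained userspace function. Bit positions follow the driver
 * (BIT(3) selects the programmed factor; the factor lives at bit 16).
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_DIV_FACTOR_SEL (1u << 3)
#define DEMO_DIV_FACTOR_SHIFT 16

/* width-bit factor, CLK_DIVIDER_ONE_BASED semantics; 0 treated as 1 */
static unsigned long demo_div_rate(unsigned long parent_rate, uint32_t reg,
				   unsigned int width, uint32_t initval)
{
	uint32_t div;

	if (!(reg & DEMO_DIV_FACTOR_SEL))
		div = initval;		/* IC default factor in use */
	else
		div = (reg >> DEMO_DIV_FACTOR_SHIFT) & ((1u << width) - 1);
	if (!div)
		div = 1;		/* CLK_DIVIDER_ALLOW_ZERO */
	return parent_rate / div;
}

int main(void)
{
	/* factor select set, factor = 4: a 1 GHz parent yields 250 MHz */
	printf("%lu\n", demo_div_rate(1000000000UL,
				      DEMO_DIV_FACTOR_SEL | (4u << 16), 5, 1));
	return 0;
}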
// SPDX-License-Identifier: GPL-2.0-only /* * Cavium ThunderX SPI driver. * * Copyright (C) 2016 Cavium Inc. * Authors: Jan Glauber <[email protected]> */ #include <linux/module.h> #include <linux/pci.h> #include <linux/spi/spi.h> #include "spi-cavium.h" #define DRV_NAME "spi-thunderx" #define SYS_FREQ_DEFAULT 700000000 /* 700 Mhz */ static int thunderx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; struct spi_controller *host; struct octeon_spi *p; int ret; host = spi_alloc_host(dev, sizeof(struct octeon_spi)); if (!host) return -ENOMEM; p = spi_controller_get_devdata(host); ret = pcim_enable_device(pdev); if (ret) goto error; ret = pci_request_regions(pdev, DRV_NAME); if (ret) goto error; p->register_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); if (!p->register_base) { ret = -EINVAL; goto error; } p->regs.config = 0x1000; p->regs.status = 0x1008; p->regs.tx = 0x1010; p->regs.data = 0x1080; p->clk = devm_clk_get_enabled(dev, NULL); if (IS_ERR(p->clk)) { ret = PTR_ERR(p->clk); goto error; } p->sys_freq = clk_get_rate(p->clk); if (!p->sys_freq) p->sys_freq = SYS_FREQ_DEFAULT; dev_info(dev, "Set system clock to %u\n", p->sys_freq); host->flags = SPI_CONTROLLER_HALF_DUPLEX; host->num_chipselect = 4; host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | SPI_3WIRE; host->transfer_one_message = octeon_spi_transfer_one_message; host->bits_per_word_mask = SPI_BPW_MASK(8); host->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ; host->dev.of_node = pdev->dev.of_node; pci_set_drvdata(pdev, host); ret = devm_spi_register_controller(dev, host); if (ret) goto error; return 0; error: pci_release_regions(pdev); spi_controller_put(host); return ret; } static void thunderx_spi_remove(struct pci_dev *pdev) { struct spi_controller *host = pci_get_drvdata(pdev); struct octeon_spi *p; p = spi_controller_get_devdata(host); if (!p) return; pci_release_regions(pdev); /* Put everything in a known state. */ writeq(0, p->register_base + OCTEON_SPI_CFG(p)); } static const struct pci_device_id thunderx_spi_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa00b) }, { 0, } }; MODULE_DEVICE_TABLE(pci, thunderx_spi_pci_id_table); static struct pci_driver thunderx_spi_driver = { .name = DRV_NAME, .id_table = thunderx_spi_pci_id_table, .probe = thunderx_spi_probe, .remove = thunderx_spi_remove, }; module_pci_driver(thunderx_spi_driver); MODULE_DESCRIPTION("Cavium, Inc. ThunderX SPI bus driver"); MODULE_AUTHOR("Jan Glauber"); MODULE_LICENSE("GPL");
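/*
 * Illustrative sketch (not part of the original sources): what a client of
 * a half-duplex controller like the one above must do. Because the host
 * sets SPI_CONTROLLER_HALF_DUPLEX, a single transfer may carry tx_buf or
 * rx_buf but not both, so a write-then-read is split into two transfers.
 * Only generic SPI core API is used; the spi_device pointer is assumed to
 * come from a regular SPI client driver's probe.
 */
#include <linux/spi/spi.h>

static int demo_spi_write_then_read(struct spi_device *spi, const u8 *cmd,
				    size_t cmd_len, u8 *resp, size_t resp_len)
{
	struct spi_transfer xfers[2] = {
		{ .tx_buf = cmd,  .len = cmd_len  },	/* half 1: write */
		{ .rx_buf = resp, .len = resp_len },	/* half 2: read */
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	return spi_sync(spi, &msg);	/* blocks until the message completes */
}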
// SPDX-License-Identifier: GPL-2.0-only /* * Dummy IRQ handler driver. * * This module only registers itself as a handler that is specified to it * by the 'irq' parameter. * * The sole purpose of this module is to help with debugging of systems on * which spurious IRQs would happen on disabled IRQ vector. * * Copyright (C) 2013 Jiri Kosina */ #include <linux/module.h> #include <linux/irq.h> #include <linux/interrupt.h> static int irq = -1; static irqreturn_t dummy_interrupt(int irq, void *dev_id) { static int count = 0; if (count == 0) { printk(KERN_INFO "dummy-irq: interrupt occurred on IRQ %d\n", irq); count++; } return IRQ_NONE; } static int __init dummy_irq_init(void) { if (irq < 0) { printk(KERN_ERR "dummy-irq: no IRQ given. Use irq=N\n"); return -EIO; } if (request_irq(irq, &dummy_interrupt, IRQF_SHARED, "dummy_irq", &irq)) { printk(KERN_ERR "dummy-irq: cannot register IRQ %d\n", irq); return -EIO; } printk(KERN_INFO "dummy-irq: registered for IRQ %d\n", irq); return 0; } static void __exit dummy_irq_exit(void) { printk(KERN_INFO "dummy-irq unloaded\n"); free_irq(irq, &irq); } module_init(dummy_irq_init); module_exit(dummy_irq_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jiri Kosina"); module_param_hw(irq, uint, irq, 0444); MODULE_PARM_DESC(irq, "The IRQ to register for"); MODULE_DESCRIPTION("Dummy IRQ handler driver");
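/*
 * Illustrative sketch (not part of the original sources): the same kind of
 * observe-only debug handler as dummy-irq above, but requested through the
 * managed API from a platform driver's probe, so no explicit free_irq() is
 * needed on unbind. The platform device and its IRQ resource are assumed
 * for the example.
 */
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t demo_debug_irq(int irq, void *dev_id)
{
	pr_info_ratelimited("demo-irq: interrupt on IRQ %d\n", irq);
	return IRQ_NONE;	/* never claim it; we only observe */
}

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;
	/* freed automatically when the device is unbound */
	return devm_request_irq(&pdev->dev, irq, demo_debug_irq,
				IRQF_SHARED, "demo-irq", pdev);
}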
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _TEST_MAPS_H #define _TEST_MAPS_H #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #define CHECK(condition, tag, format...) ({ \ int __ret = !!(condition); \ if (__ret) { \ printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \ printf(format); \ exit(-1); \ } \ }) extern int skips; typedef bool (*retry_for_error_fn)(int err); int map_update_retriable(int map_fd, const void *key, const void *value, int flags, int attempts, retry_for_error_fn need_retry); #endif
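/*
 * Illustrative sketch (not part of the original sources): how a test might
 * use the CHECK() macro and map_update_retriable() declared above. The
 * retry predicate and attempt count are assumptions for the example, and
 * map_fd is expected to be a valid BPF map created elsewhere in the test.
 */
#include <errno.h>
#include "test_maps.h"

static bool retry_on_contention(int err)
{
	/* per-CPU/htab updates may transiently fail under contention */
	return err == EAGAIN || err == EBUSY;
}

static void demo_update(int map_fd)
{
	int key = 1, value = 42, err;

	err = map_update_retriable(map_fd, &key, &value, 0 /* BPF_ANY */,
				   3 /* attempts */, retry_on_contention);
	CHECK(err, "map_update_retriable", "err: %d errno: %d\n", err, errno);
}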
// SPDX-License-Identifier: GPL-2.0+ /* * comedi/drivers/ni_routing/ni_device_routes.c * List of valid routes for specific NI boards. * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 2016 Spencer E. Olson <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * The contents of this file are generated using the tools in * comedi/drivers/ni_routing/tools * * Please use those tools to help maintain the contents of this file. */ #include "ni_device_routes.h" #include "ni_device_routes/all.h" struct ni_device_routes *const ni_device_routes_list[] = { &ni_pxi_6030e_device_routes, &ni_pci_6070e_device_routes, &ni_pci_6220_device_routes, &ni_pci_6221_device_routes, &ni_pxi_6224_device_routes, &ni_pxi_6225_device_routes, &ni_pci_6229_device_routes, &ni_pci_6251_device_routes, &ni_pxi_6251_device_routes, &ni_pxie_6251_device_routes, &ni_pci_6254_device_routes, &ni_pci_6259_device_routes, &ni_pci_6534_device_routes, &ni_pci_6602_device_routes, &ni_pci_6713_device_routes, &ni_pci_6723_device_routes, &ni_pci_6733_device_routes, &ni_pxi_6733_device_routes, NULL, };
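/*
 * Illustrative sketch (not part of the original sources): walking the
 * NULL-terminated ni_device_routes_list above to find the route table for
 * one board. The "device" member name is assumed from ni_device_routes.h;
 * treat it as hypothetical for this example.
 */
#include <linux/string.h>
#include "ni_device_routes.h"

static const struct ni_device_routes *demo_find_routes(const char *board)
{
	struct ni_device_routes *const *dr;

	/* the list ends with a NULL sentinel rather than a count */
	for (dr = ni_device_routes_list; *dr; dr++)
		if (!strcmp((*dr)->device, board))
			return *dr;
	return NULL;
}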
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT /* * Copyright 2023 Toradex * * Common dtsi for Verdin AM62 SoM on Dahlia carrier board * * https://www.toradex.com/computer-on-modules/verdin-arm-family/ti-am62 * https://www.toradex.com/products/carrier-board/dahlia-carrier-board-kit */ / { reg_1v8_sw: regulator-1v8-sw { compatible = "regulator-fixed"; regulator-max-microvolt = <1800000>; regulator-min-microvolt = <1800000>; regulator-name = "On-carrier +V1.8_SW"; }; sound { compatible = "simple-audio-card"; simple-audio-card,bitclock-master = <&codec_dai>; simple-audio-card,format = "i2s"; simple-audio-card,frame-master = <&codec_dai>; simple-audio-card,name = "verdin-wm8904"; simple-audio-card,mclk-fs = <256>; simple-audio-card,routing = "Headphone Jack", "HPOUTL", "Headphone Jack", "HPOUTR", "IN2L", "Line In Jack", "IN2R", "Line In Jack", "Headphone Jack", "MICBIAS", "IN1L", "Headphone Jack"; simple-audio-card,widgets = "Microphone", "Headphone Jack", "Headphone", "Headphone Jack", "Line", "Line In Jack"; codec_dai: simple-audio-card,codec { sound-dai = <&wm8904_1a>; }; simple-audio-card,cpu { sound-dai = <&mcasp0>; }; }; }; /* Verdin ETHs */ &cpsw3g { status = "okay"; }; /* MDIO, shared by Verdin ETH_1 (On-module PHY) and Verdin ETH_2_RGMII */ &cpsw3g_mdio { status = "okay"; }; /* Verdin ETH_1 (On-module PHY) */ &cpsw_port1 { status = "okay"; }; /* Verdin PWM_1, PWM_2 */ &epwm0 { status = "okay"; }; /* Verdin PWM_3_DSI */ &epwm1 { status = "okay"; }; &main_gpio0 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_ctrl_sleep_moci>, <&pinctrl_gpio_5>, <&pinctrl_gpio_6>, <&pinctrl_gpio_7>, <&pinctrl_gpio_8>; }; /* Verdin I2C_1 */ &main_i2c1 { status = "okay"; /* Audio Codec */ wm8904_1a: audio-codec@1a { compatible = "wlf,wm8904"; reg = <0x1a>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2s1_mclk>; #sound-dai-cells = <0>; clocks = <&audio_refclk1>; clock-names = "mclk"; AVDD-supply = <&reg_1v8_sw>; CPVDD-supply = <&reg_1v8_sw>; DBVDD-supply = <&reg_1v8_sw>; DCVDD-supply = <&reg_1v8_sw>; MICVDD-supply = <&reg_1v8_sw>; }; /* Current measurement into module VCC */ hwmon@40 { compatible = "ti,ina219"; reg = <0x40>; shunt-resistor = <10000>; }; /* Temperature sensor */ sensor@4f { compatible = "ti,tmp75c"; reg = <0x4f>; }; /* EEPROM */ eeprom@57 { compatible = "st,24c02"; reg = <0x57>; pagesize = <16>; }; }; /* Verdin I2C_2_DSI */ &main_i2c2 { status = "okay"; }; /* Verdin I2C_4_CSI */ &main_i2c3 { status = "okay"; }; /* Verdin CAN_1 */ &main_mcan0 { status = "okay"; }; /* Verdin SPI_1 */ &main_spi1 { status = "okay"; }; /* Verdin UART_3 */ &main_uart0 { status = "okay"; }; /* Verdin UART_1 */ &main_uart1 { status = "okay"; }; /* Verdin I2S_1 */ &mcasp0 { status = "okay"; }; &mcu_gpio0 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_gpio_1>, <&pinctrl_gpio_2>, <&pinctrl_gpio_3>, <&pinctrl_gpio_4>, <&pinctrl_pcie_1_reset>; }; /* Verdin I2C_3_HDMI */ &mcu_i2c0 { status = "okay"; }; /* Verdin CAN_2 */ &mcu_mcan0 { status = "okay"; }; /* Verdin UART_4 */ &mcu_uart0 { status = "okay"; }; /* Verdin QSPI_1 */ &ospi0 { status = "okay"; }; /* Verdin SD_1 */ &sdhci1 { status = "okay"; }; /* Verdin USB_1 */ &usbss0 { status = "okay"; }; &usb0 { status = "okay"; }; /* Verdin USB_2 */ &usbss1 { status = "okay"; }; &usb1 { status = "okay"; }; /* Verdin CTRL_WAKE1_MICO# */ &verdin_gpio_keys { status = "okay"; }; /* Verdin PCIE_1_RESET# */ &verdin_pcie_1_reset_hog { status = "okay"; }; /* Verdin UART_2 */ &wkup_uart0 { status = "okay"; };
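/*
 * Illustrative sketch (not part of the original sources): how a driver
 * consumes a property like the "shunt-resistor" value given to the ina219
 * node above. Only the generic device-property API is used; the fallback
 * of 10000 micro-ohms is an assumption for the example, not taken from
 * the real ina2xx driver.
 */
#include <linux/device.h>
#include <linux/property.h>

static u32 demo_get_shunt_uohms(struct device *dev)
{
	u32 val;

	/* fall back to a default when the DT property is absent */
	if (device_property_read_u32(dev, "shunt-resistor", &val))
		val = 10000;	/* micro-ohms, matching the node above */
	return val;
}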
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause // Copyright (c) 2023 Cloudflare /* Test IP_LOCAL_PORT_RANGE socket option: IPv4 + IPv6, TCP + UDP. * * Tests assume that net.ipv4.ip_local_port_range is [40000, 49999]. * Don't run these directly but with ip_local_port_range.sh script. */ #include <fcntl.h> #include <netinet/ip.h> #include "../kselftest_harness.h" #ifndef IP_LOCAL_PORT_RANGE #define IP_LOCAL_PORT_RANGE 51 #endif #ifndef IPPROTO_MPTCP #define IPPROTO_MPTCP 262 #endif static __u32 pack_port_range(__u16 lo, __u16 hi) { return (hi << 16) | (lo << 0); } static void unpack_port_range(__u32 range, __u16 *lo, __u16 *hi) { *lo = range & 0xffff; *hi = range >> 16; } static int get_so_domain(int fd) { int domain, err; socklen_t len; len = sizeof(domain); err = getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len); if (err) return -1; return domain; } static int bind_to_loopback_any_port(int fd) { union { struct sockaddr sa; struct sockaddr_in v4; struct sockaddr_in6 v6; } addr; socklen_t addr_len; memset(&addr, 0, sizeof(addr)); switch (get_so_domain(fd)) { case AF_INET: addr.v4.sin_family = AF_INET; addr.v4.sin_port = htons(0); addr.v4.sin_addr.s_addr = htonl(INADDR_LOOPBACK); addr_len = sizeof(addr.v4); break; case AF_INET6: addr.v6.sin6_family = AF_INET6; addr.v6.sin6_port = htons(0); addr.v6.sin6_addr = in6addr_loopback; addr_len = sizeof(addr.v6); break; default: return -1; } return bind(fd, &addr.sa, addr_len); } static int get_sock_port(int fd) { union { struct sockaddr sa; struct sockaddr_in v4; struct sockaddr_in6 v6; } addr; socklen_t addr_len; int err; addr_len = sizeof(addr); memset(&addr, 0, sizeof(addr)); err = getsockname(fd, &addr.sa, &addr_len); if (err) return -1; switch (addr.sa.sa_family) { case AF_INET: return ntohs(addr.v4.sin_port); case AF_INET6: return ntohs(addr.v6.sin6_port); default: errno = EAFNOSUPPORT; return -1; } } static int get_ip_local_port_range(int fd, __u32 *range) { socklen_t len; __u32 val; int err; len = sizeof(val); err = getsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &val, &len); if (err) return -1; *range = val; return 0; } FIXTURE(ip_local_port_range) {}; FIXTURE_SETUP(ip_local_port_range) { } FIXTURE_TEARDOWN(ip_local_port_range) { } FIXTURE_VARIANT(ip_local_port_range) { int so_domain; int so_type; int so_protocol; }; FIXTURE_VARIANT_ADD(ip_local_port_range, ip4_tcp) { .so_domain = AF_INET, .so_type = SOCK_STREAM, .so_protocol = 0, }; FIXTURE_VARIANT_ADD(ip_local_port_range, ip4_udp) { .so_domain = AF_INET, .so_type = SOCK_DGRAM, .so_protocol = 0, }; FIXTURE_VARIANT_ADD(ip_local_port_range, ip4_stcp) { .so_domain = AF_INET, .so_type = SOCK_STREAM, .so_protocol = IPPROTO_SCTP, }; FIXTURE_VARIANT_ADD(ip_local_port_range, ip4_mptcp) { .so_domain = AF_INET, .so_type = SOCK_STREAM, .so_protocol = IPPROTO_MPTCP, }; FIXTURE_VARIANT_ADD(ip_local_port_range, ip6_tcp) { .so_domain = AF_INET6, .so_type = SOCK_STREAM, .so_protocol = 0, }; FIXTURE_VARIANT_ADD(ip_local_port_range, ip6_udp) { .so_domain = AF_INET6, .so_type = SOCK_DGRAM, .so_protocol = 0, }; FIXTURE_VARIANT_ADD(ip_local_port_range, ip6_stcp) { .so_domain = AF_INET6, .so_type = SOCK_STREAM, .so_protocol = IPPROTO_SCTP, }; FIXTURE_VARIANT_ADD(ip_local_port_range, ip6_mptcp) { .so_domain = AF_INET6, .so_type = SOCK_STREAM, .so_protocol = IPPROTO_MPTCP, }; TEST_F(ip_local_port_range, invalid_option_value) { __u16 val16; __u32 val32; __u64 val64; int fd, err; fd = socket(variant->so_domain, variant->so_type, variant->so_protocol); ASSERT_GE(fd, 0) TH_LOG("socket failed"); /* Too few bytes */ 
val16 = 40000; err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &val16, sizeof(val16)); EXPECT_TRUE(err) TH_LOG("expected setsockopt(IP_LOCAL_PORT_RANGE) to fail"); EXPECT_EQ(errno, EINVAL); /* Empty range: low port > high port */ val32 = pack_port_range(40222, 40111); err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &val32, sizeof(val32)); EXPECT_TRUE(err) TH_LOG("expected setsockopt(IP_LOCAL_PORT_RANGE) to fail"); EXPECT_EQ(errno, EINVAL); /* Too many bytes */ val64 = pack_port_range(40333, 40444); err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &val64, sizeof(val64)); EXPECT_TRUE(err) TH_LOG("expected setsockopt(IP_LOCAL_PORT_RANGE) to fail"); EXPECT_EQ(errno, EINVAL); err = close(fd); ASSERT_TRUE(!err) TH_LOG("close failed"); } TEST_F(ip_local_port_range, port_range_out_of_netns_range) { const struct test { __u16 range_lo; __u16 range_hi; } tests[] = { { 30000, 39999 }, /* socket range below netns range */ { 50000, 59999 }, /* socket range above netns range */ }; const struct test *t; for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { /* Bind a couple of sockets, not just one, to check * that the range wasn't clamped to a single port from * the netns range. That is [40000, 40000] or [49999, * 49999], respectively for each test case. */ int fds[2], i; TH_LOG("lo %5hu, hi %5hu", t->range_lo, t->range_hi); for (i = 0; i < ARRAY_SIZE(fds); i++) { int fd, err, port; __u32 range; fd = socket(variant->so_domain, variant->so_type, variant->so_protocol); ASSERT_GE(fd, 0) TH_LOG("#%d: socket failed", i); range = pack_port_range(t->range_lo, t->range_hi); err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range)); ASSERT_TRUE(!err) TH_LOG("#%d: setsockopt(IP_LOCAL_PORT_RANGE) failed", i); err = bind_to_loopback_any_port(fd); ASSERT_TRUE(!err) TH_LOG("#%d: bind failed", i); /* Check that socket port range outside of ephemeral range is ignored */ port = get_sock_port(fd); ASSERT_GE(port, 40000) TH_LOG("#%d: expected port within netns range", i); ASSERT_LE(port, 49999) TH_LOG("#%d: expected port within netns range", i); fds[i] = fd; } for (i = 0; i < ARRAY_SIZE(fds); i++) ASSERT_TRUE(close(fds[i]) == 0) TH_LOG("#%d: close failed", i); } } TEST_F(ip_local_port_range, single_port_range) { const struct test { __u16 range_lo; __u16 range_hi; __u16 expected; } tests[] = { /* single port range within ephemeral range */ { 45000, 45000, 45000 }, /* first port in the ephemeral range (clamp from above) */ { 0, 40000, 40000 }, /* last port in the ephemeral range (clamp from below) */ { 49999, 0, 49999 }, }; const struct test *t; for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { int fd, err, port; __u32 range; TH_LOG("lo %5hu, hi %5hu, expected %5hu", t->range_lo, t->range_hi, t->expected); fd = socket(variant->so_domain, variant->so_type, variant->so_protocol); ASSERT_GE(fd, 0) TH_LOG("socket failed"); range = pack_port_range(t->range_lo, t->range_hi); err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range)); ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_LOCAL_PORT_RANGE) failed"); err = bind_to_loopback_any_port(fd); ASSERT_TRUE(!err) TH_LOG("bind failed"); port = get_sock_port(fd); ASSERT_EQ(port, t->expected) TH_LOG("unexpected local port"); err = close(fd); ASSERT_TRUE(!err) TH_LOG("close failed"); } } TEST_F(ip_local_port_range, exhaust_8_port_range) { __u8 port_set = 0; int i, fd, err; __u32 range; __u16 port; int fds[8]; for (i = 0; i < ARRAY_SIZE(fds); i++) { fd = socket(variant->so_domain, variant->so_type, variant->so_protocol); ASSERT_GE(fd, 0) TH_LOG("socket 
failed"); range = pack_port_range(40000, 40007); err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range)); ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_LOCAL_PORT_RANGE) failed"); err = bind_to_loopback_any_port(fd); ASSERT_TRUE(!err) TH_LOG("bind failed"); port = get_sock_port(fd); ASSERT_GE(port, 40000) TH_LOG("expected port within sockopt range"); ASSERT_LE(port, 40007) TH_LOG("expected port within sockopt range"); port_set |= 1 << (port - 40000); fds[i] = fd; } /* Check that all every port from the test range is in use */ ASSERT_EQ(port_set, 0xff) TH_LOG("expected all ports to be busy"); /* Check that bind() fails because the whole range is busy */ fd = socket(variant->so_domain, variant->so_type, variant->so_protocol); ASSERT_GE(fd, 0) TH_LOG("socket failed"); range = pack_port_range(40000, 40007); err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range)); ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_LOCAL_PORT_RANGE) failed"); err = bind_to_loopback_any_port(fd); ASSERT_TRUE(err) TH_LOG("expected bind to fail"); ASSERT_EQ(errno, EADDRINUSE); err = close(fd); ASSERT_TRUE(!err) TH_LOG("close failed"); for (i = 0; i < ARRAY_SIZE(fds); i++) { err = close(fds[i]); ASSERT_TRUE(!err) TH_LOG("close failed"); } } TEST_F(ip_local_port_range, late_bind) { union { struct sockaddr sa; struct sockaddr_in v4; struct sockaddr_in6 v6; } addr; socklen_t addr_len = 0; const int one = 1; int fd, err; __u32 range; __u16 port; fd = socket(variant->so_domain, variant->so_type, 0); ASSERT_GE(fd, 0) TH_LOG("socket failed"); range = pack_port_range(40100, 40199); err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range)); ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_LOCAL_PORT_RANGE) failed"); err = setsockopt(fd, SOL_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one)); ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_BIND_ADDRESS_NO_PORT) failed"); err = bind_to_loopback_any_port(fd); ASSERT_TRUE(!err) TH_LOG("bind failed"); port = get_sock_port(fd); ASSERT_EQ(port, 0) TH_LOG("getsockname failed"); /* Invalid destination */ memset(&addr, 0, sizeof(addr)); switch (variant->so_domain) { case AF_INET: addr.v4.sin_family = AF_INET; addr.v4.sin_port = htons(0); addr.v4.sin_addr.s_addr = htonl(INADDR_ANY); addr_len = sizeof(addr.v4); break; case AF_INET6: addr.v6.sin6_family = AF_INET6; addr.v6.sin6_port = htons(0); addr.v6.sin6_addr = in6addr_any; addr_len = sizeof(addr.v6); break; default: ASSERT_TRUE(false) TH_LOG("unsupported socket domain"); } /* connect() doesn't need to succeed for late bind to happen */ connect(fd, &addr.sa, addr_len); port = get_sock_port(fd); ASSERT_GE(port, 40100); ASSERT_LE(port, 40199); err = close(fd); ASSERT_TRUE(!err) TH_LOG("close failed"); } XFAIL_ADD(ip_local_port_range, ip4_stcp, late_bind); XFAIL_ADD(ip_local_port_range, ip6_stcp, late_bind); TEST_F(ip_local_port_range, get_port_range) { __u16 lo, hi; __u32 range; int fd, err; fd = socket(variant->so_domain, variant->so_type, variant->so_protocol); ASSERT_GE(fd, 0) TH_LOG("socket failed"); /* Get range before it will be set */ err = get_ip_local_port_range(fd, &range); ASSERT_TRUE(!err) TH_LOG("getsockopt(IP_LOCAL_PORT_RANGE) failed"); unpack_port_range(range, &lo, &hi); ASSERT_EQ(lo, 0) TH_LOG("unexpected low port"); ASSERT_EQ(hi, 0) TH_LOG("unexpected high port"); range = pack_port_range(12345, 54321); err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range)); ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_LOCAL_PORT_RANGE) failed"); /* Get range after it has been set */ err = 
get_ip_local_port_range(fd, &range); ASSERT_TRUE(!err) TH_LOG("getsockopt(IP_LOCAL_PORT_RANGE) failed"); unpack_port_range(range, &lo, &hi); ASSERT_EQ(lo, 12345) TH_LOG("unexpected low port"); ASSERT_EQ(hi, 54321) TH_LOG("unexpected high port"); /* Unset the port range */ range = pack_port_range(0, 0); err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range)); ASSERT_TRUE(!err) TH_LOG("setsockopt(IP_LOCAL_PORT_RANGE) failed"); /* Get range after it has been unset */ err = get_ip_local_port_range(fd, &range); ASSERT_TRUE(!err) TH_LOG("getsockopt(IP_LOCAL_PORT_RANGE) failed"); unpack_port_range(range, &lo, &hi); ASSERT_EQ(lo, 0) TH_LOG("unexpected low port"); ASSERT_EQ(hi, 0) TH_LOG("unexpected high port"); err = close(fd); ASSERT_TRUE(!err) TH_LOG("close failed"); } TEST_HARNESS_MAIN
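/*
 * Illustrative sketch (not part of the original sources): a minimal
 * standalone user of IP_LOCAL_PORT_RANGE outside the kselftest harness.
 * It pins the per-socket ephemeral range to [40100, 40199], binds to an
 * ephemeral port on loopback, and prints the port the kernel picked.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IP_LOCAL_PORT_RANGE
#define IP_LOCAL_PORT_RANGE 51
#endif

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	socklen_t len = sizeof(addr);
	/* low port in bits 0..15, high port in bits 16..31 */
	unsigned int range = (40199u << 16) | 40100u;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0 ||
	    setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range)) ||
	    bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
	    getsockname(fd, (struct sockaddr *)&addr, &len))
		return 1;
	printf("bound to port %hu\n", ntohs(addr.sin_port));
	return close(fd);
}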
/* SPDX-License-Identifier: GPL-2.0-only */ /* * i2c-gpio interface to platform code * * Copyright (C) 2007 Atmel Corporation */ #ifndef _LINUX_I2C_GPIO_H #define _LINUX_I2C_GPIO_H /** * struct i2c_gpio_platform_data - Platform-dependent data for i2c-gpio * @udelay: signal toggle delay. SCL frequency is (500 / udelay) kHz * @timeout: clock stretching timeout in jiffies. If the slave keeps * SCL low for longer than this, the transfer will time out. * @sda_is_open_drain: SDA is configured as open drain, i.e. the pin * isn't actively driven high when setting the output value high. * gpio_get_value() must return the actual pin state even if the * pin is configured as an output. * @sda_is_output_only: SDA output drivers can't be turned off. * This is for clients that can only read SDA/SCL. * @sda_has_no_pullup: SDA is used in a non-compliant way and has no pull-up. * Therefore disable open-drain. * @scl_is_open_drain: SCL is set up as open drain. Same requirements * as for sda_is_open_drain apply. * @scl_is_output_only: SCL output drivers cannot be turned off. * @scl_has_no_pullup: SCL is used in a non-compliant way and has no pull-up. * Therefore disable open-drain. */ struct i2c_gpio_platform_data { int udelay; int timeout; unsigned int sda_is_open_drain:1; unsigned int sda_is_output_only:1; unsigned int sda_has_no_pullup:1; unsigned int scl_is_open_drain:1; unsigned int scl_is_output_only:1; unsigned int scl_has_no_pullup:1; }; #endif /* _LINUX_I2C_GPIO_H */
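/*
 * Illustrative sketch (not part of the original sources): legacy board code
 * registering an i2c-gpio bus with the platform data declared above. The
 * SDA/SCL lines themselves are supplied separately through a GPIO
 * descriptor lookup table; only the timing knobs live in platform data.
 * The device id and the 5 us delay (500 / 5 = 100 kHz SCL) are assumptions
 * for the example.
 */
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-gpio.h>

static struct i2c_gpio_platform_data demo_i2c_pdata = {
	.udelay = 5,		/* 500 / 5 = 100 kHz SCL */
	.timeout = HZ / 10,	/* clock-stretch timeout: 100 ms */
};

static struct platform_device demo_i2c_device = {
	.name = "i2c-gpio",
	.id = 0,
	.dev = {
		.platform_data = &demo_i2c_pdata,
	},
};

static int __init demo_i2c_init(void)
{
	return platform_device_register(&demo_i2c_device);
}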
// SPDX-License-Identifier: BSD-3-Clause /* * linux/net/sunrpc/gss_krb5_mech.c * * Copyright (c) 2001-2008 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <[email protected]> * J. Bruce Fields <[email protected]> */ #include <crypto/hash.h> #include <crypto/skcipher.h> #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/sunrpc/auth.h> #include <linux/sunrpc/gss_krb5.h> #include <linux/sunrpc/xdr.h> #include <kunit/visibility.h> #include "auth_gss_internal.h" #include "gss_krb5_internal.h" #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define RPCDBG_FACILITY RPCDBG_AUTH #endif static struct gss_api_mech gss_kerberos_mech; static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { #if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1) /* * AES-128 with SHA-1 (RFC 3962) */ { .etype = ENCTYPE_AES128_CTS_HMAC_SHA1_96, .ctype = CKSUMTYPE_HMAC_SHA1_96_AES128, .name = "aes128-cts", .encrypt_name = "cts(cbc(aes))", .aux_cipher = "cbc(aes)", .cksum_name = "hmac(sha1)", .derive_key = krb5_derive_key_v2, .encrypt = gss_krb5_aes_encrypt, .decrypt = gss_krb5_aes_decrypt, .get_mic = gss_krb5_get_mic_v2, .verify_mic = gss_krb5_verify_mic_v2, .wrap = gss_krb5_wrap_v2, .unwrap = gss_krb5_unwrap_v2, .signalg = -1, .sealalg = -1, .keybytes = 16, .keylength = BITS2OCTETS(128), .Kc_length = BITS2OCTETS(128), .Ke_length = BITS2OCTETS(128), .Ki_length = BITS2OCTETS(128), .cksumlength = BITS2OCTETS(96), .keyed_cksum = 1, }, /* * AES-256 with SHA-1 (RFC 3962) */ { .etype = ENCTYPE_AES256_CTS_HMAC_SHA1_96, .ctype = CKSUMTYPE_HMAC_SHA1_96_AES256, .name = "aes256-cts", .encrypt_name = "cts(cbc(aes))", .aux_cipher = "cbc(aes)", .cksum_name = "hmac(sha1)", .derive_key = krb5_derive_key_v2, .encrypt = gss_krb5_aes_encrypt, .decrypt = gss_krb5_aes_decrypt, .get_mic = gss_krb5_get_mic_v2, .verify_mic = gss_krb5_verify_mic_v2, .wrap = gss_krb5_wrap_v2, .unwrap = gss_krb5_unwrap_v2, .signalg = -1, .sealalg = -1, .keybytes = 32, .keylength = BITS2OCTETS(256), .Kc_length = BITS2OCTETS(256), .Ke_length = BITS2OCTETS(256), .Ki_length = BITS2OCTETS(256), .cksumlength = BITS2OCTETS(96), .keyed_cksum = 1, }, #endif #if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA) /* * Camellia-128 with CMAC (RFC 6803) */ { .etype = ENCTYPE_CAMELLIA128_CTS_CMAC, .ctype = CKSUMTYPE_CMAC_CAMELLIA128, .name = "camellia128-cts-cmac", .encrypt_name = "cts(cbc(camellia))", .aux_cipher = "cbc(camellia)", .cksum_name = "cmac(camellia)", .cksumlength = BITS2OCTETS(128), .keyed_cksum = 1, .keylength = BITS2OCTETS(128), .Kc_length = BITS2OCTETS(128), .Ke_length = BITS2OCTETS(128), .Ki_length = BITS2OCTETS(128), .derive_key = krb5_kdf_feedback_cmac, .encrypt = gss_krb5_aes_encrypt, .decrypt = gss_krb5_aes_decrypt, .get_mic = gss_krb5_get_mic_v2, .verify_mic = gss_krb5_verify_mic_v2, .wrap = gss_krb5_wrap_v2, .unwrap = gss_krb5_unwrap_v2, }, /* * Camellia-256 with CMAC (RFC 6803) */ { .etype = ENCTYPE_CAMELLIA256_CTS_CMAC, .ctype = CKSUMTYPE_CMAC_CAMELLIA256, .name = "camellia256-cts-cmac", .encrypt_name = "cts(cbc(camellia))", .aux_cipher = "cbc(camellia)", .cksum_name = "cmac(camellia)", .cksumlength = BITS2OCTETS(128), .keyed_cksum = 1, .keylength = BITS2OCTETS(256), .Kc_length = BITS2OCTETS(256), .Ke_length = BITS2OCTETS(256), .Ki_length = BITS2OCTETS(256), .derive_key = krb5_kdf_feedback_cmac, .encrypt = gss_krb5_aes_encrypt, .decrypt = gss_krb5_aes_decrypt, .get_mic = gss_krb5_get_mic_v2, .verify_mic = gss_krb5_verify_mic_v2, 
.wrap = gss_krb5_wrap_v2, .unwrap = gss_krb5_unwrap_v2, }, #endif #if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2) /* * AES-128 with SHA-256 (RFC 8009) */ { .etype = ENCTYPE_AES128_CTS_HMAC_SHA256_128, .ctype = CKSUMTYPE_HMAC_SHA256_128_AES128, .name = "aes128-cts-hmac-sha256-128", .encrypt_name = "cts(cbc(aes))", .aux_cipher = "cbc(aes)", .cksum_name = "hmac(sha256)", .cksumlength = BITS2OCTETS(128), .keyed_cksum = 1, .keylength = BITS2OCTETS(128), .Kc_length = BITS2OCTETS(128), .Ke_length = BITS2OCTETS(128), .Ki_length = BITS2OCTETS(128), .derive_key = krb5_kdf_hmac_sha2, .encrypt = krb5_etm_encrypt, .decrypt = krb5_etm_decrypt, .get_mic = gss_krb5_get_mic_v2, .verify_mic = gss_krb5_verify_mic_v2, .wrap = gss_krb5_wrap_v2, .unwrap = gss_krb5_unwrap_v2, }, /* * AES-256 with SHA-384 (RFC 8009) */ { .etype = ENCTYPE_AES256_CTS_HMAC_SHA384_192, .ctype = CKSUMTYPE_HMAC_SHA384_192_AES256, .name = "aes256-cts-hmac-sha384-192", .encrypt_name = "cts(cbc(aes))", .aux_cipher = "cbc(aes)", .cksum_name = "hmac(sha384)", .cksumlength = BITS2OCTETS(192), .keyed_cksum = 1, .keylength = BITS2OCTETS(256), .Kc_length = BITS2OCTETS(192), .Ke_length = BITS2OCTETS(256), .Ki_length = BITS2OCTETS(192), .derive_key = krb5_kdf_hmac_sha2, .encrypt = krb5_etm_encrypt, .decrypt = krb5_etm_decrypt, .get_mic = gss_krb5_get_mic_v2, .verify_mic = gss_krb5_verify_mic_v2, .wrap = gss_krb5_wrap_v2, .unwrap = gss_krb5_unwrap_v2, }, #endif }; /* * The list of advertised enctypes is specified in order of most * preferred to least. */ static char gss_krb5_enctype_priority_list[64]; static void gss_krb5_prepare_enctype_priority_list(void) { static const u32 gss_krb5_enctypes[] = { #if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2) ENCTYPE_AES256_CTS_HMAC_SHA384_192, ENCTYPE_AES128_CTS_HMAC_SHA256_128, #endif #if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA) ENCTYPE_CAMELLIA256_CTS_CMAC, ENCTYPE_CAMELLIA128_CTS_CMAC, #endif #if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1) ENCTYPE_AES256_CTS_HMAC_SHA1_96, ENCTYPE_AES128_CTS_HMAC_SHA1_96, #endif }; size_t total, i; char buf[16]; char *sep; int n; sep = ""; gss_krb5_enctype_priority_list[0] = '\0'; for (total = 0, i = 0; i < ARRAY_SIZE(gss_krb5_enctypes); i++) { n = sprintf(buf, "%s%u", sep, gss_krb5_enctypes[i]); if (n < 0) break; if (total + n >= sizeof(gss_krb5_enctype_priority_list)) break; strcat(gss_krb5_enctype_priority_list, buf); sep = ","; total += n; } } /** * gss_krb5_lookup_enctype - Retrieve profile information for a given enctype * @etype: ENCTYPE value * * Returns a pointer to a gss_krb5_enctype structure, or NULL if no * matching etype is found. 
*/ VISIBLE_IF_KUNIT const struct gss_krb5_enctype *gss_krb5_lookup_enctype(u32 etype) { size_t i; for (i = 0; i < ARRAY_SIZE(supported_gss_krb5_enctypes); i++) if (supported_gss_krb5_enctypes[i].etype == etype) return &supported_gss_krb5_enctypes[i]; return NULL; } EXPORT_SYMBOL_IF_KUNIT(gss_krb5_lookup_enctype); static struct crypto_sync_skcipher * gss_krb5_alloc_cipher_v2(const char *cname, const struct xdr_netobj *key) { struct crypto_sync_skcipher *tfm; tfm = crypto_alloc_sync_skcipher(cname, 0, 0); if (IS_ERR(tfm)) return NULL; if (crypto_sync_skcipher_setkey(tfm, key->data, key->len)) { crypto_free_sync_skcipher(tfm); return NULL; } return tfm; } static struct crypto_ahash * gss_krb5_alloc_hash_v2(struct krb5_ctx *kctx, const struct xdr_netobj *key) { struct crypto_ahash *tfm; tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) return NULL; if (crypto_ahash_setkey(tfm, key->data, key->len)) { crypto_free_ahash(tfm); return NULL; } return tfm; } static int gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask) { struct xdr_netobj keyin = { .len = ctx->gk5e->keylength, .data = ctx->Ksess, }; struct xdr_netobj keyout; int ret = -EINVAL; keyout.data = kmalloc(GSS_KRB5_MAX_KEYLEN, gfp_mask); if (!keyout.data) return -ENOMEM; /* initiator seal encryption */ keyout.len = ctx->gk5e->Ke_length; if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_ENCRYPTION, gfp_mask)) goto out; ctx->initiator_enc = gss_krb5_alloc_cipher_v2(ctx->gk5e->encrypt_name, &keyout); if (ctx->initiator_enc == NULL) goto out; if (ctx->gk5e->aux_cipher) { ctx->initiator_enc_aux = gss_krb5_alloc_cipher_v2(ctx->gk5e->aux_cipher, &keyout); if (ctx->initiator_enc_aux == NULL) goto out_free; } /* acceptor seal encryption */ if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_ENCRYPTION, gfp_mask)) goto out_free; ctx->acceptor_enc = gss_krb5_alloc_cipher_v2(ctx->gk5e->encrypt_name, &keyout); if (ctx->acceptor_enc == NULL) goto out_free; if (ctx->gk5e->aux_cipher) { ctx->acceptor_enc_aux = gss_krb5_alloc_cipher_v2(ctx->gk5e->aux_cipher, &keyout); if (ctx->acceptor_enc_aux == NULL) goto out_free; } /* initiator sign checksum */ keyout.len = ctx->gk5e->Kc_length; if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_INITIATOR_SIGN, KEY_USAGE_SEED_CHECKSUM, gfp_mask)) goto out_free; ctx->initiator_sign = gss_krb5_alloc_hash_v2(ctx, &keyout); if (ctx->initiator_sign == NULL) goto out_free; /* acceptor sign checksum */ if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_ACCEPTOR_SIGN, KEY_USAGE_SEED_CHECKSUM, gfp_mask)) goto out_free; ctx->acceptor_sign = gss_krb5_alloc_hash_v2(ctx, &keyout); if (ctx->acceptor_sign == NULL) goto out_free; /* initiator seal integrity */ keyout.len = ctx->gk5e->Ki_length; if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_INTEGRITY, gfp_mask)) goto out_free; ctx->initiator_integ = gss_krb5_alloc_hash_v2(ctx, &keyout); if (ctx->initiator_integ == NULL) goto out_free; /* acceptor seal integrity */ if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_INTEGRITY, gfp_mask)) goto out_free; ctx->acceptor_integ = gss_krb5_alloc_hash_v2(ctx, &keyout); if (ctx->acceptor_integ == NULL) goto out_free; ret = 0; out: kfree_sensitive(keyout.data); return ret; out_free: crypto_free_ahash(ctx->acceptor_integ); crypto_free_ahash(ctx->initiator_integ); crypto_free_ahash(ctx->acceptor_sign); crypto_free_ahash(ctx->initiator_sign); 
crypto_free_sync_skcipher(ctx->acceptor_enc_aux); crypto_free_sync_skcipher(ctx->acceptor_enc); crypto_free_sync_skcipher(ctx->initiator_enc_aux); crypto_free_sync_skcipher(ctx->initiator_enc); goto out; } static int gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, gfp_t gfp_mask) { u64 seq_send64; int keylen; u32 time32; int ret; p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags)); if (IS_ERR(p)) goto out_err; ctx->initiate = ctx->flags & KRB5_CTX_FLAG_INITIATOR; p = simple_get_bytes(p, end, &time32, sizeof(time32)); if (IS_ERR(p)) goto out_err; /* unsigned 32-bit time overflows in year 2106 */ ctx->endtime = (time64_t)time32; p = simple_get_bytes(p, end, &seq_send64, sizeof(seq_send64)); if (IS_ERR(p)) goto out_err; atomic64_set(&ctx->seq_send64, seq_send64); /* set seq_send for use by "older" enctypes */ atomic_set(&ctx->seq_send, seq_send64); if (seq_send64 != atomic_read(&ctx->seq_send)) { dprintk("%s: seq_send64 %llx, seq_send %x overflow?\n", __func__, seq_send64, atomic_read(&ctx->seq_send)); p = ERR_PTR(-EINVAL); goto out_err; } p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype)); if (IS_ERR(p)) goto out_err; ctx->gk5e = gss_krb5_lookup_enctype(ctx->enctype); if (ctx->gk5e == NULL) { dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n", ctx->enctype); p = ERR_PTR(-EINVAL); goto out_err; } keylen = ctx->gk5e->keylength; p = simple_get_bytes(p, end, ctx->Ksess, keylen); if (IS_ERR(p)) goto out_err; if (p != end) { p = ERR_PTR(-EINVAL); goto out_err; } ctx->mech_used.data = kmemdup(gss_kerberos_mech.gm_oid.data, gss_kerberos_mech.gm_oid.len, gfp_mask); if (unlikely(ctx->mech_used.data == NULL)) { p = ERR_PTR(-ENOMEM); goto out_err; } ctx->mech_used.len = gss_kerberos_mech.gm_oid.len; ret = gss_krb5_import_ctx_v2(ctx, gfp_mask); if (ret) { p = ERR_PTR(ret); goto out_free; } return 0; out_free: kfree(ctx->mech_used.data); out_err: return PTR_ERR(p); } static int gss_krb5_import_sec_context(const void *p, size_t len, struct gss_ctx *ctx_id, time64_t *endtime, gfp_t gfp_mask) { const void *end = (const void *)((const char *)p + len); struct krb5_ctx *ctx; int ret; ctx = kzalloc(sizeof(*ctx), gfp_mask); if (ctx == NULL) return -ENOMEM; ret = gss_import_v2_context(p, end, ctx, gfp_mask); memzero_explicit(&ctx->Ksess, sizeof(ctx->Ksess)); if (ret) { kfree(ctx); return ret; } ctx_id->internal_ctx_id = ctx; if (endtime) *endtime = ctx->endtime; return 0; } static void gss_krb5_delete_sec_context(void *internal_ctx) { struct krb5_ctx *kctx = internal_ctx; crypto_free_sync_skcipher(kctx->seq); crypto_free_sync_skcipher(kctx->enc); crypto_free_sync_skcipher(kctx->acceptor_enc); crypto_free_sync_skcipher(kctx->initiator_enc); crypto_free_sync_skcipher(kctx->acceptor_enc_aux); crypto_free_sync_skcipher(kctx->initiator_enc_aux); crypto_free_ahash(kctx->acceptor_sign); crypto_free_ahash(kctx->initiator_sign); crypto_free_ahash(kctx->acceptor_integ); crypto_free_ahash(kctx->initiator_integ); kfree(kctx->mech_used.data); kfree(kctx); } /** * gss_krb5_get_mic - get_mic for the Kerberos GSS mechanism * @gctx: GSS context * @text: plaintext to checksum * @token: buffer into which to write the computed checksum * * Return values: * %GSS_S_COMPLETE - success, and @token is filled in * %GSS_S_FAILURE - checksum could not be generated * %GSS_S_CONTEXT_EXPIRED - Kerberos context is no longer valid */ static u32 gss_krb5_get_mic(struct gss_ctx *gctx, struct xdr_buf *text, struct xdr_netobj *token) { struct krb5_ctx *kctx = gctx->internal_ctx_id; return 
kctx->gk5e->get_mic(kctx, text, token); } /** * gss_krb5_verify_mic - verify_mic for the Kerberos GSS mechanism * @gctx: GSS context * @message_buffer: plaintext to check * @read_token: received checksum to check * * Return values: * %GSS_S_COMPLETE - computed and received checksums match * %GSS_S_DEFECTIVE_TOKEN - received checksum is not valid * %GSS_S_BAD_SIG - computed and received checksums do not match * %GSS_S_FAILURE - received checksum could not be checked * %GSS_S_CONTEXT_EXPIRED - Kerberos context is no longer valid */ static u32 gss_krb5_verify_mic(struct gss_ctx *gctx, struct xdr_buf *message_buffer, struct xdr_netobj *read_token) { struct krb5_ctx *kctx = gctx->internal_ctx_id; return kctx->gk5e->verify_mic(kctx, message_buffer, read_token); } /** * gss_krb5_wrap - gss_wrap for the Kerberos GSS mechanism * @gctx: initialized GSS context * @offset: byte offset in @buf to start writing the cipher text * @buf: OUT: send buffer * @pages: plaintext to wrap * * Return values: * %GSS_S_COMPLETE - success, @buf has been updated * %GSS_S_FAILURE - @buf could not be wrapped * %GSS_S_CONTEXT_EXPIRED - Kerberos context is no longer valid */ static u32 gss_krb5_wrap(struct gss_ctx *gctx, int offset, struct xdr_buf *buf, struct page **pages) { struct krb5_ctx *kctx = gctx->internal_ctx_id; return kctx->gk5e->wrap(kctx, offset, buf, pages); } /** * gss_krb5_unwrap - gss_unwrap for the Kerberos GSS mechanism * @gctx: initialized GSS context * @offset: starting byte offset into @buf * @len: size of ciphertext to unwrap * @buf: ciphertext to unwrap * * Return values: * %GSS_S_COMPLETE - success, @buf has been updated * %GSS_S_DEFECTIVE_TOKEN - received blob is not valid * %GSS_S_BAD_SIG - computed and received checksums do not match * %GSS_S_FAILURE - @buf could not be unwrapped * %GSS_S_CONTEXT_EXPIRED - Kerberos context is no longer valid */ static u32 gss_krb5_unwrap(struct gss_ctx *gctx, int offset, int len, struct xdr_buf *buf) { struct krb5_ctx *kctx = gctx->internal_ctx_id; return kctx->gk5e->unwrap(kctx, offset, len, buf, &gctx->slack, &gctx->align); } static const struct gss_api_ops gss_kerberos_ops = { .gss_import_sec_context = gss_krb5_import_sec_context, .gss_get_mic = gss_krb5_get_mic, .gss_verify_mic = gss_krb5_verify_mic, .gss_wrap = gss_krb5_wrap, .gss_unwrap = gss_krb5_unwrap, .gss_delete_sec_context = gss_krb5_delete_sec_context, }; static struct pf_desc gss_kerberos_pfs[] = { [0] = { .pseudoflavor = RPC_AUTH_GSS_KRB5, .qop = GSS_C_QOP_DEFAULT, .service = RPC_GSS_SVC_NONE, .name = "krb5", }, [1] = { .pseudoflavor = RPC_AUTH_GSS_KRB5I, .qop = GSS_C_QOP_DEFAULT, .service = RPC_GSS_SVC_INTEGRITY, .name = "krb5i", .datatouch = true, }, [2] = { .pseudoflavor = RPC_AUTH_GSS_KRB5P, .qop = GSS_C_QOP_DEFAULT, .service = RPC_GSS_SVC_PRIVACY, .name = "krb5p", .datatouch = true, }, }; MODULE_ALIAS("rpc-auth-gss-krb5"); MODULE_ALIAS("rpc-auth-gss-krb5i"); MODULE_ALIAS("rpc-auth-gss-krb5p"); MODULE_ALIAS("rpc-auth-gss-390003"); MODULE_ALIAS("rpc-auth-gss-390004"); MODULE_ALIAS("rpc-auth-gss-390005"); MODULE_ALIAS("rpc-auth-gss-1.2.840.113554.1.2.2"); static struct gss_api_mech gss_kerberos_mech = { .gm_name = "krb5", .gm_owner = THIS_MODULE, .gm_oid = { 9, "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02" }, .gm_ops = &gss_kerberos_ops, .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), .gm_pfs = gss_kerberos_pfs, .gm_upcall_enctypes = gss_krb5_enctype_priority_list, }; static int __init init_kerberos_module(void) { int status; gss_krb5_prepare_enctype_priority_list(); status = 
gss_mech_register(&gss_kerberos_mech);
	if (status)
		pr_err("Failed to register kerberos gss mechanism!\n");
	return status;
}

static void __exit cleanup_kerberos_module(void)
{
	gss_mech_unregister(&gss_kerberos_mech);
}

MODULE_DESCRIPTION("Sun RPC Kerberos 5 module");
MODULE_LICENSE("GPL");
module_init(init_kerberos_module);
module_exit(cleanup_kerberos_module);
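As a worked illustration of gss_krb5_prepare_enctype_priority_list(): the enctype numbers are the RFC-assigned values (17 and 18 for the RFC 3962 AES types, 25 and 26 for the RFC 6803 Camellia types, 19 and 20 for the RFC 8009 AES types), so with all three Kconfig options enabled the priority string handed to the userspace upcall is "20,19,26,25,18,17". The standalone userspace sketch below mirrors the kernel loop; it is illustrative only and not part of this module.

/* Illustrative userspace sketch of the priority-list construction above. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* RFC-assigned enctype numbers, most preferred first */
	static const unsigned int enctypes[] = { 20, 19, 26, 25, 18, 17 };
	char list[64] = "";
	char buf[16];
	const char *sep = "";
	size_t total = 0;
	size_t i;

	for (i = 0; i < sizeof(enctypes) / sizeof(enctypes[0]); i++) {
		int n = snprintf(buf, sizeof(buf), "%s%u", sep, enctypes[i]);

		if (n < 0 || total + n >= sizeof(list))
			break;
		strcat(list, buf);
		sep = ",";
		total += n;
	}
	printf("%s\n", list);	/* prints: 20,19,26,25,18,17 */
	return 0;
}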
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "dc.h" #include "dc_dmub_srv.h" #include "../dmub/dmub_srv.h" #include "dm_helpers.h" #include "dc_hw_types.h" #include "core_types.h" #include "../basics/conversion.h" #include "cursor_reg_cache.h" #include "resource.h" #include "clk_mgr.h" #include "dc_state_priv.h" #include "dc_plane_priv.h" #define CTX dc_dmub_srv->ctx #define DC_LOGGER CTX->logger static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc, struct dmub_srv *dmub) { dc_srv->dmub = dmub; dc_srv->ctx = dc->ctx; } struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub) { struct dc_dmub_srv *dc_srv = kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL); if (dc_srv == NULL) { BREAK_TO_DEBUGGER(); return NULL; } dc_dmub_srv_construct(dc_srv, dc, dmub); return dc_srv; } void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv) { if (*dmub_srv) { kfree(*dmub_srv); *dmub_srv = NULL; } } void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_srv *dmub = dc_dmub_srv->dmub; struct dc_context *dc_ctx = dc_dmub_srv->ctx; enum dmub_status status; do { status = dmub_srv_wait_for_idle(dmub, 100000); } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); if (status != DMUB_STATUS_OK) { DC_ERROR("Error waiting for DMUB idle: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); } } void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_srv *dmub = dc_dmub_srv->dmub; struct dc_context *dc_ctx = dc_dmub_srv->ctx; enum dmub_status status = DMUB_STATUS_OK; status = dmub_srv_clear_inbox0_ack(dmub); if (status != DMUB_STATUS_OK) { DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); } } void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_srv *dmub = dc_dmub_srv->dmub; struct dc_context *dc_ctx = dc_dmub_srv->ctx; enum dmub_status status = DMUB_STATUS_OK; status = dmub_srv_wait_for_inbox0_ack(dmub, 100000); if (status != DMUB_STATUS_OK) { DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n"); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); } } void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv, union dmub_inbox0_data_register data) { struct dmub_srv *dmub = dc_dmub_srv->dmub; struct dc_context *dc_ctx = dc_dmub_srv->ctx; enum dmub_status status = DMUB_STATUS_OK; status = 
dmub_srv_send_inbox0_cmd(dmub, data); if (status != DMUB_STATUS_OK) { DC_ERROR("Error sending INBOX0 cmd\n"); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); } } bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list) { struct dc_context *dc_ctx; struct dmub_srv *dmub; enum dmub_status status; int i; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; dc_ctx = dc_dmub_srv->ctx; dmub = dc_dmub_srv->dmub; for (i = 0 ; i < count; i++) { // Queue command status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); if (status == DMUB_STATUS_QUEUE_FULL) { /* Execute and wait for queue to become empty again. */ status = dmub_srv_cmd_execute(dmub); if (status == DMUB_STATUS_POWER_STATE_D3) return false; do { status = dmub_srv_wait_for_idle(dmub, 100000); } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); /* Requeue the command. */ status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); } if (status != DMUB_STATUS_OK) { if (status != DMUB_STATUS_POWER_STATE_D3) { DC_ERROR("Error queueing DMUB command: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); } return false; } } status = dmub_srv_cmd_execute(dmub); if (status != DMUB_STATUS_OK) { if (status != DMUB_STATUS_POWER_STATE_D3) { DC_ERROR("Error starting DMUB execution: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); } return false; } return true; } bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, enum dm_dmub_wait_type wait_type, union dmub_rb_cmd *cmd_list) { struct dmub_srv *dmub; enum dmub_status status; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; dmub = dc_dmub_srv->dmub; // Wait for DMUB to process command if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { do { status = dmub_srv_wait_for_idle(dmub, 100000); } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); if (status != DMUB_STATUS_OK) { DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); if (!dmub->debug.timeout_occured) { dmub->debug.timeout_occured = true; dmub->debug.timeout_cmd = *cmd_list; dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx); } dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); return false; } // Copy data back from ring buffer into command if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); } return true; } bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) { return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type); } bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type) { struct dc_context *dc_ctx; struct dmub_srv *dmub; enum dmub_status status; int i; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; dc_ctx = dc_dmub_srv->ctx; dmub = dc_dmub_srv->dmub; for (i = 0 ; i < count; i++) { // Queue command status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); if (status == DMUB_STATUS_QUEUE_FULL) { /* Execute and wait for queue to become empty again. */ status = dmub_srv_cmd_execute(dmub); if (status == DMUB_STATUS_POWER_STATE_D3) return false; status = dmub_srv_wait_for_idle(dmub, 100000); if (status != DMUB_STATUS_OK) return false; /* Requeue the command. 
*/ status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); } if (status != DMUB_STATUS_OK) { if (status != DMUB_STATUS_POWER_STATE_D3) { DC_ERROR("Error queueing DMUB command: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); } return false; } } status = dmub_srv_cmd_execute(dmub); if (status != DMUB_STATUS_OK) { if (status != DMUB_STATUS_POWER_STATE_D3) { DC_ERROR("Error starting DMUB execution: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); } return false; } // Wait for DMUB to process command if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { if (dc_dmub_srv->ctx->dc->debug.disable_timeout) { do { status = dmub_srv_wait_for_idle(dmub, 100000); } while (status != DMUB_STATUS_OK); } else status = dmub_srv_wait_for_idle(dmub, 100000); if (status != DMUB_STATUS_OK) { DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); return false; } // Copy data back from ring buffer into command if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); } return true; } bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_srv *dmub; struct dc_context *dc_ctx; union dmub_fw_boot_status boot_status; enum dmub_status status; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; dmub = dc_dmub_srv->dmub; dc_ctx = dc_dmub_srv->ctx; status = dmub_srv_get_fw_boot_status(dmub, &boot_status); if (status != DMUB_STATUS_OK) { DC_ERROR("Error querying DMUB boot status: error=%d\n", status); return false; } return boot_status.bits.optimized_init_done; } bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, unsigned int stream_mask) { if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK, stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT); } bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_srv *dmub; struct dc_context *dc_ctx; union dmub_fw_boot_status boot_status; enum dmub_status status; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; dmub = dc_dmub_srv->dmub; dc_ctx = dc_dmub_srv->ctx; status = dmub_srv_get_fw_boot_status(dmub, &boot_status); if (status != DMUB_STATUS_OK) { DC_ERROR("Error querying DMUB boot status: error=%d\n", status); return false; } return boot_status.bits.restore_required; } bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry) { struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub; return dmub_srv_get_outbox0_msg(dmub, entry); } void dc_dmub_trace_event_control(struct dc *dc, bool enable) { dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable); } void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max) { union dmub_rb_cmd cmd = { 0 }; cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE; cmd.drr_update.dmub_optc_state_req.v_total_max = vtotal_max; cmd.drr_update.dmub_optc_state_req.v_total_min = vtotal_min; cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst; cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); // Send the command to the DMCUB. 
dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst) { union dmub_rb_cmd cmd = { 0 }; cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER; cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst; cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); // Send the command to the DMCUB. dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream) { uint8_t pipes = 0; int i = 0; for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream && pipe->stream_res.tg) pipes = i; } return pipes; } static void dc_dmub_srv_populate_fams_pipe_info(struct dc *dc, struct dc_state *context, struct pipe_ctx *head_pipe, struct dmub_cmd_fw_assisted_mclk_switch_pipe_data *fams_pipe_data) { int j; int pipe_idx = 0; fams_pipe_data->pipe_index[pipe_idx++] = head_pipe->plane_res.hubp->inst; for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *split_pipe = &context->res_ctx.pipe_ctx[j]; if (split_pipe->stream == head_pipe->stream && (split_pipe->top_pipe || split_pipe->prev_odm_pipe)) { fams_pipe_data->pipe_index[pipe_idx++] = split_pipe->plane_res.hubp->inst; } } fams_pipe_data->pipe_count = pipe_idx; } bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, struct dc_state *context) { union dmub_rb_cmd cmd = { 0 }; struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data; int i = 0, k = 0; int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it. uint8_t visual_confirm_enabled; int pipe_idx = 0; struct dc_stream_status *stream_status = NULL; if (dc == NULL) return false; visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS; // Format command. cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL; cmd.fw_assisted_mclk_switch.config_data.fams_enabled = should_manage_pstate; cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled; if (should_manage_pstate) { for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (!pipe->stream) continue; /* If FAMS is being used to support P-State and there is a stream * that does not use FAMS, we are in an FPO + VActive scenario. * Assign vactive stretch margin in this case. 
 */
			stream_status = dc_state_get_stream_status(context, pipe->stream);
			if (stream_status && !stream_status->fpo_in_use) {
				cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
				break;
			}
			pipe_idx++;
		}
	}

	for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!resource_is_pipe_type(pipe, OTG_MASTER))
			continue;

		stream_status = dc_state_get_stream_status(context, pipe->stream);
		if (stream_status && stream_status->fpo_in_use) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
			uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;

			config_data->pipe_data[k].pix_clk_100hz = pipe->stream->timing.pix_clk_100hz;
			config_data->pipe_data[k].min_refresh_in_hz = min_refresh_in_hz;
			config_data->pipe_data[k].max_ramp_step = ramp_up_num_steps;
			config_data->pipe_data[k].pipes = dc_dmub_srv_get_pipes_for_stream(dc, pipe->stream);
			dc_dmub_srv_populate_fams_pipe_info(dc, context, pipe, &config_data->pipe_data[k]);
			k++;
		}
	}
	cmd.fw_assisted_mclk_switch.header.payload_bytes =
		sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}

void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
{
	union dmub_rb_cmd cmd = { 0 };

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return;

	memset(&cmd, 0, sizeof(cmd));

	/* Prepare fw command */
	cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS;
	cmd.query_feature_caps.header.sub_type = 0;
	cmd.query_feature_caps.header.ret_status = 1;
	cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);

	/* If command was processed, copy feature caps to dmub srv */
	if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
	    cmd.query_feature_caps.header.ret_status == 0) {
		memcpy(&dc_dmub_srv->dmub->feature_caps,
		       &cmd.query_feature_caps.query_feature_caps_data,
		       sizeof(struct dmub_feature_caps));
	}
}

void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	union dmub_rb_cmd cmd = { 0 };
	unsigned int panel_inst = 0;

	if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst) &&
	    dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
		return;

	memset(&cmd, 0, sizeof(cmd));

	// Prepare fw command
	cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR;
	cmd.visual_confirm_color.header.sub_type = 0;
	cmd.visual_confirm_color.header.ret_status = 1;
	cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
	cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;

	// If command was processed, copy the visual confirm color to dmub srv
	if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
	    cmd.visual_confirm_color.header.ret_status == 0) {
		memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
		       &cmd.visual_confirm_color.visual_confirm_color_data,
		       sizeof(struct dmub_visual_confirm_color));
	}
}

/**
 * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
 *
 * @dc: [in] pointer to dc object
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @vblank_pipe: [in] pipe_ctx for the DRR pipe
 * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
 * @context: [in] DC state for access to phantom stream
 *
 * Populate the DMCUB SubVP command
 with DRR pipe info. All the information
 * required for calculating the SubVP + DRR microschedule is populated here.
 *
 * High level algorithm:
 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
 * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule
 * 3. Populate the drr_info with the min and max supported vtotal values
 */
static void populate_subvp_cmd_drr_info(struct dc *dc,
		struct dc_state *context,
		struct pipe_ctx *subvp_pipe,
		struct pipe_ctx *vblank_pipe,
		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing;
	struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
	uint16_t drr_frame_us = 0;
	uint16_t min_drr_supported_us = 0;
	uint16_t max_drr_supported_us = 0;
	uint16_t max_drr_vblank_us = 0;
	uint16_t max_drr_mallregion_us = 0;
	uint16_t mall_region_us = 0;
	uint16_t prefetch_us = 0;
	uint16_t subvp_active_us = 0;
	uint16_t drr_active_us = 0;
	uint16_t min_vtotal_supported = 0;
	uint16_t max_vtotal_supported = 0;

	if (!phantom_stream)
		return;

	phantom_timing = &phantom_stream->timing;

	pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
	pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
	pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now

	drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
	// P-State allow width and FW delays are already included in phantom_timing->v_addressable
	mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
			(((uint64_t)phantom_timing->pix_clk_100hz * 100)));
	min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
	min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
			(((uint64_t)drr_timing->h_total * 1000000)));

	prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
			(((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
			(((uint64_t)main_timing->pix_clk_100hz * 100)));
	drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
	max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us -
			dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us;
	max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us;
	max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
	max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
			(((uint64_t)drr_timing->h_total * 1000000)));

	/* When calculating the max vtotal supported for SubVP + DRR cases, add
	 * margin due to possible rounding errors (being off by 1 line in the
	 * FW calculation can incorrectly push the P-State switch to wait 1 frame
	 * longer).
 */
	max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us;

	pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;

	pipe_data->pipe_config.vblank_data.drr_info.drr_vblank_start_margin = dc->caps.subvp_drr_vblank_start_margin_us;
}

/**
 * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with VBLANK pipe info. All the information
 * required to calculate the microschedule for SubVP + VBLANK case is stored in
 * the pipe_data (subvp_data and vblank_data). Also check if the VBLANK pipe
 * is a DRR display -- if it is, make a call to populate drr_info.
 */
static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *vblank_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t i;
	struct pipe_ctx *pipe = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
		&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];

	// Find the SubVP pipe
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// We check for master pipe, but it shouldn't matter since we only need
		// the pipe for timing info (stream should be same for any pipe splits)
		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
		    !resource_is_pipe_type(pipe, DPP_PIPE))
			continue;

		// Find the SubVP pipe
		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			break;
	}

	pipe_data->mode = VBLANK;
	pipe_data->pipe_config.vblank_data.pix_clk_100hz = vblank_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.vblank_data.vblank_start =
		vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch;
	pipe_data->pipe_config.vblank_data.vtotal = vblank_pipe->stream->timing.v_total;
	pipe_data->pipe_config.vblank_data.htotal = vblank_pipe->stream->timing.h_total;
	pipe_data->pipe_config.vblank_data.vblank_pipe_index = vblank_pipe->pipe_idx;
	pipe_data->pipe_config.vblank_data.vstartup_start = vblank_pipe->pipe_dlg_param.vstartup_start;
	pipe_data->pipe_config.vblank_data.vblank_end =
		vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch - vblank_pipe->stream->timing.v_addressable;

	if (vblank_pipe->stream->ignore_msa_timing_param &&
		(vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
		populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
}

/**
 * update_subvp_prefetch_end_to_mall_start - Helper for SubVP + SubVP case
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2)
 *
 * For SubVP + SubVP, we use a single vertical interrupt to start the
 * microschedule for both SubVP pipes. In order for this to work correctly, the
 * MALL REGION of both SubVP pipes must start at the same time. This function
 * lengthens the prefetch end to mall start delay of the SubVP pipe that has
 * the shorter prefetch so that both MALL REGIONs will start at the same time.
 */
static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipes[])
{
	uint32_t subvp0_prefetch_us = 0;
	uint32_t subvp1_prefetch_us = 0;
	uint32_t prefetch_delta_us = 0;
	struct dc_stream_state *phantom_stream0 = NULL;
	struct dc_stream_state *phantom_stream1 = NULL;
	struct dc_crtc_timing *phantom_timing0 = NULL;
	struct dc_crtc_timing *phantom_timing1 = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;

	phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
	if (!phantom_stream0)
		return;

	phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
	if (!phantom_stream1)
		return;

	phantom_timing0 = &phantom_stream0->timing;
	phantom_timing1 = &phantom_stream1->timing;

	subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
			(uint64_t)phantom_timing0->h_total * 1000000),
			(((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
			(uint64_t)phantom_timing1->h_total * 1000000),
			(((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));

	// Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
	// should increase its prefetch time to match the other
	if (subvp0_prefetch_us > subvp1_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
		prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
			div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
					((uint64_t)phantom_timing1->pix_clk_100hz * 100) +
					((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
					((uint64_t)phantom_timing1->h_total * 1000000));
	} else if (subvp1_prefetch_us >  subvp0_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
		prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
			div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
					((uint64_t)phantom_timing0->pix_clk_100hz * 100) +
					((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
					((uint64_t)phantom_timing0->h_total * 1000000));
	}
}

/**
 * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with SubVP pipe info. All the information
 * required to calculate the microschedule for the SubVP pipe is stored in the
 * pipe_data of the DMCUB SubVP command.
 */
static void populate_subvp_cmd_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t j;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
		&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing;
	uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;

	if (!phantom_stream)
		return;

	phantom_timing = &phantom_stream->timing;

	pipe_data->mode = SUBVP;
	pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total;
	pipe_data->pipe_config.subvp_data.vtotal = subvp_pipe->stream->timing.v_total;
	pipe_data->pipe_config.subvp_data.main_vblank_start =
		main_timing->v_total - main_timing->v_front_porch;
	pipe_data->pipe_config.subvp_data.main_vblank_end =
		main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst;
	pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param &&
		(subvp_pipe->stream->allow_freesync || subvp_pipe->stream->vrr_active_variable || subvp_pipe->stream->vrr_active_fixed);

	/* Calculate the scaling factor from the src and dst height.
	 * e.g. if 3840x2160 is being downscaled to 1920x1080, the scaling factor is 1/2.
	 * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
	 *
	 * Make sure to combine stream and plane scaling together.
 */
	reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
			&out_num_stream, &out_den_stream);
	reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
			&out_num_plane, &out_den_plane);
	reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
	pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
	pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;

	// The number of prefetch lines is equal to VACTIVE + BP + VSYNC
	pipe_data->pipe_config.subvp_data.prefetch_lines =
		phantom_timing->v_total - phantom_timing->v_front_porch;

	// Round up
	pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
		div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
				((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
	pipe_data->pipe_config.subvp_data.processing_delay_lines =
		div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
				((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));

	if (subvp_pipe->bottom_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
	} else if (subvp_pipe->next_odm_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
	} else {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
	}

	// Find phantom pipe index based on phantom stream
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];

		if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
		    phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
			pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
			if (phantom_pipe->bottom_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
			} else if (phantom_pipe->next_odm_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
			} else {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
			}
			break;
		}
	}
}

/**
 * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @enable: [in] if true enables the pipes population
 *
 * This function loops through each pipe and populates the DMUB SubVP CMD info
 * based on the pipe (e.g. SubVP, VBLANK).
*/ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable) { uint8_t cmd_pipe_index = 0; uint32_t i, pipe_idx; uint8_t subvp_count = 0; union dmub_rb_cmd cmd; struct pipe_ctx *subvp_pipes[2]; uint32_t wm_val_refclk = 0; enum mall_stream_type pipe_mall_type; memset(&cmd, 0, sizeof(cmd)); // FW command for SUBVP cmd.fw_assisted_mclk_switch_v2.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; cmd.fw_assisted_mclk_switch_v2.header.sub_type = DMUB_CMD__HANDLE_SUBVP_CMD; cmd.fw_assisted_mclk_switch_v2.header.payload_bytes = sizeof(cmd.fw_assisted_mclk_switch_v2) - sizeof(cmd.fw_assisted_mclk_switch_v2.header); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; /* For SubVP pipe count, only count the top most (ODM / MPC) pipe */ if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) subvp_pipes[subvp_count++] = pipe; } if (enable) { // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (!pipe->stream) continue; /* When populating subvp cmd info, only pass in the top most (ODM / MPC) pipe. * Any ODM or MPC splits being used in SubVP will be handled internally in * populate_subvp_cmd_pipe_info */ if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && pipe_mall_type == SUBVP_MAIN) { populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); } else if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && pipe_mall_type == SUBVP_NONE) { // Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where // we run through DML without calculating "natural" P-state support populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); } pipe_idx++; } if (subvp_count == 2) { update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes); } cmd.fw_assisted_mclk_switch_v2.config_data.pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us; cmd.fw_assisted_mclk_switch_v2.config_data.vertical_int_margin_us = dc->caps.subvp_vertical_int_margin_us; // Store the original watermark value for this SubVP config so we can lower it when the // MCLK switch starts wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns * (dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000; cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? 
wm_val_refclk : 0xFFFF; } dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data) { if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data) return false; return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data); } void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_diagnostic_data diag_data = {0}; uint32_t i; if (!dc_dmub_srv || !dc_dmub_srv->dmub) { DC_LOG_ERROR("%s: invalid parameters.", __func__); return; } DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__); if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) { DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__); return; } DC_LOG_DEBUG("DMCUB STATE:"); DC_LOG_DEBUG(" dmcub_version : %08x", diag_data.dmcub_version); DC_LOG_DEBUG(" scratch [0] : %08x", diag_data.scratch[0]); DC_LOG_DEBUG(" scratch [1] : %08x", diag_data.scratch[1]); DC_LOG_DEBUG(" scratch [2] : %08x", diag_data.scratch[2]); DC_LOG_DEBUG(" scratch [3] : %08x", diag_data.scratch[3]); DC_LOG_DEBUG(" scratch [4] : %08x", diag_data.scratch[4]); DC_LOG_DEBUG(" scratch [5] : %08x", diag_data.scratch[5]); DC_LOG_DEBUG(" scratch [6] : %08x", diag_data.scratch[6]); DC_LOG_DEBUG(" scratch [7] : %08x", diag_data.scratch[7]); DC_LOG_DEBUG(" scratch [8] : %08x", diag_data.scratch[8]); DC_LOG_DEBUG(" scratch [9] : %08x", diag_data.scratch[9]); DC_LOG_DEBUG(" scratch [10] : %08x", diag_data.scratch[10]); DC_LOG_DEBUG(" scratch [11] : %08x", diag_data.scratch[11]); DC_LOG_DEBUG(" scratch [12] : %08x", diag_data.scratch[12]); DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]); DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]); DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]); for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++) DC_LOG_DEBUG(" pc[%d] : %08x", i, diag_data.pc[i]); DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr); DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr); DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr); DC_LOG_DEBUG(" inbox1_rptr : %08x", diag_data.inbox1_rptr); DC_LOG_DEBUG(" inbox1_wptr : %08x", diag_data.inbox1_wptr); DC_LOG_DEBUG(" inbox1_size : %08x", diag_data.inbox1_size); DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr); DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr); DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size); DC_LOG_DEBUG(" outbox1_rptr : %08x", diag_data.outbox1_rptr); DC_LOG_DEBUG(" outbox1_wptr : %08x", diag_data.outbox1_wptr); DC_LOG_DEBUG(" outbox1_size : %08x", diag_data.outbox1_size); DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled); DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset); DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset); DC_LOG_DEBUG(" is_traceport_en : %d", diag_data.is_traceport_en); DC_LOG_DEBUG(" is_cw0_en : %d", diag_data.is_cw0_enabled); DC_LOG_DEBUG(" is_cw6_en : %d", diag_data.is_cw6_enabled); } static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) { struct pipe_ctx *test_pipe, *split_pipe; const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data; struct rect r1 = scl_data->recout, r2, r2_half; int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b; int cur_layer = pipe_ctx->plane_state->layer_index; /** * Disable the cursor if there's another pipe above this with a * plane that contains this pipe's 
viewport to prevent double cursor
 * and incorrect scaling artifacts.
 */
	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
	     test_pipe = test_pipe->top_pipe) {
		// Skip invisible layer and pipe-split plane on same layer
		if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
			continue;

		r2 = test_pipe->plane_res.scl_data.recout;
		r2_r = r2.x + r2.width;
		r2_b = r2.y + r2.height;

		/**
		 * If there is another half plane on the same layer because of
		 * pipe split, merge them together (they share the same height).
		 */
		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
		     split_pipe = split_pipe->top_pipe)
			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
				r2_half = split_pipe->plane_res.scl_data.recout;
				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
				r2.width = r2.width + r2_half.width;
				r2_r = r2.x + r2.width;
				break;
			}

		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
			return true;
	}

	return false;
}

static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
	if (pipe_ctx->plane_state != NULL) {
		if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
			return false;

		if (dc_can_pipe_disable_cursor(pipe_ctx))
			return false;
	}

	if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
	     pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
	    pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
		return true;

	if (pipe_ctx->stream->link->replay_settings.config.replay_supported)
		return true;

	return false;
}

static void dc_build_cursor_update_payload0(
		struct pipe_ctx *pipe_ctx, uint8_t p_idx,
		struct dmub_cmd_update_cursor_payload0 *payload)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	unsigned int panel_inst = 0;

	if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
		pipe_ctx->stream->link, &panel_inst))
		return;

	/* Payload: Cursor Rect is built from position & attribute
	 * x & y are obtained from position
	 */
	payload->cursor_rect.x = hubp->cur_rect.x;
	payload->cursor_rect.y = hubp->cur_rect.y;
	/* w & h are obtained from attribute */
	payload->cursor_rect.width  = hubp->cur_rect.w;
	payload->cursor_rect.height = hubp->cur_rect.h;

	payload->enable      = hubp->pos.cur_ctl.bits.cur_enable;
	payload->pipe_idx    = p_idx;
	payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
	payload->panel_inst  = panel_inst;
}

static void dc_build_cursor_position_update_payload0(
		struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl->position_cfg.pHubp.cur_ctl.raw  = hubp->pos.cur_ctl.raw;
	pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
	pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
	pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;

	/* dpp */
	pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
	pl->position_cfg.pipe_idx = p_idx;
}

static void dc_build_cursor_attribute_update_payload1(
		struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
	pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
	pl_A->aHubp.cur_ctl.raw  = hubp->att.cur_ctl.raw;
	pl_A->aHubp.size.raw     = hubp->att.size.raw;
	pl_A->aHubp.settings.raw = hubp->att.settings.raw;

	/* dpp */
	pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
}

/**
 * dc_send_update_cursor_info_to_dmu - Populate the DMCUB Cursor update info command
 *
 * @pCtx: [in] pipe context
 * @pipe_idx: [in] pipe index
 *
 * This function stores the cursor related information
 and passes it into
 * the dmub.
 */
void dc_send_update_cursor_info_to_dmu(
		struct pipe_ctx *pCtx, uint8_t pipe_idx)
{
	union dmub_rb_cmd cmd[2];
	union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
		&cmd[0].update_cursor_info.update_cursor_info_data;

	memset(cmd, 0, sizeof(cmd));

	if (!dc_dmub_should_update_cursor_data(pCtx))
		return;
	/*
	 * Since we use multi_cmd_pending for the dmub command, the 2nd command
	 * is only assigned to store the cursor attributes info.
	 * The 1st command can be viewed as 2 parts: the first is for PSR/Replay
	 * data, the other is to store the cursor position info.
	 *
	 * The command header type must be the same when multi_cmd_pending is
	 * used. Besides, while processing the 2nd command in the DMU, the sub
	 * type is unused, so it is meaningless to pass a sub type header with a
	 * different type.
	 */
	{
		/* Build Payload#0 Header */
		cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[0].update_cursor_info.header.payload_bytes =
				sizeof(cmd[0].update_cursor_info.update_cursor_info_data);
		cmd[0].update_cursor_info.header.multi_cmd_pending = 1; // To combine multiple dmub commands, 1st cmd

		/* Prepare Payload */
		dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0);

		dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx,
				pCtx->plane_res.hubp, pCtx->plane_res.dpp);
	}
	{
		/* Build Payload#1 Header */
		cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
		cmd[1].update_cursor_info.header.multi_cmd_pending = 0; // Indicate it's the last command.

		dc_build_cursor_attribute_update_payload1(
				&cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
				pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);

		/* Send the combined update_cursor_info commands to the DMU */
		dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
	}
}

bool dc_dmub_check_min_version(struct dmub_srv *srv)
{
	if (!srv->hw_funcs.is_psrsu_supported)
		return true;
	return srv->hw_funcs.is_psrsu_supported(srv);
}

void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
				       0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
				       0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	DC_LOG_DEBUG("Enabled DPIA trace\n");
}

void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index)
{
	dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}

bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
	struct dc_context *dc_ctx;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return true;

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return true;

	dc_ctx = dc_dmub_srv->ctx;

	if (wait) {
		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
			do {
				status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			} while (status != DMUB_STATUS_OK);
		} else {
			status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			if (status != DMUB_STATUS_OK) {
				DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
				return false;
			}
		}
	} else
		return
dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub); return true; } static int count_active_streams(const struct dc *dc) { int i, count = 0; for (i = 0; i < dc->current_state->stream_count; ++i) { struct dc_stream_state *stream = dc->current_state->streams[i]; if (stream && !stream->dpms_off) count += 1; } return count; } static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) { volatile const struct dmub_shared_state_ips_fw *ips_fw; struct dc_dmub_srv *dc_dmub_srv; union dmub_rb_cmd cmd = {0}; if (dc->debug.dmcub_emulation) return; if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub) return; dc_dmub_srv = dc->ctx->dmub_srv; ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; memset(&cmd, 0, sizeof(cmd)); cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT; cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE; cmd.idle_opt_notify_idle.header.payload_bytes = sizeof(cmd.idle_opt_notify_idle) - sizeof(cmd.idle_opt_notify_idle.header); cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle; if (dc->work_arounds.skip_psr_ips_crtc_disable) cmd.idle_opt_notify_idle.cntl_data.skip_otg_disable = true; if (allow_idle) { volatile struct dmub_shared_state_ips_driver *ips_driver = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; union dmub_shared_state_ips_driver_signals new_signals; DC_LOG_IPS( "%s wait idle (ips1_commit=%u ips2_commit=%u)", __func__, ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); memset(&new_signals, 0, sizeof(new_signals)); new_signals.bits.allow_idle = 1; /* always set */ if (dc->config.disable_ips == DMUB_IPS_ENABLE || dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; new_signals.bits.allow_z10 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { new_signals.bits.allow_ips1 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; } else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) { /* TODO: Move this logic out to hwseq */ if (count_active_streams(dc) == 0) { /* IPS2 - Display off */ new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; new_signals.bits.allow_z10 = 1; } else { /* RCG only */ new_signals.bits.allow_pg = 0; new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 0; new_signals.bits.allow_z10 = 0; } } ips_driver->signals = new_signals; dc_dmub_srv->driver_signals = ips_driver->signals; } DC_LOG_IPS( "%s send allow_idle=%d (ips1_commit=%u ips2_commit=%u)", __func__, allow_idle, ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); /* NOTE: This does not use the "wake" interface since this is part of the wake path. */ /* We also do not perform a wait since DMCUB could enter idle after the notification. */ dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT); /* Register access should stop at this point. 
*/ if (allow_idle) dc_dmub_srv->needs_idle_wake = true; } static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { struct dc_dmub_srv *dc_dmub_srv; uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0; if (dc->debug.dmcub_emulation) return; if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub) return; dc_dmub_srv = dc->ctx->dmub_srv; if (dc->clk_mgr->funcs->exit_low_power_state) { volatile const struct dmub_shared_state_ips_fw *ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; volatile struct dmub_shared_state_ips_driver *ips_driver = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals; rcg_exit_count = ips_fw->rcg_exit_count; ips1_exit_count = ips_fw->ips1_exit_count; ips2_exit_count = ips_fw->ips2_exit_count; ips_driver->signals.all = 0; dc_dmub_srv->driver_signals = ips_driver->signals; DC_LOG_IPS( "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)", __func__, ips_driver->signals.bits.allow_ips1, ips_driver->signals.bits.allow_ips2, ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit, ips_fw->rcg_entry_count, ips_fw->ips1_entry_count, ips_fw->ips2_entry_count); /* Note: register access has technically not resumed for DCN here, but we * need to message PMFW through our standard register interface. */ dc_dmub_srv->needs_idle_wake = false; if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) && (!dc->debug.optimize_ips_handshake || ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) { DC_LOG_IPS( "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit) udelay(dc->debug.ips2_eval_delay_us); if (ips_fw->signals.bits.ips2_commit) { DC_LOG_IPS( "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); // Tell PMFW to exit low power state dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); DC_LOG_IPS( "wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); // Wait for IPS2 entry upper bound udelay(dc->debug.ips2_entry_delay_us); DC_LOG_IPS( "exit IPS2 #2 (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); DC_LOG_IPS( "wait IPS2 commit clear (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); while (ips_fw->signals.bits.ips2_commit) udelay(1); DC_LOG_IPS( "wait hw_pwr_up (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); DC_LOG_IPS( "resync inbox1 (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub); } } dc_dmub_srv_notify_idle(dc, false); if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) { DC_LOG_IPS( "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); while (ips_fw->signals.bits.ips1_commit) udelay(1); DC_LOG_IPS( "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); } } if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)", __func__, rcg_exit_count, ips1_exit_count, ips2_exit_count); } void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state) { struct dmub_srv *dmub; if (!dc_dmub_srv) return; dmub = dc_dmub_srv->dmub; if (power_state == DC_ACPI_CM_POWER_STATE_D0) dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0); else dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3); } void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state) { union dmub_rb_cmd cmd; if (!dc_dmub_srv) return; memset(&cmd, 0, sizeof(cmd)); cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT; cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE; cmd.idle_opt_set_dc_power_state.header.payload_bytes = sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header); if (power_state == DC_ACPI_CM_POWER_STATE_D0) { cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0; } else if (power_state == DC_ACPI_CM_POWER_STATE_D3) { cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D3; } else { cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN; } dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv) { volatile const struct dmub_shared_state_ips_fw *ips_fw; bool reallow_idle = false, should_detect = false; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; if (dc_dmub_srv->dmub->shared_state && dc_dmub_srv->dmub->meta_info.feature_bits.bits.shared_state_link_detection) { ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; return ips_fw->signals.bits.detection_required; } /* Detection may require reading scratch 0 - exit out of idle prior to the read. */ if (dc_dmub_srv->idle_allowed) { dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, false); reallow_idle = true; } should_detect = dmub_srv_should_detect(dc_dmub_srv->dmub); /* Re-enter idle if we're not about to immediately redetect links. */ if (!should_detect && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 && !dc_dmub_srv->ctx->dc->debug.disable_dmub_reallow_idle) dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, true); return should_detect; } void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle) { struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return; allow_idle &= (!dc->debug.ips_disallow_entry); if (dc_dmub_srv->idle_allowed == allow_idle) return; DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle); /* * Entering a low power state requires a driver notification. * Powering up the hardware requires notifying PMFW and DMCUB. * Clearing the driver idle allow requires a DMCUB command. * DMCUB commands require the DMCUB to be powered up and restored. */ if (!allow_idle) { dc_dmub_srv->idle_exit_counter += 1; dc_dmub_srv_exit_low_power_state(dc); /* * Idle is considered fully exited only after the sequence above * fully completes.
If we have a race of two threads exiting * at the same time then it's safe to perform the sequence * twice as long as we're not re-entering. * * Infinite command submission is avoided by using the * dm_execute_dmub_cmd submission instead of the "wake" helpers. */ dc_dmub_srv->idle_allowed = false; dc_dmub_srv->idle_exit_counter -= 1; if (dc_dmub_srv->idle_exit_counter < 0) { ASSERT(0); dc_dmub_srv->idle_exit_counter = 0; } } else { /* Consider idle as notified prior to the actual submission to * prevent multiple entries. */ dc_dmub_srv->idle_allowed = true; dc_dmub_srv_notify_idle(dc, allow_idle); } } bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) { return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type); } bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) { struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv; bool result = false, reallow_idle = false; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; if (count == 0) return true; if (dc_dmub_srv->idle_allowed) { dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false); reallow_idle = true; } /* * These may have different implementations in DM, so ensure * that we guide it to the expected helper. */ if (count > 1) result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type); else result = dm_execute_dmub_cmd(ctx, cmd, wait_type); if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 && !ctx->dc->debug.disable_dmub_reallow_idle) dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); return result; } static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code, uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type) { struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv; const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 
0 : 30; enum dmub_status status; if (response) *response = 0; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us); if (status != DMUB_STATUS_OK) { if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT) return true; return false; } if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response); return true; } bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code, uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type) { struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv; bool result = false, reallow_idle = false; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; if (dc_dmub_srv->idle_allowed) { dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false); reallow_idle = true; } result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type); if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 && !ctx->dc->debug.disable_dmub_reallow_idle) dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); return result; } void dc_dmub_srv_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable) { uint8_t num_cmds = 1; uint32_t i; union dmub_rb_cmd cmd[MAX_STREAMS + 1]; struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config; memset(cmd, 0, sizeof(union dmub_rb_cmd) * (MAX_STREAMS + 1)); /* fill in generic command header */ global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG; global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); if (enable) { /* send global configuration parameters */ memcpy(&global_cmd->config.global, &context->bw_ctx.bw.dcn.fams2_global_config, sizeof(struct dmub_cmd_fams2_global_config)); /* copy static feature configuration overrides */ global_cmd->config.global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery; global_cmd->config.global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug; global_cmd->config.global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip; /* construct per-stream configs */ for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) { struct dmub_rb_cmd_fams2 *stream_cmd = &cmd[i+1].fams2_config; /* configure command header */ stream_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; stream_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG; stream_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); stream_cmd->header.multi_cmd_pending = 1; /* copy stream static state */ memcpy(&stream_cmd->config.stream, &context->bw_ctx.bw.dcn.fams2_stream_params[i], sizeof(struct dmub_fams2_stream_static_state)); } } /* apply feature configuration based on current driver state */ global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2; global_cmd->config.global.features.bits.enable = enable; if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) { /* set multi pending for global, and unset for last stream cmd */ global_cmd->header.multi_cmd_pending = 1; cmd[context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0; num_cmds += context->bw_ctx.bw.dcn.fams2_global_config.num_streams; } dm_execute_dmub_cmd_list(dc->ctx, num_cmds, 
cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dc_dmub_srv_fams2_drr_update(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max, uint32_t vtotal_mid, uint32_t vtotal_mid_frame_num, bool program_manual_trigger) { union dmub_rb_cmd cmd = { 0 }; cmd.fams2_drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; cmd.fams2_drr_update.header.sub_type = DMUB_CMD__FAMS2_DRR_UPDATE; cmd.fams2_drr_update.dmub_optc_state_req.tg_inst = tg_inst; cmd.fams2_drr_update.dmub_optc_state_req.v_total_max = vtotal_max; cmd.fams2_drr_update.dmub_optc_state_req.v_total_min = vtotal_min; cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid = vtotal_mid; cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num; cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger; cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header); dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dc_dmub_srv_fams2_passthrough_flip( struct dc *dc, struct dc_state *state, struct dc_stream_state *stream, struct dc_surface_update *srf_updates, int surface_count) { int plane_index; union dmub_rb_cmd cmds[MAX_PLANES]; struct dc_plane_address *address; struct dc_plane_state *plane_state; int num_cmds = 0; struct dc_stream_status *stream_status = dc_stream_get_status(stream); if (surface_count <= 0 || stream_status == NULL) return; memset(cmds, 0, sizeof(union dmub_rb_cmd) * MAX_PLANES); /* build command for each surface update */ for (plane_index = 0; plane_index < surface_count; plane_index++) { plane_state = srf_updates[plane_index].surface; address = &plane_state->address; /* skip if there is no address update for the plane */ if (!srf_updates[plane_index].flip_addr) continue; /* build command header */ cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP; cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip); /* for chaining multiple commands, all but the last command should be set to 1 */ cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1; /* set topology info */ cmds[num_cmds].fams2_flip.flip_info.pipe_mask = dc_plane_get_pipe_mask(state, plane_state); if (stream_status) cmds[num_cmds].fams2_flip.flip_info.otg_inst = stream_status->primary_otg_inst; cmds[num_cmds].fams2_flip.flip_info.config.bits.is_immediate = plane_state->flip_immediate; /* build address info for command */ switch (address->type) { case PLN_ADDR_TYPE_GRAPHICS: if (address->grph.addr.quad_part == 0) { BREAK_TO_DEBUGGER(); break; } cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo = address->grph.meta_addr.low_part; cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi = (uint16_t)address->grph.meta_addr.high_part; cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo = address->grph.addr.low_part; cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi = (uint16_t)address->grph.addr.high_part; break; case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE: if (address->video_progressive.luma_addr.quad_part == 0 || address->video_progressive.chroma_addr.quad_part == 0) { BREAK_TO_DEBUGGER(); break; } cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo = address->video_progressive.luma_meta_addr.low_part; cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi = (uint16_t)address->video_progressive.luma_meta_addr.high_part; cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_lo =
address->video_progressive.chroma_meta_addr.low_part; cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_hi = (uint16_t)address->video_progressive.chroma_meta_addr.high_part; cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo = address->video_progressive.luma_addr.low_part; cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi = (uint16_t)address->video_progressive.luma_addr.high_part; cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_lo = address->video_progressive.chroma_addr.low_part; cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_hi = (uint16_t)address->video_progressive.chroma_addr.high_part; break; default: // Should never be hit BREAK_TO_DEBUGGER(); break; } num_cmds++; } if (num_cmds > 0) { cmds[num_cmds - 1].fams2_flip.header.multi_cmd_pending = 0; dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT); } } bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement) { bool result; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY, start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT); return result; } void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output) { uint32_t i; enum dmub_gpint_command command_code; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return; switch (output->ips_mode) { case DMUB_IPS_MODE_IPS1_MAX: command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER; break; case DMUB_IPS_MODE_IPS2: command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER; break; case DMUB_IPS_MODE_IPS1_RCG: command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER; break; case DMUB_IPS_MODE_IPS1_ONO2_ON: command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER; break; default: command_code = DMUB_GPINT__INVALID_COMMAND; break; } if (command_code == DMUB_GPINT__INVALID_COMMAND) return; // send gpint commands and wait for ack if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT, (uint16_t)(output->ips_mode), &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) output->residency_percent = 0; if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER, (uint16_t)(output->ips_mode), &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) output->entry_counter = 0; if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO, (uint16_t)(output->ips_mode), &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) output->total_active_time_us[0] = 0; if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI, (uint16_t)(output->ips_mode), &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) output->total_active_time_us[1] = 0; if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO, (uint16_t)(output->ips_mode), &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) output->total_inactive_time_us[0] = 0; if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI, (uint16_t)(output->ips_mode), &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) output->total_inactive_time_us[1] = 0; // NUM_IPS_HISTOGRAM_BUCKETS = 16 for (i = 0; i < 16; i++) if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) output->histogram[i] = 
0; }
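/*
 * Illustrative sketch (not part of the driver): the multi_cmd_pending
 * chaining pattern used above by dc_send_update_cursor_info_to_dmu() and
 * dc_dmub_srv_fams2_update_config(). All chained commands share the same
 * header type; every command except the last sets multi_cmd_pending = 1,
 * and the list goes through the "wake" helper so idle power optimizations
 * are exited before submission and re-allowed afterwards. The context
 * pointer "ctx" below is a placeholder for a valid struct dc_context.
 *
 *	union dmub_rb_cmd cmd[2];
 *
 *	memset(cmd, 0, sizeof(cmd));
 *	cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
 *	cmd[0].update_cursor_info.header.multi_cmd_pending = 1; // more follow
 *	cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
 *	cmd[1].update_cursor_info.header.multi_cmd_pending = 0; // last cmd
 *	dc_wake_and_execute_dmub_cmd_list(ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
 */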
// SPDX-License-Identifier: GPL-2.0 /* * SDM670 SoC device tree source, adapted from SDM845 SoC device tree * * Copyright (c) 2018, The Linux Foundation. All rights reserved. * Copyright (c) 2022, Richard Acayan. All rights reserved. */ #include <dt-bindings/clock/qcom,dispcc-sdm845.h> #include <dt-bindings/clock/qcom,gcc-sdm845.h> #include <dt-bindings/clock/qcom,rpmh.h> #include <dt-bindings/dma/qcom-gpi.h> #include <dt-bindings/gpio/gpio.h> #include <dt-bindings/interconnect/qcom,osm-l3.h> #include <dt-bindings/interconnect/qcom,sdm670-rpmh.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/phy/phy-qcom-qusb2.h> #include <dt-bindings/power/qcom-rpmpd.h> #include <dt-bindings/soc/qcom,rpmh-rsc.h> / { interrupt-parent = <&intc>; #address-cells = <2>; #size-cells = <2>; aliases { }; chosen { }; cpus { #address-cells = <2>; #size-cells = <0>; cpu0: cpu@0 { device_type = "cpu"; compatible = "qcom,kryo360"; reg = <0x0 0x0>; enable-method = "psci"; capacity-dmips-mhz = <610>; dynamic-power-coefficient = <203>; qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gladiator_noc MASTER_AMPSS_M0 3 &mem_noc SLAVE_EBI_CH0 3>, <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&cpu_pd0>; power-domain-names = "psci"; next-level-cache = <&l2_0>; l2_0: l2-cache { compatible = "cache"; next-level-cache = <&l3_0>; cache-level = <2>; cache-unified; l3_0: l3-cache { compatible = "cache"; cache-level = <3>; cache-unified; }; }; }; cpu1: cpu@100 { device_type = "cpu"; compatible = "qcom,kryo360"; reg = <0x0 0x100>; enable-method = "psci"; capacity-dmips-mhz = <610>; dynamic-power-coefficient = <203>; qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gladiator_noc MASTER_AMPSS_M0 3 &mem_noc SLAVE_EBI_CH0 3>, <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&cpu_pd1>; power-domain-names = "psci"; next-level-cache = <&l2_100>; l2_100: l2-cache { compatible = "cache"; cache-level = <2>; cache-unified; next-level-cache = <&l3_0>; }; }; cpu2: cpu@200 { device_type = "cpu"; compatible = "qcom,kryo360"; reg = <0x0 0x200>; enable-method = "psci"; capacity-dmips-mhz = <610>; dynamic-power-coefficient = <203>; qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gladiator_noc MASTER_AMPSS_M0 3 &mem_noc SLAVE_EBI_CH0 3>, <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&cpu_pd2>; power-domain-names = "psci"; next-level-cache = <&l2_200>; l2_200: l2-cache { compatible = "cache"; cache-level = <2>; cache-unified; next-level-cache = <&l3_0>; }; }; cpu3: cpu@300 { device_type = "cpu"; compatible = "qcom,kryo360"; reg = <0x0 0x300>; enable-method = "psci"; capacity-dmips-mhz = <610>; dynamic-power-coefficient = <203>; qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gladiator_noc MASTER_AMPSS_M0 3 &mem_noc SLAVE_EBI_CH0 3>, <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&cpu_pd3>; power-domain-names = "psci"; next-level-cache = <&l2_300>; l2_300: l2-cache { compatible = "cache"; cache-level = <2>; cache-unified; next-level-cache = <&l3_0>; }; }; cpu4: cpu@400 { device_type = "cpu"; compatible = "qcom,kryo360"; reg = <0x0 0x400>; enable-method = "psci"; capacity-dmips-mhz = <610>; dynamic-power-coefficient = <203>; qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gladiator_noc MASTER_AMPSS_M0 3 &mem_noc 
SLAVE_EBI_CH0 3>, <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&cpu_pd4>; power-domain-names = "psci"; next-level-cache = <&l2_400>; l2_400: l2-cache { compatible = "cache"; cache-level = <2>; cache-unified; next-level-cache = <&l3_0>; }; }; cpu5: cpu@500 { device_type = "cpu"; compatible = "qcom,kryo360"; reg = <0x0 0x500>; enable-method = "psci"; capacity-dmips-mhz = <610>; dynamic-power-coefficient = <203>; qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gladiator_noc MASTER_AMPSS_M0 3 &mem_noc SLAVE_EBI_CH0 3>, <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&cpu_pd5>; power-domain-names = "psci"; next-level-cache = <&l2_500>; l2_500: l2-cache { compatible = "cache"; cache-level = <2>; cache-unified; next-level-cache = <&l3_0>; }; }; cpu6: cpu@600 { device_type = "cpu"; compatible = "qcom,kryo360"; reg = <0x0 0x600>; enable-method = "psci"; capacity-dmips-mhz = <1024>; dynamic-power-coefficient = <393>; qcom,freq-domain = <&cpufreq_hw 1>; operating-points-v2 = <&cpu6_opp_table>; interconnects = <&gladiator_noc MASTER_AMPSS_M0 3 &mem_noc SLAVE_EBI_CH0 3>, <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&cpu_pd6>; power-domain-names = "psci"; next-level-cache = <&l2_600>; l2_600: l2-cache { compatible = "cache"; cache-level = <2>; cache-unified; next-level-cache = <&l3_0>; }; }; cpu7: cpu@700 { device_type = "cpu"; compatible = "qcom,kryo360"; reg = <0x0 0x700>; enable-method = "psci"; capacity-dmips-mhz = <1024>; dynamic-power-coefficient = <393>; qcom,freq-domain = <&cpufreq_hw 1>; operating-points-v2 = <&cpu6_opp_table>; interconnects = <&gladiator_noc MASTER_AMPSS_M0 3 &mem_noc SLAVE_EBI_CH0 3>, <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&cpu_pd7>; power-domain-names = "psci"; next-level-cache = <&l2_700>; l2_700: l2-cache { compatible = "cache"; cache-level = <2>; cache-unified; next-level-cache = <&l3_0>; }; }; cpu-map { cluster0 { core0 { cpu = <&cpu0>; }; core1 { cpu = <&cpu1>; }; core2 { cpu = <&cpu2>; }; core3 { cpu = <&cpu3>; }; core4 { cpu = <&cpu4>; }; core5 { cpu = <&cpu5>; }; core6 { cpu = <&cpu6>; }; core7 { cpu = <&cpu7>; }; }; }; idle-states { entry-method = "psci"; little_cpu_sleep_0: cpu-sleep-0-0 { compatible = "arm,idle-state"; idle-state-name = "little-rail-power-collapse"; arm,psci-suspend-param = <0x40000004>; entry-latency-us = <702>; exit-latency-us = <915>; min-residency-us = <1617>; local-timer-stop; }; big_cpu_sleep_0: cpu-sleep-1-0 { compatible = "arm,idle-state"; idle-state-name = "big-rail-power-collapse"; arm,psci-suspend-param = <0x40000004>; entry-latency-us = <526>; exit-latency-us = <1854>; min-residency-us = <2380>; local-timer-stop; }; }; domain-idle-states { cluster_sleep_0: cluster-sleep-0 { compatible = "domain-idle-state"; arm,psci-suspend-param = <0x4100c244>; entry-latency-us = <3263>; exit-latency-us = <6562>; min-residency-us = <9825>; }; }; }; firmware { scm { compatible = "qcom,scm-sdm670", "qcom,scm"; }; }; memory@80000000 { device_type = "memory"; /* We expect the bootloader to fill in the size */ reg = <0x0 0x80000000 0x0 0x0>; }; cpu0_opp_table: opp-table-cpu0 { compatible = "operating-points-v2"; opp-shared; cpu0_opp1: opp-300000000 { opp-hz = /bits/ 64 <300000000>; opp-peak-kBps = <400000 4800000>; }; cpu0_opp2: opp-576000000 { opp-hz = /bits/ 64 <576000000>; opp-peak-kBps = <400000 4800000>; }; cpu0_opp3: opp-748800000 { opp-hz = /bits/ 64 <748800000>; opp-peak-kBps = <1200000 4800000>; }; 
cpu0_opp4: opp-998400000 { opp-hz = /bits/ 64 <998400000>; opp-peak-kBps = <1804000 8908800>; }; cpu0_opp5: opp-1209600000 { opp-hz = /bits/ 64 <1209600000>; opp-peak-kBps = <2188000 8908800>; }; cpu0_opp6: opp-1324800000 { opp-hz = /bits/ 64 <1324800000>; opp-peak-kBps = <2188000 13516800>; }; cpu0_opp7: opp-1516800000 { opp-hz = /bits/ 64 <1516800000>; opp-peak-kBps = <3072000 15052800>; }; cpu0_opp8: opp-1612800000 { opp-hz = /bits/ 64 <1612800000>; opp-peak-kBps = <3072000 22118400>; }; cpu0_opp9: opp-1708800000 { opp-hz = /bits/ 64 <1708800000>; opp-peak-kBps = <4068000 23040000>; }; }; cpu6_opp_table: opp-table-cpu6 { compatible = "operating-points-v2"; opp-shared; cpu6_opp1: opp-300000000 { opp-hz = /bits/ 64 <300000000>; opp-peak-kBps = <400000 4800000>; }; cpu6_opp2: opp-652800000 { opp-hz = /bits/ 64 <652800000>; opp-peak-kBps = <400000 4800000>; }; cpu6_opp3: opp-825600000 { opp-hz = /bits/ 64 <825600000>; opp-peak-kBps = <1200000 4800000>; }; cpu6_opp4: opp-979200000 { opp-hz = /bits/ 64 <979200000>; opp-peak-kBps = <1200000 4800000>; }; cpu6_opp5: opp-1132800000 { opp-hz = /bits/ 64 <1132800000>; opp-peak-kBps = <2188000 8908800>; }; cpu6_opp6: opp-1363200000 { opp-hz = /bits/ 64 <1363200000>; opp-peak-kBps = <4068000 12902400>; }; cpu6_opp7: opp-1536000000 { opp-hz = /bits/ 64 <1536000000>; opp-peak-kBps = <4068000 12902400>; }; cpu6_opp8: opp-1747200000 { opp-hz = /bits/ 64 <1747200000>; opp-peak-kBps = <4068000 15052800>; }; cpu6_opp9: opp-1843200000 { opp-hz = /bits/ 64 <1843200000>; opp-peak-kBps = <4068000 15052800>; }; cpu6_opp10: opp-1996800000 { opp-hz = /bits/ 64 <1996800000>; opp-peak-kBps = <6220000 19046400>; }; }; dsi_opp_table: opp-table-dsi { compatible = "operating-points-v2"; opp-19200000 { opp-hz = /bits/ 64 <19200000>; required-opps = <&rpmhpd_opp_min_svs>; }; opp-180000000 { opp-hz = /bits/ 64 <180000000>; required-opps = <&rpmhpd_opp_low_svs>; }; opp-275000000 { opp-hz = /bits/ 64 <275000000>; required-opps = <&rpmhpd_opp_svs>; }; opp-358000000 { opp-hz = /bits/ 64 <358000000>; required-opps = <&rpmhpd_opp_svs_l1>; }; }; psci { compatible = "arm,psci-1.0"; method = "smc"; cpu_pd0: power-domain-cpu0 { #power-domain-cells = <0>; power-domains = <&cluster_pd>; domain-idle-states = <&little_cpu_sleep_0>; }; cpu_pd1: power-domain-cpu1 { #power-domain-cells = <0>; power-domains = <&cluster_pd>; domain-idle-states = <&little_cpu_sleep_0>; }; cpu_pd2: power-domain-cpu2 { #power-domain-cells = <0>; power-domains = <&cluster_pd>; domain-idle-states = <&little_cpu_sleep_0>; }; cpu_pd3: power-domain-cpu3 { #power-domain-cells = <0>; power-domains = <&cluster_pd>; domain-idle-states = <&little_cpu_sleep_0>; }; cpu_pd4: power-domain-cpu4 { #power-domain-cells = <0>; power-domains = <&cluster_pd>; domain-idle-states = <&little_cpu_sleep_0>; }; cpu_pd5: power-domain-cpu5 { #power-domain-cells = <0>; power-domains = <&cluster_pd>; domain-idle-states = <&little_cpu_sleep_0>; }; cpu_pd6: power-domain-cpu6 { #power-domain-cells = <0>; power-domains = <&cluster_pd>; domain-idle-states = <&big_cpu_sleep_0>; }; cpu_pd7: power-domain-cpu7 { #power-domain-cells = <0>; power-domains = <&cluster_pd>; domain-idle-states = <&big_cpu_sleep_0>; }; cluster_pd: power-domain-cluster { #power-domain-cells = <0>; domain-idle-states = <&cluster_sleep_0>; }; }; reserved-memory { #address-cells = <2>; #size-cells = <2>; ranges; hyp_mem: hyp-mem@85700000 { reg = <0 0x85700000 0 0x600000>; no-map; }; xbl_mem: xbl-mem@85e00000 { reg = <0 0x85e00000 0 0x100000>; no-map; }; aop_mem: 
aop-mem@85fc0000 { reg = <0 0x85fc0000 0 0x20000>; no-map; }; aop_cmd_db_mem: aop-cmd-db-mem@85fe0000 { compatible = "qcom,cmd-db"; reg = <0 0x85fe0000 0 0x20000>; no-map; }; smem@86000000 { compatible = "qcom,smem"; reg = <0 0x86000000 0 0x200000>; no-map; hwlocks = <&tcsr_mutex 3>; }; tz_mem: tz@86200000 { reg = <0 0x86200000 0 0x2d00000>; no-map; }; camera_mem: camera-mem@8ab00000 { reg = <0 0x8ab00000 0 0x500000>; no-map; }; mpss_region: mpss@8b000000 { reg = <0 0x8b000000 0 0x7e00000>; no-map; }; venus_mem: venus@92e00000 { reg = <0 0x92e00000 0 0x500000>; no-map; }; wlan_msa_mem: wlan-msa@93300000 { reg = <0 0x93300000 0 0x100000>; no-map; }; cdsp_mem: cdsp@93400000 { reg = <0 0x93400000 0 0x800000>; no-map; }; mba_region: mba@93c00000 { reg = <0 0x93c00000 0 0x200000>; no-map; }; adsp_mem: adsp@93e00000 { reg = <0 0x93e00000 0 0x1e00000>; no-map; }; ipa_fw_mem: ipa-fw@95c00000 { reg = <0 0x95c00000 0 0x10000>; no-map; }; ipa_gsi_mem: ipa-gsi@95c10000 { reg = <0 0x95c10000 0 0x5000>; no-map; }; gpu_mem: gpu@95c15000 { reg = <0 0x95c15000 0 0x2000>; no-map; }; spss_mem: spss@97b00000 { reg = <0 0x97b00000 0 0x100000>; no-map; }; qseecom_mem: qseecom@9e400000 { reg = <0 0x9e400000 0 0x1400000>; no-map; }; }; timer { compatible = "arm,armv8-timer"; interrupts = <GIC_PPI 1 IRQ_TYPE_LEVEL_LOW>, <GIC_PPI 2 IRQ_TYPE_LEVEL_LOW>, <GIC_PPI 3 IRQ_TYPE_LEVEL_LOW>, <GIC_PPI 0 IRQ_TYPE_LEVEL_LOW>; }; soc: soc@0 { #address-cells = <2>; #size-cells = <2>; ranges = <0 0 0 0 0x10 0>; dma-ranges = <0 0 0 0 0x10 0>; compatible = "simple-bus"; gcc: clock-controller@100000 { compatible = "qcom,gcc-sdm670"; reg = <0 0x00100000 0 0x1f0000>; clocks = <&rpmhcc RPMH_CXO_CLK>, <&rpmhcc RPMH_CXO_CLK_A>, <&sleep_clk>; clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk"; #clock-cells = <1>; #reset-cells = <1>; #power-domain-cells = <1>; }; qfprom: qfprom@784000 { compatible = "qcom,sdm670-qfprom", "qcom,qfprom"; reg = <0 0x00784000 0 0x1000>; #address-cells = <1>; #size-cells = <1>; qusb2_hstx_trim: hstx-trim@1eb { reg = <0x1eb 0x1>; bits = <1 4>; }; }; sdhc_1: mmc@7c4000 { compatible = "qcom,sdm670-sdhci", "qcom,sdhci-msm-v5"; reg = <0 0x007c4000 0 0x1000>, <0 0x007c5000 0 0x1000>, <0 0x007c8000 0 0x8000>; reg-names = "hc", "cqhci", "ice"; interrupts = <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 644 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "hc_irq", "pwr_irq"; clocks = <&gcc GCC_SDCC1_AHB_CLK>, <&gcc GCC_SDCC1_APPS_CLK>, <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_SDCC1_ICE_CORE_CLK>, <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>; clock-names = "iface", "core", "xo", "ice", "bus"; interconnects = <&aggre1_noc MASTER_EMMC 0 &aggre1_noc SLAVE_A1NOC_SNOC 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_EMMC_CFG 0>; interconnect-names = "sdhc-ddr", "cpu-sdhc"; operating-points-v2 = <&sdhc1_opp_table>; iommus = <&apps_smmu 0x140 0xf>; pinctrl-names = "default", "sleep"; pinctrl-0 = <&sdc1_state_on>; pinctrl-1 = <&sdc1_state_off>; power-domains = <&rpmhpd SDM670_CX>; bus-width = <8>; non-removable; status = "disabled"; sdhc1_opp_table: opp-table { compatible = "operating-points-v2"; opp-20000000 { opp-hz = /bits/ 64 <20000000>; required-opps = <&rpmhpd_opp_min_svs>; opp-peak-kBps = <80000 80000>; opp-avg-kBps = <52286 80000>; }; opp-50000000 { opp-hz = /bits/ 64 <50000000>; required-opps = <&rpmhpd_opp_low_svs>; opp-peak-kBps = <200000 100000>; opp-avg-kBps = <130718 100000>; }; opp-100000000 { opp-hz = /bits/ 64 <100000000>; required-opps = <&rpmhpd_opp_svs>; opp-peak-kBps = <200000 130000>; opp-avg-kBps = <130718 130000>; }; 
opp-384000000 { opp-hz = /bits/ 64 <384000000>; required-opps = <&rpmhpd_opp_nom>; opp-peak-kBps = <4096000 4096000>; opp-avg-kBps = <1338562 1338562>; }; }; }; gpi_dma0: dma-controller@800000 { #dma-cells = <3>; compatible = "qcom,sdm670-gpi-dma", "qcom,sdm845-gpi-dma"; reg = <0 0x00800000 0 0x60000>; interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>; dma-channels = <13>; dma-channel-mask = <0xfa>; iommus = <&apps_smmu 0x16 0x0>; status = "disabled"; }; qupv3_id_0: geniqup@8c0000 { compatible = "qcom,geni-se-qup"; reg = <0 0x008c0000 0 0x6000>; clock-names = "m-ahb", "s-ahb"; clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; iommus = <&apps_smmu 0x3 0x0>; #address-cells = <2>; #size-cells = <2>; ranges; interconnects = <&aggre1_noc MASTER_BLSP_1 0 &config_noc SLAVE_BLSP_1 0>; interconnect-names = "qup-core"; status = "disabled"; i2c0: i2c@880000 { compatible = "qcom,geni-i2c"; reg = <0 0x00880000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c0_default>; interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre1_noc MASTER_BLSP_1 0 &config_noc SLAVE_BLSP_1 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_1 0>, <&aggre1_noc MASTER_BLSP_1 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma0 0 0 QCOM_GPI_I2C>, <&gpi_dma0 1 0 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c1: i2c@884000 { compatible = "qcom,geni-i2c"; reg = <0 0x00884000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c1_default>; interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre1_noc MASTER_BLSP_1 0 &config_noc SLAVE_BLSP_1 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_1 0>, <&aggre1_noc MASTER_BLSP_1 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma0 0 1 QCOM_GPI_I2C>, <&gpi_dma0 1 1 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c2: i2c@888000 { compatible = "qcom,geni-i2c"; reg = <0 0x00888000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c2_default>; interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre1_noc MASTER_BLSP_1 0 &config_noc SLAVE_BLSP_1 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_1 0>, <&aggre1_noc MASTER_BLSP_1 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma0 0 2 QCOM_GPI_I2C>, <&gpi_dma0 1 2 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c3: i2c@88c000 { compatible = "qcom,geni-i2c"; reg = <0 0x0088c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>; pinctrl-names = "default"; pinctrl-0 = 
<&qup_i2c3_default>; interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre1_noc MASTER_BLSP_1 0 &config_noc SLAVE_BLSP_1 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_1 0>, <&aggre1_noc MASTER_BLSP_1 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma0 0 3 QCOM_GPI_I2C>, <&gpi_dma0 1 3 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c4: i2c@890000 { compatible = "qcom,geni-i2c"; reg = <0 0x00890000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c4_default>; interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre1_noc MASTER_BLSP_1 0 &config_noc SLAVE_BLSP_1 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_1 0>, <&aggre1_noc MASTER_BLSP_1 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma0 0 4 QCOM_GPI_I2C>, <&gpi_dma0 1 4 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c5: i2c@894000 { compatible = "qcom,geni-i2c"; reg = <0 0x00894000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c5_default>; interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre1_noc MASTER_BLSP_1 0 &config_noc SLAVE_BLSP_1 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_1 0>, <&aggre1_noc MASTER_BLSP_1 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma0 0 5 QCOM_GPI_I2C>, <&gpi_dma0 1 5 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c6: i2c@898000 { compatible = "qcom,geni-i2c"; reg = <0 0x00898000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c6_default>; interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre1_noc MASTER_BLSP_1 0 &config_noc SLAVE_BLSP_1 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_1 0>, <&aggre1_noc MASTER_BLSP_1 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma0 0 6 QCOM_GPI_I2C>, <&gpi_dma0 1 6 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c7: i2c@89c000 { compatible = "qcom,geni-i2c"; reg = <0 0x0089c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c7_default>; interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre1_noc MASTER_BLSP_1 0 &config_noc SLAVE_BLSP_1 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_1 0>, <&aggre1_noc MASTER_BLSP_1 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma0 0 7 QCOM_GPI_I2C>, <&gpi_dma0 1 7 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; }; gpi_dma1: dma-controller@a00000 { #dma-cells = <3>; compatible = "qcom,sdm670-gpi-dma", "qcom,sdm845-gpi-dma"; reg = <0 0x00a00000 0 0x60000>; interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 281 
IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 295 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 296 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>; dma-channels = <13>; dma-channel-mask = <0xfa>; iommus = <&apps_smmu 0x6d6 0x0>; status = "disabled"; }; qupv3_id_1: geniqup@ac0000 { compatible = "qcom,geni-se-qup"; reg = <0 0x00ac0000 0 0x6000>; clock-names = "m-ahb", "s-ahb"; clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, <&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; iommus = <&apps_smmu 0x6c3 0x0>; #address-cells = <2>; #size-cells = <2>; ranges; interconnects = <&aggre2_noc MASTER_BLSP_2 0 &config_noc SLAVE_BLSP_2 0>; interconnect-names = "qup-core"; status = "disabled"; i2c8: i2c@a80000 { compatible = "qcom,geni-i2c"; reg = <0 0x00a80000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c8_default>; interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre2_noc MASTER_BLSP_2 0 &config_noc SLAVE_BLSP_2 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_2 0>, <&aggre2_noc MASTER_BLSP_2 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma1 0 0 QCOM_GPI_I2C>, <&gpi_dma1 1 0 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c9: i2c@a84000 { compatible = "qcom,geni-i2c"; reg = <0 0x00a84000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c9_default>; interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre2_noc MASTER_BLSP_2 0 &config_noc SLAVE_BLSP_2 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_2 0>, <&aggre2_noc MASTER_BLSP_2 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma1 0 1 QCOM_GPI_I2C>, <&gpi_dma1 1 1 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c10: i2c@a88000 { compatible = "qcom,geni-i2c"; reg = <0 0x00a88000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c10_default>; interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre2_noc MASTER_BLSP_2 0 &config_noc SLAVE_BLSP_2 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_2 0>, <&aggre2_noc MASTER_BLSP_2 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma1 0 2 QCOM_GPI_I2C>, <&gpi_dma1 1 2 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c11: i2c@a8c000 { compatible = "qcom,geni-i2c"; reg = <0 0x00a8c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c11_default>; interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre2_noc MASTER_BLSP_2 0 &config_noc SLAVE_BLSP_2 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_2 0>, <&aggre2_noc MASTER_BLSP_2 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", 
"qup-memory"; dmas = <&gpi_dma1 0 3 QCOM_GPI_I2C>, <&gpi_dma1 1 3 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c12: i2c@a90000 { compatible = "qcom,geni-i2c"; reg = <0 0x00a90000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c12_default>; interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre2_noc MASTER_BLSP_2 0 &config_noc SLAVE_BLSP_2 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_2 0>, <&aggre2_noc MASTER_BLSP_2 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma1 0 4 QCOM_GPI_I2C>, <&gpi_dma1 1 4 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c13: i2c@a94000 { compatible = "qcom,geni-i2c"; reg = <0 0x00a94000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c13_default>; interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre2_noc MASTER_BLSP_2 0 &config_noc SLAVE_BLSP_2 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_2 0>, <&aggre2_noc MASTER_BLSP_2 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma1 0 5 QCOM_GPI_I2C>, <&gpi_dma1 1 5 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c14: i2c@a98000 { compatible = "qcom,geni-i2c"; reg = <0 0x00a98000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c14_default>; interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre2_noc MASTER_BLSP_2 0 &config_noc SLAVE_BLSP_2 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_2 0>, <&aggre2_noc MASTER_BLSP_2 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma1 0 6 QCOM_GPI_I2C>, <&gpi_dma1 1 6 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; i2c15: i2c@a9c000 { compatible = "qcom,geni-i2c"; reg = <0 0x00a9c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_i2c15_default>; interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; power-domains = <&rpmhpd SDM670_CX>; interconnects = <&aggre2_noc MASTER_BLSP_2 0 &config_noc SLAVE_BLSP_2 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_BLSP_2 0>, <&aggre2_noc MASTER_BLSP_2 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "qup-core", "qup-config", "qup-memory"; dmas = <&gpi_dma1 0 7 QCOM_GPI_I2C>, <&gpi_dma1 1 7 QCOM_GPI_I2C>; dma-names = "tx", "rx"; status = "disabled"; }; }; mem_noc: interconnect@1380000 { compatible = "qcom,sdm670-mem-noc"; reg = <0 0x01380000 0 0x27200>; #interconnect-cells = <2>; qcom,bcm-voters = <&apps_bcm_voter>; }; dc_noc: interconnect@14e0000 { compatible = "qcom,sdm670-dc-noc"; reg = <0 0x014e0000 0 0x400>; #interconnect-cells = <2>; qcom,bcm-voters = <&apps_bcm_voter>; }; config_noc: interconnect@1500000 { compatible = "qcom,sdm670-config-noc"; reg = <0 0x01500000 0 0x5080>; #interconnect-cells = <2>; qcom,bcm-voters = <&apps_bcm_voter>; }; system_noc: interconnect@1620000 { compatible = "qcom,sdm670-system-noc"; reg = <0 0x01620000 0 0x18080>; #interconnect-cells = <2>; 
qcom,bcm-voters = <&apps_bcm_voter>; }; aggre1_noc: interconnect@16e0000 { compatible = "qcom,sdm670-aggre1-noc"; reg = <0 0x016e0000 0 0x15080>; #interconnect-cells = <2>; qcom,bcm-voters = <&apps_bcm_voter>; }; aggre2_noc: interconnect@1700000 { compatible = "qcom,sdm670-aggre2-noc"; reg = <0 0x01700000 0 0x1f300>; #interconnect-cells = <2>; qcom,bcm-voters = <&apps_bcm_voter>; }; mmss_noc: interconnect@1740000 { compatible = "qcom,sdm670-mmss-noc"; reg = <0 0x01740000 0 0x1c100>; #interconnect-cells = <2>; qcom,bcm-voters = <&apps_bcm_voter>; }; tcsr_mutex: hwlock@1f40000 { compatible = "qcom,tcsr-mutex"; reg = <0 0x01f40000 0 0x20000>; #hwlock-cells = <1>; }; tlmm: pinctrl@3400000 { compatible = "qcom,sdm670-tlmm"; reg = <0 0x03400000 0 0xc00000>; interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; gpio-ranges = <&tlmm 0 0 151>; wakeup-parent = <&pdc>; qup_i2c0_default: qup-i2c0-default-state { pins = "gpio0", "gpio1"; function = "qup0"; }; qup_i2c1_default: qup-i2c1-default-state { pins = "gpio17", "gpio18"; function = "qup1"; }; qup_i2c2_default: qup-i2c2-default-state { pins = "gpio27", "gpio28"; function = "qup2"; }; qup_i2c3_default: qup-i2c3-default-state { pins = "gpio41", "gpio42"; function = "qup3"; }; qup_i2c4_default: qup-i2c4-default-state { pins = "gpio89", "gpio90"; function = "qup4"; }; qup_i2c5_default: qup-i2c5-default-state { pins = "gpio85", "gpio86"; function = "qup5"; }; qup_i2c6_default: qup-i2c6-default-state { pins = "gpio45", "gpio46"; function = "qup6"; }; qup_i2c7_default: qup-i2c7-default-state { pins = "gpio93", "gpio94"; function = "qup7"; }; qup_i2c8_default: qup-i2c8-default-state { pins = "gpio65", "gpio66"; function = "qup8"; }; qup_i2c9_default: qup-i2c9-default-state { pins = "gpio6", "gpio7"; function = "qup9"; }; qup_i2c10_default: qup-i2c10-default-state { pins = "gpio55", "gpio56"; function = "qup10"; }; qup_i2c11_default: qup-i2c11-default-state { pins = "gpio31", "gpio32"; function = "qup11"; }; qup_i2c12_default: qup-i2c12-default-state { pins = "gpio49", "gpio50"; function = "qup12"; }; qup_i2c13_default: qup-i2c13-default-state { pins = "gpio105", "gpio106"; function = "qup13"; }; qup_i2c14_default: qup-i2c14-default-state { pins = "gpio33", "gpio34"; function = "qup14"; }; qup_i2c15_default: qup-i2c15-default-state { pins = "gpio81", "gpio82"; function = "qup15"; }; sdc1_state_on: sdc1-on-state { clk-pins { pins = "sdc1_clk"; bias-disable; drive-strength = <16>; }; cmd-pins { pins = "sdc1_cmd"; bias-pull-up; drive-strength = <10>; }; data-pins { pins = "sdc1_data"; bias-pull-up; drive-strength = <10>; }; rclk-pins { pins = "sdc1_rclk"; bias-pull-down; }; }; sdc1_state_off: sdc1-off-state { clk-pins { pins = "sdc1_clk"; bias-disable; drive-strength = <2>; }; cmd-pins { pins = "sdc1_cmd"; bias-pull-up; drive-strength = <2>; }; data-pins { pins = "sdc1_data"; bias-pull-up; drive-strength = <2>; }; rclk-pins { pins = "sdc1_rclk"; bias-pull-down; }; }; }; usb_1_hsphy: phy@88e2000 { compatible = "qcom,sdm670-qusb2-phy", "qcom,qusb2-v2-phy"; reg = <0 0x088e2000 0 0x400>; #phy-cells = <0>; clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>, <&rpmhcc RPMH_CXO_CLK>; clock-names = "cfg_ahb", "ref"; resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>; nvmem-cells = <&qusb2_hstx_trim>; status = "disabled"; }; usb_1: usb@a6f8800 { compatible = "qcom,sdm670-dwc3", "qcom,dwc3"; reg = <0 0x0a6f8800 0 0x400>; #address-cells = <2>; #size-cells = <2>; ranges; dma-ranges; clocks = <&gcc 
GCC_CFG_NOC_USB3_PRIM_AXI_CLK>, <&gcc GCC_USB30_PRIM_MASTER_CLK>, <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>, <&gcc GCC_USB30_PRIM_SLEEP_CLK>, <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>; clock-names = "cfg_noc", "core", "iface", "sleep", "mock_utmi"; assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>, <&gcc GCC_USB30_PRIM_MASTER_CLK>; assigned-clock-rates = <19200000>, <150000000>; interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>, <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>, <&pdc 9 IRQ_TYPE_EDGE_BOTH>, <&pdc 8 IRQ_TYPE_EDGE_BOTH>, <&pdc 6 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "pwr_event", "hs_phy_irq", "dp_hs_phy_irq", "dm_hs_phy_irq", "ss_phy_irq"; power-domains = <&gcc USB30_PRIM_GDSC>; resets = <&gcc GCC_USB30_PRIM_BCR>; interconnects = <&aggre2_noc MASTER_USB3 0 &mem_noc SLAVE_EBI_CH0 0>, <&gladiator_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_USB3 0>; interconnect-names = "usb-ddr", "apps-usb"; status = "disabled"; usb_1_dwc3: usb@a600000 { compatible = "snps,dwc3"; reg = <0 0x0a600000 0 0xcd00>; interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>; iommus = <&apps_smmu 0x740 0>; snps,dis_u2_susphy_quirk; snps,dis_enblslpm_quirk; phys = <&usb_1_hsphy>; phy-names = "usb2-phy"; }; }; pdc: interrupt-controller@b220000 { compatible = "qcom,sdm670-pdc", "qcom,pdc"; reg = <0 0x0b220000 0 0x30000>; qcom,pdc-ranges = <0 480 40>, <41 521 7>, <49 529 4>, <54 534 24>, <79 559 15>, <94 609 15>, <115 630 7>; #interrupt-cells = <2>; interrupt-parent = <&intc>; interrupt-controller; }; spmi_bus: spmi@c440000 { compatible = "qcom,spmi-pmic-arb"; reg = <0 0x0c440000 0 0x1100>, <0 0x0c600000 0 0x2000000>, <0 0x0e600000 0 0x100000>, <0 0x0e700000 0 0xa0000>, <0 0x0c40a000 0 0x26000>; reg-names = "core", "chnls", "obsrvr", "intr", "cnfg"; interrupt-names = "periph_irq"; interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>; qcom,ee = <0>; qcom,channel = <0>; #address-cells = <2>; #size-cells = <0>; interrupt-controller; #interrupt-cells = <4>; }; mdss: display-subsystem@ae00000 { compatible = "qcom,sdm670-mdss"; reg = <0 0x0ae00000 0 0x1000>; reg-names = "mdss"; power-domains = <&dispcc MDSS_GDSC>; clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, <&dispcc DISP_CC_MDSS_MDP_CLK>; clock-names = "iface", "core"; interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; interrupt-controller; #interrupt-cells = <1>; interconnects = <&mmss_noc MASTER_MDP_PORT0 0 &mem_noc SLAVE_EBI_CH0 0>, <&mmss_noc MASTER_MDP_PORT1 0 &mem_noc SLAVE_EBI_CH0 0>; interconnect-names = "mdp0-mem", "mdp1-mem"; iommus = <&apps_smmu 0x880 0x8>, <&apps_smmu 0xc80 0x8>; #address-cells = <2>; #size-cells = <2>; ranges; status = "disabled"; mdss_mdp: display-controller@ae01000 { compatible = "qcom,sdm670-dpu"; reg = <0 0x0ae01000 0 0x8f000>, <0 0x0aeb0000 0 0x2008>; reg-names = "mdp", "vbif"; clocks = <&gcc GCC_DISP_AXI_CLK>, <&dispcc DISP_CC_MDSS_AHB_CLK>, <&dispcc DISP_CC_MDSS_AXI_CLK>, <&dispcc DISP_CC_MDSS_MDP_CLK>, <&dispcc DISP_CC_MDSS_VSYNC_CLK>; clock-names = "gcc-bus", "iface", "bus", "core", "vsync"; assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>; assigned-clock-rates = <19200000>; operating-points-v2 = <&mdp_opp_table>; power-domains = <&rpmhpd SDM670_CX>; interrupt-parent = <&mdss>; interrupts = <0>; ports { #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; dpu_intf0_out: endpoint { remote-endpoint = <&mdss_dsi0_in>; }; }; port@1 { reg = <1>; dpu_intf1_out: endpoint { remote-endpoint = <&mdss_dsi1_in>; }; }; }; mdp_opp_table: opp-table { compatible = "operating-points-v2"; opp-19200000 { opp-hz = /bits/ 64 <19200000>; required-opps = 
<&rpmhpd_opp_min_svs>; }; opp-171428571 { opp-hz = /bits/ 64 <171428571>; required-opps = <&rpmhpd_opp_low_svs>; }; opp-358000000 { opp-hz = /bits/ 64 <358000000>; required-opps = <&rpmhpd_opp_svs_l1>; }; opp-430000000 { opp-hz = /bits/ 64 <430000000>; required-opps = <&rpmhpd_opp_nom>; }; }; }; mdss_dsi0: dsi@ae94000 { compatible = "qcom,sdm670-dsi-ctrl", "qcom,mdss-dsi-ctrl"; reg = <0 0x0ae94000 0 0x400>; reg-names = "dsi_ctrl"; interrupt-parent = <&mdss>; interrupts = <4>; clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>, <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>, <&dispcc DISP_CC_MDSS_PCLK0_CLK>, <&dispcc DISP_CC_MDSS_ESC0_CLK>, <&dispcc DISP_CC_MDSS_AHB_CLK>, <&dispcc DISP_CC_MDSS_AXI_CLK>; clock-names = "byte", "byte_intf", "pixel", "core", "iface", "bus"; assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>; assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>; operating-points-v2 = <&dsi_opp_table>; power-domains = <&rpmhpd SDM670_CX>; phys = <&mdss_dsi0_phy>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; ports { #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; mdss_dsi0_in: endpoint { remote-endpoint = <&dpu_intf0_out>; }; }; port@1 { reg = <1>; mdss_dsi0_out: endpoint { }; }; }; }; mdss_dsi0_phy: phy@ae94400 { compatible = "qcom,dsi-phy-10nm"; reg = <0 0x0ae94400 0 0x200>, <0 0x0ae94600 0 0x280>, <0 0x0ae94a00 0 0x1e0>; reg-names = "dsi_phy", "dsi_phy_lane", "dsi_pll"; #clock-cells = <1>; #phy-cells = <0>; clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, <&rpmhcc RPMH_CXO_CLK>; clock-names = "iface", "ref"; status = "disabled"; }; mdss_dsi1: dsi@ae96000 { compatible = "qcom,sdm670-dsi-ctrl", "qcom,mdss-dsi-ctrl"; reg = <0 0x0ae96000 0 0x400>; reg-names = "dsi_ctrl"; interrupt-parent = <&mdss>; interrupts = <5>; clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK>, <&dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>, <&dispcc DISP_CC_MDSS_PCLK1_CLK>, <&dispcc DISP_CC_MDSS_ESC1_CLK>, <&dispcc DISP_CC_MDSS_AHB_CLK>, <&dispcc DISP_CC_MDSS_AXI_CLK>; clock-names = "byte", "byte_intf", "pixel", "core", "iface", "bus"; assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>; assigned-clock-parents = <&mdss_dsi1_phy 0>, <&mdss_dsi1_phy 1>; operating-points-v2 = <&dsi_opp_table>; power-domains = <&rpmhpd SDM670_CX>; phys = <&mdss_dsi1_phy>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; ports { #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; mdss_dsi1_in: endpoint { remote-endpoint = <&dpu_intf1_out>; }; }; port@1 { reg = <1>; mdss_dsi1_out: endpoint { }; }; }; }; mdss_dsi1_phy: phy@ae96400 { compatible = "qcom,dsi-phy-10nm"; reg = <0 0x0ae96400 0 0x200>, <0 0x0ae96600 0 0x280>, <0 0x0ae96a00 0 0x10e>; reg-names = "dsi_phy", "dsi_phy_lane", "dsi_pll"; #clock-cells = <1>; #phy-cells = <0>; clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, <&rpmhcc RPMH_CXO_CLK>; clock-names = "iface", "ref"; status = "disabled"; }; }; dispcc: clock-controller@af00000 { compatible = "qcom,sdm845-dispcc"; reg = <0 0x0af00000 0 0x10000>; clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_DISP_GPLL0_CLK_SRC>, <&gcc GCC_DISP_GPLL0_DIV_CLK_SRC>, <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>, <&mdss_dsi1_phy 0>, <&mdss_dsi1_phy 1>, <0>, <0>; clock-names = "bi_tcxo", "gcc_disp_gpll0_clk_src", "gcc_disp_gpll0_div_clk_src", "dsi0_phy_pll_out_byteclk", "dsi0_phy_pll_out_dsiclk", "dsi1_phy_pll_out_byteclk", "dsi1_phy_pll_out_dsiclk", "dp_link_clk_divsel_ten", "dp_vco_divided_clk_src_mux"; #clock-cells = <1>; #reset-cells = <1>; 
#power-domain-cells = <1>; }; apps_smmu: iommu@15000000 { compatible = "qcom,sdm670-smmu-500", "qcom,smmu-500", "arm,mmu-500"; reg = <0 0x15000000 0 0x80000>; #iommu-cells = <2>; #global-interrupts = <1>; interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>; dma-coherent; }; gladiator_noc: interconnect@17900000 { compatible = "qcom,sdm670-gladiator-noc"; reg = <0 0x17900000 0 0xd080>; #interconnect-cells = <2>; qcom,bcm-voters = <&apps_bcm_voter>; }; apps_rsc: rsc@179c0000 { compatible = "qcom,rpmh-rsc"; reg = <0 0x179c0000 0 0x10000>, <0 0x179d0000 0 0x10000>, <0 0x179e0000 0 0x10000>; reg-names = "drv-0", "drv-1", "drv-2"; interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>; label = "apps_rsc"; qcom,tcs-offset = <0xd00>; qcom,drv-id = <2>; qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>, <WAKE_TCS 3>, <CONTROL_TCS 1>; power-domains = <&cluster_pd>; apps_bcm_voter: bcm-voter { compatible = "qcom,bcm-voter"; }; rpmhcc: clock-controller { compatible = "qcom,sdm670-rpmh-clk"; #clock-cells = <1>; clock-names = "xo"; clocks = <&xo_board>; }; rpmhpd: power-controller { compatible = "qcom,sdm670-rpmhpd"; #power-domain-cells = <1>; operating-points-v2 = <&rpmhpd_opp_table>; rpmhpd_opp_table: opp-table { compatible = "operating-points-v2"; 
rpmhpd_opp_ret: opp1 { opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>; }; rpmhpd_opp_min_svs: opp2 { opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>; }; rpmhpd_opp_low_svs: opp3 { opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>; }; rpmhpd_opp_svs: opp4 { opp-level = <RPMH_REGULATOR_LEVEL_SVS>; }; rpmhpd_opp_svs_l1: opp5 { opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>; }; rpmhpd_opp_nom: opp6 { opp-level = <RPMH_REGULATOR_LEVEL_NOM>; }; rpmhpd_opp_nom_l1: opp7 { opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>; }; rpmhpd_opp_nom_l2: opp8 { opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>; }; rpmhpd_opp_turbo: opp9 { opp-level = <RPMH_REGULATOR_LEVEL_TURBO>; }; rpmhpd_opp_turbo_l1: opp10 { opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>; }; }; }; }; intc: interrupt-controller@17a00000 { compatible = "arm,gic-v3"; reg = <0 0x17a00000 0 0x10000>, /* GICD */ <0 0x17a60000 0 0x100000>; /* GICR * 8 */ interrupt-controller; interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>; #interrupt-cells = <3>; }; osm_l3: interconnect@17d41000 { compatible = "qcom,sdm670-osm-l3", "qcom,osm-l3"; reg = <0 0x17d41000 0 0x1400>; clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>; clock-names = "xo", "alternate"; #interconnect-cells = <1>; }; cpufreq_hw: cpufreq@17d43000 { compatible = "qcom,sdm670-cpufreq-hw", "qcom,cpufreq-hw"; reg = <0 0x17d43000 0 0x1400>, <0 0x17d45800 0 0x1400>; reg-names = "freq-domain0", "freq-domain1"; clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>; clock-names = "xo", "alternate"; #freq-domain-cells = <1>; }; }; };
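/*
 * Illustrative sketch (standalone C, not part of the kernel sources): how the
 * qcom,pdc-ranges tuples in the pdc node above translate wakeup pins to GIC
 * SPIs. Each <pin parent count> entry maps `count` PDC pins starting at `pin`
 * onto parent SPIs starting at `parent`; e.g. PDC pin 9, used as
 * dp_hs_phy_irq by the usb_1 node, resolves to GIC SPI 489.
 */
#include <stdio.h>

struct pdc_range { unsigned int pin, parent, count; };

/* The tuples from the sdm670 pdc node above */
static const struct pdc_range ranges[] = {
	{ 0, 480, 40 }, { 41, 521, 7 }, { 49, 529, 4 }, { 54, 534, 24 },
	{ 79, 559, 15 }, { 94, 609, 15 }, { 115, 630, 7 },
};

int main(void)
{
	unsigned int pin = 9;	/* dp_hs_phy_irq wakeup pin */
	unsigned int i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
		if (pin >= ranges[i].pin &&
		    pin < ranges[i].pin + ranges[i].count)
			printf("PDC pin %u -> GIC SPI %u\n",
			       pin, ranges[i].parent + pin - ranges[i].pin);
	return 0;
}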
// SPDX-License-Identifier: GPL-2.0-only /* * Realtek Otto MIPS platform watchdog * * Watchdog timer that will reset the system after timeout, using the selected * reset mode. * * Counter scaling and timeouts: * - Base prescale of (1 << 25), providing tick duration T_0: 168ms @ 200MHz * - PRESCALE: logarithmic prescaler adding a factor of {1, 2, 4, 8} * - Phase 1: Times out after (PHASE1 + 1) × PRESCALE × T_0 * Generates an interrupt, WDT cannot be stopped after phase 1 * - Phase 2: starts after phase 1, times out after (PHASE2 + 1) × PRESCALE × T_0 * Resets the system according to RST_MODE */ #include <linux/bits.h> #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/math.h> #include <linux/minmax.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/reboot.h> #include <linux/watchdog.h> #define OTTO_WDT_REG_CNTR 0x0 #define OTTO_WDT_CNTR_PING BIT(31) #define OTTO_WDT_REG_INTR 0x4 #define OTTO_WDT_INTR_PHASE_1 BIT(31) #define OTTO_WDT_INTR_PHASE_2 BIT(30) #define OTTO_WDT_REG_CTRL 0x8 #define OTTO_WDT_CTRL_ENABLE BIT(31) #define OTTO_WDT_CTRL_PRESCALE GENMASK(30, 29) #define OTTO_WDT_CTRL_PHASE1 GENMASK(26, 22) #define OTTO_WDT_CTRL_PHASE2 GENMASK(19, 15) #define OTTO_WDT_CTRL_RST_MODE GENMASK(1, 0) #define OTTO_WDT_MODE_SOC 0 #define OTTO_WDT_MODE_CPU 1 #define OTTO_WDT_MODE_SOFTWARE 2 #define OTTO_WDT_CTRL_DEFAULT OTTO_WDT_MODE_CPU #define OTTO_WDT_PRESCALE_MAX 3 /* * One higher than the max values contained in PHASE{1,2}, since a value of 0 * corresponds to one tick. */ #define OTTO_WDT_PHASE_TICKS_MAX 32 /* * The maximum reset delay is actually 2×32 ticks, but that would require large * pretimeout values for timeouts longer than 32 ticks. Limit the maximum timeout * to 32 + 1 ticks to ensure small pretimeout values can be configured as expected. */ #define OTTO_WDT_TIMEOUT_TICKS_MAX (OTTO_WDT_PHASE_TICKS_MAX + 1) struct otto_wdt_ctrl { struct watchdog_device wdev; struct device *dev; void __iomem *base; unsigned int clk_rate_khz; int irq_phase1; }; static int otto_wdt_start(struct watchdog_device *wdev) { struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev); u32 v; v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL); v |= OTTO_WDT_CTRL_ENABLE; iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL); return 0; } static int otto_wdt_stop(struct watchdog_device *wdev) { struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev); u32 v; v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL); v &= ~OTTO_WDT_CTRL_ENABLE; iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL); return 0; } static int otto_wdt_ping(struct watchdog_device *wdev) { struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev); iowrite32(OTTO_WDT_CNTR_PING, ctrl->base + OTTO_WDT_REG_CNTR); return 0; } static int otto_wdt_tick_ms(struct otto_wdt_ctrl *ctrl, int prescale) { return DIV_ROUND_CLOSEST(1 << (25 + prescale), ctrl->clk_rate_khz); } /* * The timer asserts the PHASE1/PHASE2 IRQs when the number of ticks exceeds * the value stored in those fields. This means each phase will run for at least * one tick, so small values need to be clamped to correctly reflect the timeout. 
*/ static inline unsigned int div_round_ticks(unsigned int val, unsigned int tick_duration, unsigned int min_ticks) { return max(min_ticks, DIV_ROUND_UP(val, tick_duration)); } static int otto_wdt_determine_timeouts(struct watchdog_device *wdev, unsigned int timeout, unsigned int pretimeout) { struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev); unsigned int pretimeout_ms = pretimeout * 1000; unsigned int timeout_ms = timeout * 1000; unsigned int prescale_next = 0; unsigned int phase1_ticks; unsigned int phase2_ticks; unsigned int total_ticks; unsigned int prescale; unsigned int tick_ms; u32 v; do { prescale = prescale_next; if (prescale > OTTO_WDT_PRESCALE_MAX) return -EINVAL; tick_ms = otto_wdt_tick_ms(ctrl, prescale); total_ticks = div_round_ticks(timeout_ms, tick_ms, 2); phase1_ticks = div_round_ticks(timeout_ms - pretimeout_ms, tick_ms, 1); phase2_ticks = total_ticks - phase1_ticks; prescale_next++; } while (phase1_ticks > OTTO_WDT_PHASE_TICKS_MAX || phase2_ticks > OTTO_WDT_PHASE_TICKS_MAX); v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL); v &= ~(OTTO_WDT_CTRL_PRESCALE | OTTO_WDT_CTRL_PHASE1 | OTTO_WDT_CTRL_PHASE2); v |= FIELD_PREP(OTTO_WDT_CTRL_PHASE1, phase1_ticks - 1); v |= FIELD_PREP(OTTO_WDT_CTRL_PHASE2, phase2_ticks - 1); v |= FIELD_PREP(OTTO_WDT_CTRL_PRESCALE, prescale); iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL); timeout_ms = total_ticks * tick_ms; ctrl->wdev.timeout = timeout_ms / 1000; pretimeout_ms = phase2_ticks * tick_ms; ctrl->wdev.pretimeout = pretimeout_ms / 1000; return 0; } static int otto_wdt_set_timeout(struct watchdog_device *wdev, unsigned int val) { return otto_wdt_determine_timeouts(wdev, val, min(wdev->pretimeout, val - 1)); } static int otto_wdt_set_pretimeout(struct watchdog_device *wdev, unsigned int val) { return otto_wdt_determine_timeouts(wdev, wdev->timeout, val); } static int otto_wdt_restart(struct watchdog_device *wdev, unsigned long reboot_mode, void *data) { struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev); u32 reset_mode; u32 v; disable_irq(ctrl->irq_phase1); switch (reboot_mode) { case REBOOT_SOFT: reset_mode = OTTO_WDT_MODE_SOFTWARE; break; case REBOOT_WARM: reset_mode = OTTO_WDT_MODE_CPU; break; default: reset_mode = OTTO_WDT_MODE_SOC; break; } /* Configure for shortest timeout and wait for reset to occur */ v = FIELD_PREP(OTTO_WDT_CTRL_RST_MODE, reset_mode) | OTTO_WDT_CTRL_ENABLE; iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL); mdelay(3 * otto_wdt_tick_ms(ctrl, 0)); return 0; } static irqreturn_t otto_wdt_phase1_isr(int irq, void *dev_id) { struct otto_wdt_ctrl *ctrl = dev_id; iowrite32(OTTO_WDT_INTR_PHASE_1, ctrl->base + OTTO_WDT_REG_INTR); dev_crit(ctrl->dev, "phase 1 timeout\n"); watchdog_notify_pretimeout(&ctrl->wdev); return IRQ_HANDLED; } static const struct watchdog_ops otto_wdt_ops = { .owner = THIS_MODULE, .start = otto_wdt_start, .stop = otto_wdt_stop, .ping = otto_wdt_ping, .set_timeout = otto_wdt_set_timeout, .set_pretimeout = otto_wdt_set_pretimeout, .restart = otto_wdt_restart, }; static const struct watchdog_info otto_wdt_info = { .identity = "Realtek Otto watchdog timer", .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_PRETIMEOUT, }; static int otto_wdt_probe_clk(struct otto_wdt_ctrl *ctrl) { struct clk *clk; clk = devm_clk_get_enabled(ctrl->dev, NULL); if (IS_ERR(clk)) return dev_err_probe(ctrl->dev, PTR_ERR(clk), "Failed to get clock\n"); ctrl->clk_rate_khz = clk_get_rate(clk) / 1000; if (ctrl->clk_rate_khz == 0) return dev_err_probe(ctrl->dev, -ENXIO, "Failed to get clock rate\n"); return 
0; } static int otto_wdt_probe_reset_mode(struct otto_wdt_ctrl *ctrl) { static const char *mode_property = "realtek,reset-mode"; const struct fwnode_handle *node = ctrl->dev->fwnode; int mode_count; u32 mode; u32 v; if (!node) return -ENXIO; mode_count = fwnode_property_string_array_count(node, mode_property); if (mode_count < 0) return mode_count; else if (mode_count == 0) return 0; else if (mode_count != 1) return -EINVAL; if (fwnode_property_match_string(node, mode_property, "soc") == 0) mode = OTTO_WDT_MODE_SOC; else if (fwnode_property_match_string(node, mode_property, "cpu") == 0) mode = OTTO_WDT_MODE_CPU; else if (fwnode_property_match_string(node, mode_property, "software") == 0) mode = OTTO_WDT_MODE_SOFTWARE; else return -EINVAL; v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL); v &= ~OTTO_WDT_CTRL_RST_MODE; v |= FIELD_PREP(OTTO_WDT_CTRL_RST_MODE, mode); iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL); return 0; } static int otto_wdt_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct otto_wdt_ctrl *ctrl; unsigned int max_tick_ms; int ret; ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); if (!ctrl) return -ENOMEM; ctrl->dev = dev; ctrl->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctrl->base)) return PTR_ERR(ctrl->base); /* Clear any old interrupts and reset initial state */ iowrite32(OTTO_WDT_INTR_PHASE_1 | OTTO_WDT_INTR_PHASE_2, ctrl->base + OTTO_WDT_REG_INTR); iowrite32(OTTO_WDT_CTRL_DEFAULT, ctrl->base + OTTO_WDT_REG_CTRL); ret = otto_wdt_probe_clk(ctrl); if (ret) return ret; ctrl->irq_phase1 = platform_get_irq_byname(pdev, "phase1"); if (ctrl->irq_phase1 < 0) return ctrl->irq_phase1; ret = devm_request_irq(dev, ctrl->irq_phase1, otto_wdt_phase1_isr, 0, "realtek-otto-wdt", ctrl); if (ret) return dev_err_probe(dev, ret, "Failed to get IRQ for phase1\n"); ret = otto_wdt_probe_reset_mode(ctrl); if (ret) return dev_err_probe(dev, ret, "Invalid reset mode specified\n"); ctrl->wdev.parent = dev; ctrl->wdev.info = &otto_wdt_info; ctrl->wdev.ops = &otto_wdt_ops; /* * Since pretimeout cannot be disabled, min. timeout is twice the * subsystem resolution. Max. timeout is ca. 43s at a bus clock of 200MHz. */ ctrl->wdev.min_timeout = 2; max_tick_ms = otto_wdt_tick_ms(ctrl, OTTO_WDT_PRESCALE_MAX); ctrl->wdev.max_hw_heartbeat_ms = max_tick_ms * OTTO_WDT_TIMEOUT_TICKS_MAX; ctrl->wdev.timeout = min(30U, ctrl->wdev.max_hw_heartbeat_ms / 1000); watchdog_set_drvdata(&ctrl->wdev, ctrl); watchdog_init_timeout(&ctrl->wdev, 0, dev); watchdog_stop_on_reboot(&ctrl->wdev); watchdog_set_restart_priority(&ctrl->wdev, 128); ret = otto_wdt_determine_timeouts(&ctrl->wdev, ctrl->wdev.timeout, 1); if (ret) return dev_err_probe(dev, ret, "Failed to set timeout\n"); return devm_watchdog_register_device(dev, &ctrl->wdev); } static const struct of_device_id otto_wdt_ids[] = { { .compatible = "realtek,rtl8380-wdt" }, { .compatible = "realtek,rtl8390-wdt" }, { .compatible = "realtek,rtl9300-wdt" }, { .compatible = "realtek,rtl9310-wdt" }, { } }; MODULE_DEVICE_TABLE(of, otto_wdt_ids); static struct platform_driver otto_wdt_driver = { .probe = otto_wdt_probe, .driver = { .name = "realtek-otto-watchdog", .of_match_table = otto_wdt_ids, }, }; module_platform_driver(otto_wdt_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Sander Vanheule <[email protected]>"); MODULE_DESCRIPTION("Realtek Otto watchdog timer driver");
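/*
 * Illustrative sketch (standalone C, not part of the driver): the tick
 * arithmetic from the header comment and otto_wdt_tick_ms() worked out for an
 * assumed 200 MHz bus clock. One tick is (1 << (25 + PRESCALE)) clock cycles,
 * so PRESCALE 0 gives the 168 ms T_0 quoted above, and PRESCALE 3 with the
 * 32 + 1 tick limit gives the roughly 43-44 s maximum heartbeat noted in
 * otto_wdt_probe().
 */
#include <stdio.h>

int main(void)
{
	unsigned int clk_rate_khz = 200000;	/* assumed 200 MHz clock */
	int prescale;

	for (prescale = 0; prescale <= 3; prescale++) {
		/* DIV_ROUND_CLOSEST(1 << (25 + prescale), clk_rate_khz) */
		unsigned int tick_ms = ((1U << (25 + prescale)) +
					clk_rate_khz / 2) / clk_rate_khz;

		printf("prescale %d: tick %u ms, max heartbeat %u ms\n",
		       prescale, tick_ms, tick_ms * 33);
	}
	return 0;
}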
// SPDX-License-Identifier: GPL-2.0+ OR MIT /* * Copyright (C) 2019 Kontron Electronics GmbH */ #include "imx8mm.dtsi" / { model = "Kontron SL i.MX8MM (N801X SOM)"; compatible = "kontron,imx8mm-sl", "fsl,imx8mm"; memory@40000000 { device_type = "memory"; /* * There are multiple SoM flavors with different DDR sizes. * The smallest is 1GB. For larger sizes the bootloader will * update the reg property. */ reg = <0x0 0x40000000 0 0x80000000>; }; chosen { stdout-path = &uart3; }; }; &A53_0 { cpu-supply = <&reg_vdd_arm>; }; &A53_1 { cpu-supply = <&reg_vdd_arm>; }; &A53_2 { cpu-supply = <&reg_vdd_arm>; }; &A53_3 { cpu-supply = <&reg_vdd_arm>; }; &ddrc { operating-points-v2 = <&ddrc_opp_table>; ddrc_opp_table: opp-table { compatible = "operating-points-v2"; opp-100000000 { opp-hz = /bits/ 64 <100000000>; }; opp-750000000 { opp-hz = /bits/ 64 <750000000>; }; }; }; &ecspi1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_ecspi1>; cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>; status = "okay"; flash@0 { compatible = "mxicy,mx25r1635f", "jedec,spi-nor"; spi-max-frequency = <80000000>; reg = <0>; partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; partition@0 { label = "u-boot"; reg = <0x0 0x1e0000>; }; partition@1e0000 { label = "env"; reg = <0x1e0000 0x10000>; }; partition@1f0000 { label = "env_redundant"; reg = <0x1f0000 0x10000>; }; }; }; }; &i2c1 { clock-frequency = <400000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c1>; status = "okay"; pca9450: pmic@25 { compatible = "nxp,pca9450a"; reg = <0x25>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_pmic>; interrupt-parent = <&gpio1>; interrupts = <0 IRQ_TYPE_LEVEL_LOW>; regulators { reg_vdd_soc: BUCK1 { regulator-name = "+0V8_VDD_SOC (BUCK1)"; regulator-min-microvolt = <800000>; regulator-max-microvolt = <850000>; regulator-boot-on; regulator-always-on; regulator-ramp-delay = <3125>; nxp,dvs-run-voltage = <850000>; nxp,dvs-standby-voltage = <800000>; }; reg_vdd_arm: BUCK2 { regulator-name = "+0V9_VDD_ARM (BUCK2)"; regulator-min-microvolt = <850000>; regulator-max-microvolt = <950000>; regulator-boot-on; regulator-always-on; regulator-ramp-delay = <3125>; nxp,dvs-run-voltage = <950000>; nxp,dvs-standby-voltage = <850000>; }; reg_vdd_dram: BUCK3 { regulator-name = "+0V9_VDD_DRAM&PU (BUCK3)"; regulator-min-microvolt = <850000>; regulator-max-microvolt = <950000>; regulator-boot-on; regulator-always-on; }; reg_vdd_3v3: BUCK4 { regulator-name = "+3V3 (BUCK4)"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; }; reg_vdd_1v8: BUCK5 { regulator-name = "+1V8 (BUCK5)"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-boot-on; regulator-always-on; }; reg_nvcc_dram: BUCK6 { regulator-name = "+1V1_NVCC_DRAM (BUCK6)"; regulator-min-microvolt = <1100000>; regulator-max-microvolt = <1100000>; regulator-boot-on; regulator-always-on; }; reg_nvcc_snvs: LDO1 { regulator-name = "+1V8_NVCC_SNVS (LDO1)"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-boot-on; regulator-always-on; }; reg_vdd_snvs: LDO2 { regulator-name = "+0V8_VDD_SNVS (LDO2)"; regulator-min-microvolt = <800000>; regulator-max-microvolt = <900000>; regulator-boot-on; regulator-always-on; }; reg_vdda: LDO3 { regulator-name = "+1V8_VDDA (LDO3)"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-boot-on; regulator-always-on; }; reg_vdd_phy: LDO4 { regulator-name = "+0V9_VDD_PHY (LDO4)"; 
regulator-min-microvolt = <900000>; regulator-max-microvolt = <900000>; regulator-boot-on; regulator-always-on; }; reg_nvcc_sd: LDO5 { regulator-name = "NVCC_SD (LDO5)"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; }; }; }; }; &uart3 { /* console */ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart3>; status = "okay"; }; &usdhc1 { pinctrl-names = "default", "state_100mhz", "state_200mhz"; pinctrl-0 = <&pinctrl_usdhc1>; pinctrl-1 = <&pinctrl_usdhc1_100mhz>; pinctrl-2 = <&pinctrl_usdhc1_200mhz>; vmmc-supply = <&reg_vdd_3v3>; vqmmc-supply = <&reg_vdd_1v8>; bus-width = <8>; non-removable; status = "okay"; }; &wdog1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_wdog>; fsl,ext-reset-output; status = "okay"; }; &iomuxc { pinctrl_ecspi1: ecspi1grp { fsl,pins = < MX8MM_IOMUXC_ECSPI1_MISO_ECSPI1_MISO 0x82 MX8MM_IOMUXC_ECSPI1_MOSI_ECSPI1_MOSI 0x82 MX8MM_IOMUXC_ECSPI1_SCLK_ECSPI1_SCLK 0x82 MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x19 >; }; pinctrl_i2c1: i2c1grp { fsl,pins = < MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x40000083 MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x40000083 >; }; pinctrl_pmic: pmicgrp { fsl,pins = < MX8MM_IOMUXC_GPIO1_IO00_GPIO1_IO0 0x141 >; }; pinctrl_uart3: uart3grp { fsl,pins = < MX8MM_IOMUXC_UART3_RXD_UART3_DCE_RX 0x140 MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x140 >; }; pinctrl_usdhc1: usdhc1grp { fsl,pins = < MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190 MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d0 MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d0 MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d0 MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d0 MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d0 MX8MM_IOMUXC_SD1_DATA4_USDHC1_DATA4 0x1d0 MX8MM_IOMUXC_SD1_DATA5_USDHC1_DATA5 0x1d0 MX8MM_IOMUXC_SD1_DATA6_USDHC1_DATA6 0x1d0 MX8MM_IOMUXC_SD1_DATA7_USDHC1_DATA7 0x1d0 MX8MM_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0x019 MX8MM_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x190 >; }; pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp { fsl,pins = < MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x194 MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d4 MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d4 MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d4 MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d4 MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d4 MX8MM_IOMUXC_SD1_DATA4_USDHC1_DATA4 0x1d4 MX8MM_IOMUXC_SD1_DATA5_USDHC1_DATA5 0x1d4 MX8MM_IOMUXC_SD1_DATA6_USDHC1_DATA6 0x1d4 MX8MM_IOMUXC_SD1_DATA7_USDHC1_DATA7 0x1d4 MX8MM_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0x019 MX8MM_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x194 >; }; pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp { fsl,pins = < MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x196 MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d6 MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d6 MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d6 MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d6 MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d6 MX8MM_IOMUXC_SD1_DATA4_USDHC1_DATA4 0x1d6 MX8MM_IOMUXC_SD1_DATA5_USDHC1_DATA5 0x1d6 MX8MM_IOMUXC_SD1_DATA6_USDHC1_DATA6 0x1d6 MX8MM_IOMUXC_SD1_DATA7_USDHC1_DATA7 0x1d6 MX8MM_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0x019 MX8MM_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x196 >; }; pinctrl_wdog: wdoggrp { fsl,pins = < MX8MM_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0xc6 >; }; };
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. */ #ifndef __GFS2_DOT_H__ #define __GFS2_DOT_H__ enum { NO_CREATE = 0, CREATE = 1, }; enum { NO_FORCE = 0, FORCE = 1, }; #define GFS2_FAST_NAME_SIZE 8 #endif /* __GFS2_DOT_H__ */
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef ISCSI_TARGET_TPG_H #define ISCSI_TARGET_TPG_H #include <linux/types.h> struct iscsi_np; struct iscsit_session; struct iscsi_tiqn; struct iscsi_tpg_np; struct se_node_acl; struct sockaddr_storage; extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16); extern int iscsit_load_discovery_tpg(void); extern void iscsit_release_discovery_tpg(void); extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *, struct iscsi_np *, struct iscsi_tpg_np **); extern int iscsit_get_tpg(struct iscsi_portal_group *); extern void iscsit_put_tpg(struct iscsi_portal_group *); extern void iscsit_tpg_dump_params(struct iscsi_portal_group *); extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *); extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *, int); extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *); extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int); extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsit_session *); extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int); extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *, struct sockaddr_storage *, struct iscsi_tpg_np *, int); extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *, struct iscsi_tpg_np *); extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32); extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32); extern int iscsit_ta_generate_node_acls(struct iscsi_portal_group *, u32); extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32); extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32); extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32); extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32); extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32); extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32); extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32); extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32); #endif /* ISCSI_TARGET_TPG_H */
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2012 ARM Limited */ #define DRVNAME "vexpress-hwmon" #define pr_fmt(fmt) DRVNAME ": " fmt #include <linux/device.h> #include <linux/err.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/vexpress.h> struct vexpress_hwmon_data { struct device *hwmon_dev; struct regmap *reg; }; static ssize_t vexpress_hwmon_label_show(struct device *dev, struct device_attribute *dev_attr, char *buffer) { const char *label = of_get_property(dev->of_node, "label", NULL); return sysfs_emit(buffer, "%s\n", label); } static ssize_t vexpress_hwmon_u32_show(struct device *dev, struct device_attribute *dev_attr, char *buffer) { struct vexpress_hwmon_data *data = dev_get_drvdata(dev); int err; u32 value; err = regmap_read(data->reg, 0, &value); if (err) return err; return sysfs_emit(buffer, "%u\n", value / to_sensor_dev_attr(dev_attr)->index); } static ssize_t vexpress_hwmon_u64_show(struct device *dev, struct device_attribute *dev_attr, char *buffer) { struct vexpress_hwmon_data *data = dev_get_drvdata(dev); int err; u32 value_hi, value_lo; err = regmap_read(data->reg, 0, &value_lo); if (err) return err; err = regmap_read(data->reg, 1, &value_hi); if (err) return err; return sysfs_emit(buffer, "%llu\n", div_u64(((u64)value_hi << 32) | value_lo, to_sensor_dev_attr(dev_attr)->index)); } static umode_t vexpress_hwmon_attr_is_visible(struct kobject *kobj, struct attribute *attr, int index) { struct device *dev = kobj_to_dev(kobj); struct device_attribute *dev_attr = container_of(attr, struct device_attribute, attr); if (dev_attr->show == vexpress_hwmon_label_show && !of_property_present(dev->of_node, "label")) return 0; return attr->mode; } struct vexpress_hwmon_type { const char *name; const struct attribute_group **attr_groups; }; #if !defined(CONFIG_REGULATOR_VEXPRESS) static DEVICE_ATTR(in1_label, 0444, vexpress_hwmon_label_show, NULL); static SENSOR_DEVICE_ATTR_RO(in1_input, vexpress_hwmon_u32, 1000); static struct attribute *vexpress_hwmon_attrs_volt[] = { &dev_attr_in1_label.attr, &sensor_dev_attr_in1_input.dev_attr.attr, NULL }; static struct attribute_group vexpress_hwmon_group_volt = { .is_visible = vexpress_hwmon_attr_is_visible, .attrs = vexpress_hwmon_attrs_volt, }; static struct vexpress_hwmon_type vexpress_hwmon_volt = { .name = "vexpress_volt", .attr_groups = (const struct attribute_group *[]) { &vexpress_hwmon_group_volt, NULL, }, }; #endif static DEVICE_ATTR(curr1_label, 0444, vexpress_hwmon_label_show, NULL); static SENSOR_DEVICE_ATTR_RO(curr1_input, vexpress_hwmon_u32, 1000); static struct attribute *vexpress_hwmon_attrs_amp[] = { &dev_attr_curr1_label.attr, &sensor_dev_attr_curr1_input.dev_attr.attr, NULL }; static struct attribute_group vexpress_hwmon_group_amp = { .is_visible = vexpress_hwmon_attr_is_visible, .attrs = vexpress_hwmon_attrs_amp, }; static struct vexpress_hwmon_type vexpress_hwmon_amp = { .name = "vexpress_amp", .attr_groups = (const struct attribute_group *[]) { &vexpress_hwmon_group_amp, NULL }, }; static DEVICE_ATTR(temp1_label, 0444, vexpress_hwmon_label_show, NULL); static SENSOR_DEVICE_ATTR_RO(temp1_input, vexpress_hwmon_u32, 1000); static struct attribute *vexpress_hwmon_attrs_temp[] = { &dev_attr_temp1_label.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, NULL }; static struct attribute_group vexpress_hwmon_group_temp = { .is_visible = vexpress_hwmon_attr_is_visible, .attrs = vexpress_hwmon_attrs_temp, }; 
static struct vexpress_hwmon_type vexpress_hwmon_temp = { .name = "vexpress_temp", .attr_groups = (const struct attribute_group *[]) { &vexpress_hwmon_group_temp, NULL }, }; static DEVICE_ATTR(power1_label, 0444, vexpress_hwmon_label_show, NULL); static SENSOR_DEVICE_ATTR_RO(power1_input, vexpress_hwmon_u32, 1); static struct attribute *vexpress_hwmon_attrs_power[] = { &dev_attr_power1_label.attr, &sensor_dev_attr_power1_input.dev_attr.attr, NULL }; static struct attribute_group vexpress_hwmon_group_power = { .is_visible = vexpress_hwmon_attr_is_visible, .attrs = vexpress_hwmon_attrs_power, }; static struct vexpress_hwmon_type vexpress_hwmon_power = { .name = "vexpress_power", .attr_groups = (const struct attribute_group *[]) { &vexpress_hwmon_group_power, NULL }, }; static DEVICE_ATTR(energy1_label, 0444, vexpress_hwmon_label_show, NULL); static SENSOR_DEVICE_ATTR_RO(energy1_input, vexpress_hwmon_u64, 1); static struct attribute *vexpress_hwmon_attrs_energy[] = { &dev_attr_energy1_label.attr, &sensor_dev_attr_energy1_input.dev_attr.attr, NULL }; static struct attribute_group vexpress_hwmon_group_energy = { .is_visible = vexpress_hwmon_attr_is_visible, .attrs = vexpress_hwmon_attrs_energy, }; static struct vexpress_hwmon_type vexpress_hwmon_energy = { .name = "vexpress_energy", .attr_groups = (const struct attribute_group *[]) { &vexpress_hwmon_group_energy, NULL }, }; static const struct of_device_id vexpress_hwmon_of_match[] = { #if !defined(CONFIG_REGULATOR_VEXPRESS) { .compatible = "arm,vexpress-volt", .data = &vexpress_hwmon_volt, }, #endif { .compatible = "arm,vexpress-amp", .data = &vexpress_hwmon_amp, }, { .compatible = "arm,vexpress-temp", .data = &vexpress_hwmon_temp, }, { .compatible = "arm,vexpress-power", .data = &vexpress_hwmon_power, }, { .compatible = "arm,vexpress-energy", .data = &vexpress_hwmon_energy, }, {} }; MODULE_DEVICE_TABLE(of, vexpress_hwmon_of_match); static int vexpress_hwmon_probe(struct platform_device *pdev) { struct vexpress_hwmon_data *data; const struct vexpress_hwmon_type *type; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; platform_set_drvdata(pdev, data); type = of_device_get_match_data(&pdev->dev); if (!type) return -ENODEV; data->reg = devm_regmap_init_vexpress_config(&pdev->dev); if (IS_ERR(data->reg)) return PTR_ERR(data->reg); data->hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev, type->name, data, type->attr_groups); return PTR_ERR_OR_ZERO(data->hwmon_dev); } static struct platform_driver vexpress_hwmon_driver = { .probe = vexpress_hwmon_probe, .driver = { .name = DRVNAME, .of_match_table = vexpress_hwmon_of_match, }, }; module_platform_driver(vexpress_hwmon_driver); MODULE_AUTHOR("Pawel Moll <[email protected]>"); MODULE_DESCRIPTION("Versatile Express hwmon sensors driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:vexpress-hwmon");
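/*
 * Illustrative sketch (standalone C, made-up register values): how
 * vexpress_hwmon_u64_show() above assembles a 64-bit energy counter from two
 * 32-bit configuration-bus reads before dividing by the attribute index.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t value_lo = 0x89abcdefU;	/* hypothetical read at offset 0 */
	uint32_t value_hi = 0x12U;		/* hypothetical read at offset 1 */
	uint64_t energy = ((uint64_t)value_hi << 32) | value_lo;

	/* energy1_input uses index 1, so the raw counter is reported */
	printf("%llu\n", (unsigned long long)(energy / 1));
	return 0;
}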
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PGTABLE_NOP4D_H #define _PGTABLE_NOP4D_H #ifndef __ASSEMBLY__ #define __PAGETABLE_P4D_FOLDED 1 typedef struct { pgd_t pgd; } p4d_t; #define P4D_SHIFT PGDIR_SHIFT #define PTRS_PER_P4D 1 #define P4D_SIZE (1UL << P4D_SHIFT) #define P4D_MASK (~(P4D_SIZE-1)) /* * The "pgd_xxx()" functions here are trivial for a folded two-level * setup: the p4d is never bad, and a p4d always exists (as it's folded * into the pgd entry) */ static inline int pgd_none(pgd_t pgd) { return 0; } static inline int pgd_bad(pgd_t pgd) { return 0; } static inline int pgd_present(pgd_t pgd) { return 1; } static inline void pgd_clear(pgd_t *pgd) { } #define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd)) #define pgd_populate(mm, pgd, p4d) do { } while (0) #define pgd_populate_safe(mm, pgd, p4d) do { } while (0) /* * (p4ds are folded into pgds so this doesn't get actually called, * but the define is needed for a generic inline function.) */ #define set_pgd(pgdptr, pgdval) set_p4d((p4d_t *)(pgdptr), (p4d_t) { pgdval }) static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) { return (p4d_t *)pgd; } #define p4d_val(x) (pgd_val((x).pgd)) #define __p4d(x) ((p4d_t) { __pgd(x) }) #define pgd_page(pgd) (p4d_page((p4d_t){ pgd })) #define pgd_page_vaddr(pgd) ((unsigned long)(p4d_pgtable((p4d_t){ pgd }))) /* * allocating and freeing a p4d is trivial: the 1-entry p4d is * inside the pgd, so has no extra memory associated with it. */ #define p4d_alloc_one(mm, address) NULL #define p4d_free(mm, x) do { } while (0) #define p4d_free_tlb(tlb, x, a) do { } while (0) #undef p4d_addr_end #define p4d_addr_end(addr, end) (end) #endif /* __ASSEMBLY__ */ #endif /* _PGTABLE_NOP4D_H */
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* * Copyright (c) 2016 BayLibre, SAS. * Author: Neil Armstrong <[email protected]> * * Copyright (c) 2017 Amlogic, inc. * Author: Yixun Lan <[email protected]> * */ #ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H #define _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H /* RESET0 */ #define RESET_HIU 0 #define RESET_PCIE_A 1 #define RESET_PCIE_B 2 #define RESET_DDR_TOP 3 /* 4 */ #define RESET_VIU 5 #define RESET_PCIE_PHY 6 #define RESET_PCIE_APB 7 /* 8 */ /* 9 */ #define RESET_VENC 10 #define RESET_ASSIST 11 /* 12 */ #define RESET_VCBUS 13 /* 14 */ /* 15 */ #define RESET_GIC 16 #define RESET_CAPB3_DECODE 17 /* 18-21 */ #define RESET_SYS_CPU_CAPB3 22 #define RESET_CBUS_CAPB3 23 #define RESET_AHB_CNTL 24 #define RESET_AHB_DATA 25 #define RESET_VCBUS_CLK81 26 #define RESET_MMC 27 /* 28-31 */ /* RESET1 */ /* 32 */ /* 33 */ #define RESET_USB_OTG 34 #define RESET_DDR 35 #define RESET_AO_RESET 36 /* 37 */ #define RESET_AHB_SRAM 38 /* 39 */ /* 40 */ #define RESET_DMA 41 #define RESET_ISA 42 #define RESET_ETHERNET 43 /* 44 */ #define RESET_SD_EMMC_B 45 #define RESET_SD_EMMC_C 46 #define RESET_ROM_BOOT 47 #define RESET_SYS_CPU_0 48 #define RESET_SYS_CPU_1 49 #define RESET_SYS_CPU_2 50 #define RESET_SYS_CPU_3 51 #define RESET_SYS_CPU_CORE_0 52 #define RESET_SYS_CPU_CORE_1 53 #define RESET_SYS_CPU_CORE_2 54 #define RESET_SYS_CPU_CORE_3 55 #define RESET_SYS_PLL_DIV 56 #define RESET_SYS_CPU_AXI 57 #define RESET_SYS_CPU_L2 58 #define RESET_SYS_CPU_P 59 #define RESET_SYS_CPU_MBIST 60 /* 61-63 */ /* RESET2 */ /* 64 */ /* 65 */ #define RESET_AUDIO 66 /* 67 */ #define RESET_MIPI_HOST 68 #define RESET_AUDIO_LOCKER 69 #define RESET_GE2D 70 /* 71-76 */ #define RESET_AO_CPU_RESET 77 /* 78-95 */ /* RESET3 */ #define RESET_RING_OSCILLATOR 96 /* 97-127 */ /* RESET4 */ /* 128 */ /* 129 */ #define RESET_MIPI_PHY 130 /* 131-140 */ #define RESET_VENCL 141 #define RESET_I2C_MASTER_2 142 #define RESET_I2C_MASTER_1 143 /* 144-159 */ /* RESET5 */ /* 160-191 */ /* RESET6 */ #define RESET_PERIPHS_GENERAL 192 #define RESET_PERIPHS_SPICC 193 /* 194 */ /* 195 */ #define RESET_PERIPHS_I2C_MASTER_0 196 /* 197-200 */ #define RESET_PERIPHS_UART_0 201 #define RESET_PERIPHS_UART_1 202 /* 203-204 */ #define RESET_PERIPHS_SPI_0 205 #define RESET_PERIPHS_I2C_MASTER_3 206 /* 207-223 */ /* RESET7 */ #define RESET_USB_DDR_0 224 #define RESET_USB_DDR_1 225 #define RESET_USB_DDR_2 226 #define RESET_USB_DDR_3 227 /* 228 */ #define RESET_DEVICE_MMC_ARB 229 /* 230 */ #define RESET_VID_LOCK 231 #define RESET_A9_DMC_PIPEL 232 #define RESET_DMC_VPU_PIPEL 233 /* 234-255 */ #endif
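/*
 * Illustrative sketch (standalone C): the reset indices above are linear
 * across the RESET0..RESET7 banks at 32 lines per bank, so a driver can
 * recover bank and bit as index / 32 and index % 32. For example,
 * RESET_SD_EMMC_B (45) lives in RESET1, bit 13.
 */
#include <stdio.h>

int main(void)
{
	unsigned int index = 45;	/* RESET_SD_EMMC_B */

	printf("RESET%u register, bit %u\n", index / 32, index % 32);
	return 0;
}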
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * wm8580.h -- audio driver for WM8580 * * Copyright 2008 Samsung Electronics. * Author: Ryu Euiyoul * [email protected] */ #ifndef _WM8580_H #define _WM8580_H #define WM8580_PLLA 1 #define WM8580_PLLB 2 #define WM8580_MCLK 1 #define WM8580_CLKOUTSRC 2 #define WM8580_CLKSRC_MCLK 1 #define WM8580_CLKSRC_PLLA 2 #define WM8580_CLKSRC_PLLB 3 #define WM8580_CLKSRC_OSC 4 #define WM8580_CLKSRC_NONE 5 #define WM8580_CLKSRC_ADCMCLK 6 #define WM8580_DAI_PAIFRX 0 #define WM8580_DAI_PAIFTX 1 #endif
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2015 - 2021 Intel Corporation */ #include <linux/bitfield.h> #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_pfvf_msg.h" #include "adf_pfvf_vf_msg.h" #include "adf_pfvf_vf_proto.h" /** * adf_vf2pf_notify_init() - send init msg to PF * @accel_dev: Pointer to acceleration VF device. * * Function sends an init message from the VF to a PF * * Return: 0 on success, error code otherwise. */ int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev) { struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT }; if (adf_send_vf2pf_msg(accel_dev, msg)) { dev_err(&GET_DEV(accel_dev), "Failed to send Init event to PF\n"); return -EFAULT; } set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); return 0; } EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init); /** * adf_vf2pf_notify_shutdown() - send shutdown msg to PF * @accel_dev: Pointer to acceleration VF device. * * Function sends a shutdown message from the VF to a PF * * Return: void */ void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev) { struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_SHUTDOWN }; if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status)) if (adf_send_vf2pf_msg(accel_dev, msg)) dev_err(&GET_DEV(accel_dev), "Failed to send Shutdown event to PF\n"); } EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown); void adf_vf2pf_notify_restart_complete(struct adf_accel_dev *accel_dev) { struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE }; /* Check compatibility version */ if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_FALLBACK) return; if (adf_send_vf2pf_msg(accel_dev, msg)) dev_err(&GET_DEV(accel_dev), "Failed to send Restarting complete event to PF\n"); } EXPORT_SYMBOL_GPL(adf_vf2pf_notify_restart_complete); int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev) { u8 pf_version; int compat; int ret; struct pfvf_message resp; struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ, .data = ADF_PFVF_COMPAT_THIS_VERSION, }; BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255); ret = adf_send_vf2pf_req(accel_dev, msg, &resp); if (ret) { dev_err(&GET_DEV(accel_dev), "Failed to send Compatibility Version Request.\n"); return ret; } pf_version = FIELD_GET(ADF_PF2VF_VERSION_RESP_VERS_MASK, resp.data); compat = FIELD_GET(ADF_PF2VF_VERSION_RESP_RESULT_MASK, resp.data); /* Response from PF received, check compatibility */ switch (compat) { case ADF_PF2VF_VF_COMPATIBLE: break; case ADF_PF2VF_VF_COMPAT_UNKNOWN: /* VF is newer than PF - compatible for now */ break; case ADF_PF2VF_VF_INCOMPATIBLE: dev_err(&GET_DEV(accel_dev), "PF (vers %d) and VF (vers %d) are not compatible\n", pf_version, ADF_PFVF_COMPAT_THIS_VERSION); return -EINVAL; default: dev_err(&GET_DEV(accel_dev), "Invalid response from PF; assume not compatible\n"); return -EINVAL; } accel_dev->vf.pf_compat_ver = pf_version; return 0; } int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct capabilities_v3 cap_msg = { 0 }; unsigned int len = sizeof(cap_msg); if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES) /* The PF is too old to support the extended capabilities */ return 0; if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY, (u8 *)&cap_msg, &len)) { dev_err(&GET_DEV(accel_dev), "QAT: Failed to get block message response\n"); return -EFAULT; } switch (cap_msg.hdr.version) { default: /* Newer version received, handle only the known parts */ 
fallthrough; case ADF_PFVF_CAPABILITIES_V3_VERSION: if (likely(len >= sizeof(struct capabilities_v3))) hw_data->clock_frequency = cap_msg.frequency; else dev_info(&GET_DEV(accel_dev), "Could not get frequency"); fallthrough; case ADF_PFVF_CAPABILITIES_V2_VERSION: if (likely(len >= sizeof(struct capabilities_v2))) hw_data->accel_capabilities_mask = cap_msg.capabilities; else dev_info(&GET_DEV(accel_dev), "Could not get capabilities"); fallthrough; case ADF_PFVF_CAPABILITIES_V1_VERSION: if (likely(len >= sizeof(struct capabilities_v1))) { hw_data->extended_dc_capabilities = cap_msg.ext_dc_caps; } else { dev_err(&GET_DEV(accel_dev), "Capabilities message truncated to %d bytes\n", len); return -EFAULT; } } return 0; } int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev) { struct ring_to_svc_map_v1 rts_map_msg = { 0 }; unsigned int len = sizeof(rts_map_msg); if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP) /* Use already set default mappings */ return 0; if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP, (u8 *)&rts_map_msg, &len)) { dev_err(&GET_DEV(accel_dev), "QAT: Failed to get block message response\n"); return -EFAULT; } if (unlikely(len < sizeof(struct ring_to_svc_map_v1))) { dev_err(&GET_DEV(accel_dev), "RING_TO_SVC message truncated to %d bytes\n", len); return -EFAULT; } /* Only v1 at present */ accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map; return 0; }
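/*
 * Illustrative sketch (standalone C with hypothetical structs, not the QAT
 * ABI): the version/length handling pattern used by
 * adf_vf2pf_get_capabilities() above. Newer message versions only append
 * fields, so the parser switches on the advertised version and falls
 * through, guarding each block with a length check so older or truncated
 * replies are still consumed as far as they go.
 */
#include <stdio.h>
#include <string.h>

struct caps_v1 { unsigned int version; unsigned int ext_dc_caps; };
struct caps_v2 { struct caps_v1 v1; unsigned int capabilities; };

static int parse_caps(const void *buf, unsigned int len)
{
	struct caps_v2 msg = { { 0, 0 }, 0 };

	memcpy(&msg, buf, len < sizeof(msg) ? len : sizeof(msg));

	switch (msg.v1.version) {
	default:	/* newer than we know: consume only the known parts */
	case 2:
		if (len >= sizeof(struct caps_v2))
			printf("capabilities 0x%x\n", msg.capabilities);
		/* fall through */
	case 1:
		if (len < sizeof(struct caps_v1))
			return -1;	/* truncated below the base version */
		printf("ext dc caps 0x%x\n", msg.v1.ext_dc_caps);
	}
	return 0;
}

int main(void)
{
	struct caps_v2 m = { { 2, 0x11 }, 0x22 };

	return parse_caps(&m, sizeof(m));
}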
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2020 IBM Corporation * */ #ifndef _NXU_DBG_H_ #define _NXU_DBG_H_ #include <sys/file.h> #include <stdint.h> #include <stdio.h> #include <time.h> #include <pthread.h> extern FILE * nx_gzip_log; extern int nx_gzip_trace; extern unsigned int nx_gzip_inflate_impl; extern unsigned int nx_gzip_deflate_impl; extern unsigned int nx_gzip_inflate_flags; extern unsigned int nx_gzip_deflate_flags; extern int nx_dbg; pthread_mutex_t mutex_log; #define nx_gzip_trace_enabled() (nx_gzip_trace & 0x1) #define nx_gzip_hw_trace_enabled() (nx_gzip_trace & 0x2) #define nx_gzip_sw_trace_enabled() (nx_gzip_trace & 0x4) #define nx_gzip_gather_statistics() (nx_gzip_trace & 0x8) #define nx_gzip_per_stream_stat() (nx_gzip_trace & 0x10) #define prt(fmt, ...) do { \ pthread_mutex_lock(&mutex_log); \ flock(nx_gzip_log->_fileno, LOCK_EX); \ time_t t; struct tm *m; time(&t); m = localtime(&t); \ fprintf(nx_gzip_log, "[%04d/%02d/%02d %02d:%02d:%02d] " \ "pid %d: " fmt, \ (int)m->tm_year + 1900, (int)m->tm_mon+1, (int)m->tm_mday, \ (int)m->tm_hour, (int)m->tm_min, (int)m->tm_sec, \ (int)getpid(), ## __VA_ARGS__); \ fflush(nx_gzip_log); \ flock(nx_gzip_log->_fileno, LOCK_UN); \ pthread_mutex_unlock(&mutex_log); \ } while (0) /* Use in case of an error */ #define prt_err(fmt, ...) do { if (nx_dbg >= 0) { \ prt("%s:%u: Error: "fmt, \ __FILE__, __LINE__, ## __VA_ARGS__); \ }} while (0) /* Use in case of a warning */ #define prt_warn(fmt, ...) do { if (nx_dbg >= 1) { \ prt("%s:%u: Warning: "fmt, \ __FILE__, __LINE__, ## __VA_ARGS__); \ }} while (0) /* Informational printouts */ #define prt_info(fmt, ...) do { if (nx_dbg >= 2) { \ prt("Info: "fmt, ## __VA_ARGS__); \ }} while (0) /* Trace zlib wrapper code */ #define prt_trace(fmt, ...) do { if (nx_gzip_trace_enabled()) { \ prt("### "fmt, ## __VA_ARGS__); \ }} while (0) /* Trace statistics */ #define prt_stat(fmt, ...) do { if (nx_gzip_gather_statistics()) { \ prt("### "fmt, ## __VA_ARGS__); \ }} while (0) /* Trace zlib hardware implementation */ #define hw_trace(fmt, ...) do { \ if (nx_gzip_hw_trace_enabled()) \ fprintf(nx_gzip_log, "hhh " fmt, ## __VA_ARGS__); \ } while (0) /* Trace zlib software implementation */ #define sw_trace(fmt, ...) do { \ if (nx_gzip_sw_trace_enabled()) \ fprintf(nx_gzip_log, "sss " fmt, ## __VA_ARGS__); \ } while (0) /** * str_to_num - Convert string into number and cope with endings like * KiB for kilobyte * MiB for megabyte * GiB for gigabyte */ uint64_t str_to_num(char *str); void nx_lib_debug(int onoff); #endif /* _NXU_DBG_H_ */
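/*
 * Illustrative sketch only: one plausible str_to_num() honoring the
 * KiB/MiB/GiB endings documented above. The project's real implementation
 * may differ.
 */
#include <stdint.h>
#include <stdlib.h>
#include <strings.h>

uint64_t str_to_num(char *str)
{
	char *end;
	uint64_t num = strtoull(str, &end, 0);

	if (!strncasecmp(end, "KiB", 3))
		num *= 1024ULL;
	else if (!strncasecmp(end, "MiB", 3))
		num *= 1024ULL * 1024;
	else if (!strncasecmp(end, "GiB", 3))
		num *= 1024ULL * 1024 * 1024;

	return num;
}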
// SPDX-License-Identifier: GPL-2.0-or-later /* * Voltage regulation driver for active-semi ACT8945A PMIC * * Copyright (C) 2015 Atmel Corporation * * Author: Wenyou Yang <[email protected]> */ #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <dt-bindings/regulator/active-semi,8945a-regulator.h> /* * ACT8945A Global Register Map. */ #define ACT8945A_SYS_MODE 0x00 #define ACT8945A_SYS_CTRL 0x01 #define ACT8945A_SYS_UNLK_REGS 0x0b #define ACT8945A_DCDC1_VSET1 0x20 #define ACT8945A_DCDC1_VSET2 0x21 #define ACT8945A_DCDC1_CTRL 0x22 #define ACT8945A_DCDC1_SUS 0x24 #define ACT8945A_DCDC2_VSET1 0x30 #define ACT8945A_DCDC2_VSET2 0x31 #define ACT8945A_DCDC2_CTRL 0x32 #define ACT8945A_DCDC2_SUS 0x34 #define ACT8945A_DCDC3_VSET1 0x40 #define ACT8945A_DCDC3_VSET2 0x41 #define ACT8945A_DCDC3_CTRL 0x42 #define ACT8945A_DCDC3_SUS 0x44 #define ACT8945A_LDO1_VSET 0x50 #define ACT8945A_LDO1_CTRL 0x51 #define ACT8945A_LDO1_SUS 0x52 #define ACT8945A_LDO2_VSET 0x54 #define ACT8945A_LDO2_CTRL 0x55 #define ACT8945A_LDO2_SUS 0x56 #define ACT8945A_LDO3_VSET 0x60 #define ACT8945A_LDO3_CTRL 0x61 #define ACT8945A_LDO3_SUS 0x62 #define ACT8945A_LDO4_VSET 0x64 #define ACT8945A_LDO4_CTRL 0x65 #define ACT8945A_LDO4_SUS 0x66 /* * Field Definitions. */ #define ACT8945A_ENA 0x80 /* ON - [7] */ #define ACT8945A_VSEL_MASK 0x3F /* VSET - [5:0] */ /* * ACT8945A Voltage Number */ #define ACT8945A_VOLTAGE_NUM 64 enum { ACT8945A_ID_DCDC1, ACT8945A_ID_DCDC2, ACT8945A_ID_DCDC3, ACT8945A_ID_LDO1, ACT8945A_ID_LDO2, ACT8945A_ID_LDO3, ACT8945A_ID_LDO4, ACT8945A_ID_MAX, }; struct act8945a_pmic { struct regmap *regmap; u32 op_mode[ACT8945A_ID_MAX]; }; static const struct linear_range act8945a_voltage_ranges[] = { REGULATOR_LINEAR_RANGE(600000, 0, 23, 25000), REGULATOR_LINEAR_RANGE(1200000, 24, 47, 50000), REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000), }; static int act8945a_set_suspend_state(struct regulator_dev *rdev, bool enable) { struct regmap *regmap = rdev->regmap; int id = rdev_get_id(rdev); int reg, val; switch (id) { case ACT8945A_ID_DCDC1: reg = ACT8945A_DCDC1_SUS; val = 0xa8; break; case ACT8945A_ID_DCDC2: reg = ACT8945A_DCDC2_SUS; val = 0xa8; break; case ACT8945A_ID_DCDC3: reg = ACT8945A_DCDC3_SUS; val = 0xa8; break; case ACT8945A_ID_LDO1: reg = ACT8945A_LDO1_SUS; val = 0xe8; break; case ACT8945A_ID_LDO2: reg = ACT8945A_LDO2_SUS; val = 0xe8; break; case ACT8945A_ID_LDO3: reg = ACT8945A_LDO3_SUS; val = 0xe8; break; case ACT8945A_ID_LDO4: reg = ACT8945A_LDO4_SUS; val = 0xe8; break; default: return -EINVAL; } if (enable) val |= BIT(4); /* * Ask the PMIC to enable/disable this output when entering hibernate * mode. 
*/ return regmap_write(regmap, reg, val); } static int act8945a_set_suspend_enable(struct regulator_dev *rdev) { return act8945a_set_suspend_state(rdev, true); } static int act8945a_set_suspend_disable(struct regulator_dev *rdev) { return act8945a_set_suspend_state(rdev, false); } static unsigned int act8945a_of_map_mode(unsigned int mode) { switch (mode) { case ACT8945A_REGULATOR_MODE_FIXED: case ACT8945A_REGULATOR_MODE_NORMAL: return REGULATOR_MODE_NORMAL; case ACT8945A_REGULATOR_MODE_LOWPOWER: return REGULATOR_MODE_STANDBY; default: return REGULATOR_MODE_INVALID; } } static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct act8945a_pmic *act8945a = rdev_get_drvdata(rdev); struct regmap *regmap = rdev->regmap; int id = rdev_get_id(rdev); int reg, ret, val = 0; switch (id) { case ACT8945A_ID_DCDC1: reg = ACT8945A_DCDC1_CTRL; break; case ACT8945A_ID_DCDC2: reg = ACT8945A_DCDC2_CTRL; break; case ACT8945A_ID_DCDC3: reg = ACT8945A_DCDC3_CTRL; break; case ACT8945A_ID_LDO1: reg = ACT8945A_LDO1_CTRL; break; case ACT8945A_ID_LDO2: reg = ACT8945A_LDO2_CTRL; break; case ACT8945A_ID_LDO3: reg = ACT8945A_LDO3_CTRL; break; case ACT8945A_ID_LDO4: reg = ACT8945A_LDO4_CTRL; break; default: return -EINVAL; } switch (mode) { case REGULATOR_MODE_STANDBY: if (id > ACT8945A_ID_DCDC3) val = BIT(5); break; case REGULATOR_MODE_NORMAL: if (id <= ACT8945A_ID_DCDC3) val = BIT(5); break; default: return -EINVAL; } ret = regmap_update_bits(regmap, reg, BIT(5), val); if (ret) return ret; act8945a->op_mode[id] = mode; return 0; } static unsigned int act8945a_get_mode(struct regulator_dev *rdev) { struct act8945a_pmic *act8945a = rdev_get_drvdata(rdev); int id = rdev_get_id(rdev); if (id < ACT8945A_ID_DCDC1 || id >= ACT8945A_ID_MAX) return -EINVAL; return act8945a->op_mode[id]; } static const struct regulator_ops act8945a_ops = { .list_voltage = regulator_list_voltage_linear_range, .map_voltage = regulator_map_voltage_linear_range, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .set_mode = act8945a_set_mode, .get_mode = act8945a_get_mode, .is_enabled = regulator_is_enabled_regmap, .set_suspend_enable = act8945a_set_suspend_enable, .set_suspend_disable = act8945a_set_suspend_disable, }; #define ACT89xx_REG(_name, _family, _id, _vsel_reg, _supply) \ [_family##_ID_##_id] = { \ .name = _name, \ .supply_name = _supply, \ .of_match = of_match_ptr("REG_"#_id), \ .of_map_mode = act8945a_of_map_mode, \ .regulators_node = of_match_ptr("regulators"), \ .id = _family##_ID_##_id, \ .type = REGULATOR_VOLTAGE, \ .ops = &act8945a_ops, \ .n_voltages = ACT8945A_VOLTAGE_NUM, \ .linear_ranges = act8945a_voltage_ranges, \ .n_linear_ranges = ARRAY_SIZE(act8945a_voltage_ranges), \ .vsel_reg = _family##_##_id##_##_vsel_reg, \ .vsel_mask = ACT8945A_VSEL_MASK, \ .enable_reg = _family##_##_id##_CTRL, \ .enable_mask = ACT8945A_ENA, \ .owner = THIS_MODULE, \ } static const struct regulator_desc act8945a_regulators[] = { ACT89xx_REG("DCDC_REG1", ACT8945A, DCDC1, VSET1, "vp1"), ACT89xx_REG("DCDC_REG2", ACT8945A, DCDC2, VSET1, "vp2"), ACT89xx_REG("DCDC_REG3", ACT8945A, DCDC3, VSET1, "vp3"), ACT89xx_REG("LDO_REG1", ACT8945A, LDO1, VSET, "inl45"), ACT89xx_REG("LDO_REG2", ACT8945A, LDO2, VSET, "inl45"), ACT89xx_REG("LDO_REG3", ACT8945A, LDO3, VSET, "inl67"), ACT89xx_REG("LDO_REG4", ACT8945A, LDO4, VSET, "inl67"), }; static const struct regulator_desc act8945a_alt_regulators[] = { 
ACT89xx_REG("DCDC_REG1", ACT8945A, DCDC1, VSET2, "vp1"), ACT89xx_REG("DCDC_REG2", ACT8945A, DCDC2, VSET2, "vp2"), ACT89xx_REG("DCDC_REG3", ACT8945A, DCDC3, VSET2, "vp3"), ACT89xx_REG("LDO_REG1", ACT8945A, LDO1, VSET, "inl45"), ACT89xx_REG("LDO_REG2", ACT8945A, LDO2, VSET, "inl45"), ACT89xx_REG("LDO_REG3", ACT8945A, LDO3, VSET, "inl67"), ACT89xx_REG("LDO_REG4", ACT8945A, LDO4, VSET, "inl67"), }; static int act8945a_pmic_probe(struct platform_device *pdev) { struct regulator_config config = { }; const struct regulator_desc *regulators; struct act8945a_pmic *act8945a; struct regulator_dev *rdev; int i, num_regulators; bool voltage_select; act8945a = devm_kzalloc(&pdev->dev, sizeof(*act8945a), GFP_KERNEL); if (!act8945a) return -ENOMEM; act8945a->regmap = dev_get_regmap(pdev->dev.parent, NULL); if (!act8945a->regmap) { dev_err(&pdev->dev, "could not retrieve regmap from parent device\n"); return -EINVAL; } voltage_select = of_property_read_bool(pdev->dev.parent->of_node, "active-semi,vsel-high"); if (voltage_select) { regulators = act8945a_alt_regulators; num_regulators = ARRAY_SIZE(act8945a_alt_regulators); } else { regulators = act8945a_regulators; num_regulators = ARRAY_SIZE(act8945a_regulators); } config.dev = &pdev->dev; config.dev->of_node = pdev->dev.parent->of_node; config.driver_data = act8945a; for (i = 0; i < num_regulators; i++) { rdev = devm_regulator_register(&pdev->dev, &regulators[i], &config); if (IS_ERR(rdev)) { dev_err(&pdev->dev, "failed to register %s regulator\n", regulators[i].name); return PTR_ERR(rdev); } } platform_set_drvdata(pdev, act8945a); /* Unlock expert registers. */ return regmap_write(act8945a->regmap, ACT8945A_SYS_UNLK_REGS, 0xef); } static int __maybe_unused act8945a_suspend(struct device *pdev) { struct act8945a_pmic *act8945a = dev_get_drvdata(pdev); /* * Ask the PMIC to enter the suspend mode on the next PWRHLD * transition. */ return regmap_write(act8945a->regmap, ACT8945A_SYS_CTRL, 0x42); } static SIMPLE_DEV_PM_OPS(act8945a_pm, act8945a_suspend, NULL); static void act8945a_pmic_shutdown(struct platform_device *pdev) { struct act8945a_pmic *act8945a = platform_get_drvdata(pdev); /* * Ask the PMIC to shutdown everything on the next PWRHLD transition. */ regmap_write(act8945a->regmap, ACT8945A_SYS_CTRL, 0x0); } static struct platform_driver act8945a_pmic_driver = { .driver = { .name = "act8945a-regulator", .probe_type = PROBE_PREFER_ASYNCHRONOUS, .pm = &act8945a_pm, }, .probe = act8945a_pmic_probe, .shutdown = act8945a_pmic_shutdown, }; module_platform_driver(act8945a_pmic_driver); MODULE_DESCRIPTION("Active-semi ACT8945A voltage regulator driver"); MODULE_AUTHOR("Wenyou Yang <[email protected]>"); MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2005-2007 Takahiro Hirofuchi */ #ifndef __USBIP_NETWORK_H #define __USBIP_NETWORK_H #ifdef HAVE_CONFIG_H #include "../config.h" #endif #include <sys/types.h> #include <stdint.h> extern int usbip_port; extern char *usbip_port_string; void usbip_setup_port_number(char *arg); /* ---------------------------------------------------------------------- */ /* Common header for all the kinds of PDUs. */ struct op_common { uint16_t version; #define OP_REQUEST (0x80 << 8) #define OP_REPLY (0x00 << 8) uint16_t code; /* status codes defined in usbip_common.h */ uint32_t status; /* op_code status (for reply) */ } __attribute__((packed)); /* ---------------------------------------------------------------------- */ /* Dummy Code */ #define OP_UNSPEC 0x00 #define OP_REQ_UNSPEC OP_UNSPEC #define OP_REP_UNSPEC OP_UNSPEC /* ---------------------------------------------------------------------- */ /* Retrieve USB device information. (still not used) */ #define OP_DEVINFO 0x02 #define OP_REQ_DEVINFO (OP_REQUEST | OP_DEVINFO) #define OP_REP_DEVINFO (OP_REPLY | OP_DEVINFO) struct op_devinfo_request { char busid[SYSFS_BUS_ID_SIZE]; } __attribute__((packed)); struct op_devinfo_reply { struct usbip_usb_device udev; struct usbip_usb_interface uinf[]; } __attribute__((packed)); /* ---------------------------------------------------------------------- */ /* Import a remote USB device. */ #define OP_IMPORT 0x03 #define OP_REQ_IMPORT (OP_REQUEST | OP_IMPORT) #define OP_REP_IMPORT (OP_REPLY | OP_IMPORT) struct op_import_request { char busid[SYSFS_BUS_ID_SIZE]; } __attribute__((packed)); struct op_import_reply { struct usbip_usb_device udev; // struct usbip_usb_interface uinf[]; } __attribute__((packed)); #define PACK_OP_IMPORT_REQUEST(pack, request) do {\ } while (0) #define PACK_OP_IMPORT_REPLY(pack, reply) do {\ usbip_net_pack_usb_device(pack, &(reply)->udev);\ } while (0) /* ---------------------------------------------------------------------- */ /* Export a USB device to a remote host. */ #define OP_EXPORT 0x06 #define OP_REQ_EXPORT (OP_REQUEST | OP_EXPORT) #define OP_REP_EXPORT (OP_REPLY | OP_EXPORT) struct op_export_request { struct usbip_usb_device udev; } __attribute__((packed)); struct op_export_reply { int returncode; } __attribute__((packed)); #define PACK_OP_EXPORT_REQUEST(pack, request) do {\ usbip_net_pack_usb_device(pack, &(request)->udev);\ } while (0) #define PACK_OP_EXPORT_REPLY(pack, reply) do {\ } while (0) /* ---------------------------------------------------------------------- */ /* un-Export a USB device from a remote host. */ #define OP_UNEXPORT 0x07 #define OP_REQ_UNEXPORT (OP_REQUEST | OP_UNEXPORT) #define OP_REP_UNEXPORT (OP_REPLY | OP_UNEXPORT) struct op_unexport_request { struct usbip_usb_device udev; } __attribute__((packed)); struct op_unexport_reply { int returncode; } __attribute__((packed)); #define PACK_OP_UNEXPORT_REQUEST(pack, request) do {\ usbip_net_pack_usb_device(pack, &(request)->udev);\ } while (0) #define PACK_OP_UNEXPORT_REPLY(pack, reply) do {\ } while (0) /* ---------------------------------------------------------------------- */ /* Negotiate IPSec encryption key. 
(still not used) */ #define OP_CRYPKEY 0x04 #define OP_REQ_CRYPKEY (OP_REQUEST | OP_CRYPKEY) #define OP_REP_CRYPKEY (OP_REPLY | OP_CRYPKEY) struct op_crypkey_request { /* 128bit key */ uint32_t key[4]; } __attribute__((packed)); struct op_crypkey_reply { uint32_t __reserved; } __attribute__((packed)); /* ---------------------------------------------------------------------- */ /* Retrieve the list of exported USB devices. */ #define OP_DEVLIST 0x05 #define OP_REQ_DEVLIST (OP_REQUEST | OP_DEVLIST) #define OP_REP_DEVLIST (OP_REPLY | OP_DEVLIST) struct op_devlist_request { } __attribute__((packed)); struct op_devlist_reply { uint32_t ndev; /* followed by reply_extra[] */ } __attribute__((packed)); struct op_devlist_reply_extra { struct usbip_usb_device udev; struct usbip_usb_interface uinf[]; } __attribute__((packed)); #define PACK_OP_DEVLIST_REQUEST(pack, request) do {\ } while (0) #define PACK_OP_DEVLIST_REPLY(pack, reply) do {\ (reply)->ndev = usbip_net_pack_uint32_t(pack, (reply)->ndev);\ } while (0) uint32_t usbip_net_pack_uint32_t(int pack, uint32_t num); uint16_t usbip_net_pack_uint16_t(int pack, uint16_t num); void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev); void usbip_net_pack_usb_interface(int pack, struct usbip_usb_interface *uinf); ssize_t usbip_net_recv(int sockfd, void *buff, size_t bufflen); ssize_t usbip_net_send(int sockfd, void *buff, size_t bufflen); int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status); int usbip_net_recv_op_common(int sockfd, uint16_t *code, int *status); int usbip_net_set_reuseaddr(int sockfd); int usbip_net_set_nodelay(int sockfd); int usbip_net_set_keepalive(int sockfd); int usbip_net_set_v6only(int sockfd); int usbip_net_tcp_connect(char *hostname, char *port); #endif /* __USBIP_NETWORK_H */
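/*
 * Example (editor's sketch, not part of the header above): a minimal
 * OP_REQ_DEVLIST exchange using only the helpers this header declares.
 * It assumes compilation inside the usbip tools tree, where
 * usbip_network.c supplies the function bodies; socket cleanup and
 * detailed error reporting are omitted for brevity.
 */
#include <stdio.h>
#include "usbip_network.h"

static int example_count_exported_devices(char *host)
{
	struct op_devlist_reply reply;
	uint16_t code = OP_REP_DEVLIST;	/* expected reply code */
	int status;
	int sockfd;

	sockfd = usbip_net_tcp_connect(host, usbip_port_string);
	if (sockfd < 0)
		return -1;

	/* Requests carry no meaningful status, so send 0. */
	if (usbip_net_send_op_common(sockfd, OP_REQ_DEVLIST, 0) < 0)
		return -1;

	/* Validate the common reply header (version/code/status). */
	if (usbip_net_recv_op_common(sockfd, &code, &status) < 0)
		return -1;

	/* Read the fixed part of the reply and fix its byte order. */
	if (usbip_net_recv(sockfd, &reply, sizeof(reply)) < 0)
		return -1;
	PACK_OP_DEVLIST_REPLY(0, &reply);

	/* reply_extra[] entries (one per device) would follow here. */
	return (int)reply.ndev;
}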
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ #ifndef HWS_BWC_COMPLEX_H_ #define HWS_BWC_COMPLEX_H_ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx, u8 match_criteria_enable, struct mlx5hws_match_parameters *mask); int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher, struct mlx5hws_table *table, u32 priority, u8 match_criteria_enable, struct mlx5hws_match_parameters *mask); void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher); int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher); int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule, struct mlx5hws_match_parameters *params, u32 flow_source, struct mlx5hws_rule_action rule_actions[], u16 bwc_queue_idx); int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule); #endif /* HWS_BWC_COMPLEX_H_ */
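/*
 * Example (editor's sketch, not part of the header above): the call ordering
 * this API implies. All objects (context, table, matcher, rule, mask, params,
 * actions) are assumed to be set up elsewhere in the mlx5 driver; the
 * priority, flow_source and queue-index arguments below are placeholder
 * values.
 */
#include <linux/errno.h>

static int example_complex_rule(struct mlx5hws_context *ctx,
				struct mlx5hws_table *table,
				struct mlx5hws_bwc_matcher *bwc_matcher,
				struct mlx5hws_bwc_rule *bwc_rule,
				u8 match_criteria_enable,
				struct mlx5hws_match_parameters *mask,
				struct mlx5hws_match_parameters *params,
				struct mlx5hws_rule_action rule_actions[])
{
	int ret;

	/* Only masks too wide for a single matcher take the complex path. */
	if (!mlx5hws_bwc_match_params_is_complex(ctx, match_criteria_enable,
						 mask))
		return -EINVAL;

	ret = mlx5hws_bwc_matcher_create_complex(bwc_matcher, table, 0,
						 match_criteria_enable, mask);
	if (ret)
		return ret;

	ret = mlx5hws_bwc_rule_create_complex(bwc_rule, params, 0,
					      rule_actions, 0);
	if (ret)
		mlx5hws_bwc_matcher_destroy_complex(bwc_matcher);

	return ret;
}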
// SPDX-License-Identifier: GPL-2.0 /* ptrace.c: Sparc process tracing support. * * Copyright (C) 1996, 2008 David S. Miller ([email protected]) * * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson, * and David Mosberger. * * Added Linux support -miguel (weird, eh?, the original code was meant * to emulate SunOS). */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/smp.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/regset.h> #include <linux/elf.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include "kernel.h" /* #define ALLOW_INIT_TRACING */ /* * Called by kernel/ptrace.c when detaching.. * * Make sure single step bits etc are not set. */ void ptrace_disable(struct task_struct *child) { /* nothing to do */ } enum sparc_regset { REGSET_GENERAL, REGSET_FP, }; static int regwindow32_get(struct task_struct *target, const struct pt_regs *regs, u32 *uregs) { unsigned long reg_window = regs->u_regs[UREG_I6]; int size = 16 * sizeof(u32); if (target == current) { if (copy_from_user(uregs, (void __user *)reg_window, size)) return -EFAULT; } else { if (access_process_vm(target, reg_window, uregs, size, FOLL_FORCE) != size) return -EFAULT; } return 0; } static int regwindow32_set(struct task_struct *target, const struct pt_regs *regs, u32 *uregs) { unsigned long reg_window = regs->u_regs[UREG_I6]; int size = 16 * sizeof(u32); if (target == current) { if (copy_to_user((void __user *)reg_window, uregs, size)) return -EFAULT; } else { if (access_process_vm(target, reg_window, uregs, size, FOLL_FORCE | FOLL_WRITE) != size) return -EFAULT; } return 0; } static int genregs32_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { const struct pt_regs *regs = target->thread.kregs; u32 uregs[16]; if (target == current) flush_user_windows(); membuf_write(&to, regs->u_regs, 16 * sizeof(u32)); if (!to.left) return 0; if (regwindow32_get(target, regs, uregs)) return -EFAULT; membuf_write(&to, uregs, 16 * sizeof(u32)); membuf_store(&to, regs->psr); membuf_store(&to, regs->pc); membuf_store(&to, regs->npc); membuf_store(&to, regs->y); return membuf_zero(&to, 2 * sizeof(u32)); } static int genregs32_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct pt_regs *regs = target->thread.kregs; u32 uregs[16]; u32 psr; int ret; if (target == current) flush_user_windows(); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs->u_regs, 0, 16 * sizeof(u32)); if (ret || !count) return ret; if (regwindow32_get(target, regs, uregs)) return -EFAULT; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 16 * sizeof(u32), 32 * sizeof(u32)); if (ret) return ret; if (regwindow32_set(target, regs, uregs)) return -EFAULT; if (!count) return 0; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &psr, 32 * sizeof(u32), 33 * sizeof(u32)); if (ret) return ret; regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) | (psr & (PSR_ICC | PSR_SYSCALL)); if (!count) return 0; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs->pc, 33 * sizeof(u32), 34 * sizeof(u32)); if (ret || !count) return ret; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs->npc, 34 * sizeof(u32), 35 * sizeof(u32)); if (ret || !count) return ret; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs->y, 35 * sizeof(u32), 36 * sizeof(u32)); if (ret 
|| !count) return ret; user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 36 * sizeof(u32), 38 * sizeof(u32)); return 0; } static int fpregs32_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { #if 0 if (target == current) save_and_clear_fpu(); #endif membuf_write(&to, target->thread.float_regs, 32 * sizeof(u32)); membuf_zero(&to, sizeof(u32)); membuf_write(&to, &target->thread.fsr, sizeof(u32)); membuf_store(&to, (u32)((1 << 8) | (8 << 16))); return membuf_zero(&to, 64 * sizeof(u32)); } static int fpregs32_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { unsigned long *fpregs = target->thread.float_regs; int ret; #if 0 if (target == current) save_and_clear_fpu(); #endif ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fpregs, 0, 32 * sizeof(u32)); if (!ret) user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 32 * sizeof(u32), 33 * sizeof(u32)); if (!ret) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.fsr, 33 * sizeof(u32), 34 * sizeof(u32)); if (!ret) user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 34 * sizeof(u32), -1); return ret; } static const struct user_regset sparc32_regsets[] = { /* Format is: * G0 --> G7 * O0 --> O7 * L0 --> L7 * I0 --> I7 * PSR, PC, nPC, Y, WIM, TBR */ [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, .n = 38, .size = sizeof(u32), .align = sizeof(u32), .regset_get = genregs32_get, .set = genregs32_set }, /* Format is: * F0 --> F31 * empty 32-bit word * FSR (32--bit word) * FPU QUEUE COUNT (8-bit char) * FPU QUEUE ENTRYSIZE (8-bit char) * FPU ENABLED (8-bit char) * empty 8-bit char * FPU QUEUE (64 32-bit ints) */ [REGSET_FP] = { .core_note_type = NT_PRFPREG, .n = 99, .size = sizeof(u32), .align = sizeof(u32), .regset_get = fpregs32_get, .set = fpregs32_set }, }; static int getregs_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { const struct pt_regs *regs = target->thread.kregs; if (target == current) flush_user_windows(); membuf_store(&to, regs->psr); membuf_store(&to, regs->pc); membuf_store(&to, regs->npc); membuf_store(&to, regs->y); return membuf_write(&to, regs->u_regs + 1, 15 * sizeof(u32)); } static int setregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct pt_regs *regs = target->thread.kregs; u32 v[4]; int ret; if (target == current) flush_user_windows(); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, v, 0, 4 * sizeof(u32)); if (ret) return ret; regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) | (v[0] & (PSR_ICC | PSR_SYSCALL)); regs->pc = v[1]; regs->npc = v[2]; regs->y = v[3]; return user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs->u_regs + 1, 4 * sizeof(u32) , 19 * sizeof(u32)); } static int getfpregs_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { #if 0 if (target == current) save_and_clear_fpu(); #endif membuf_write(&to, &target->thread.float_regs, 32 * sizeof(u32)); membuf_write(&to, &target->thread.fsr, sizeof(u32)); return membuf_zero(&to, 35 * sizeof(u32)); } static int setfpregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { unsigned long *fpregs = target->thread.float_regs; int ret; #if 0 if (target == current) save_and_clear_fpu(); #endif ret = user_regset_copyin(&pos, &count, 
&kbuf, &ubuf, fpregs, 0, 32 * sizeof(u32)); if (ret) return ret; return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.fsr, 32 * sizeof(u32), 33 * sizeof(u32)); } static const struct user_regset ptrace32_regsets[] = { [REGSET_GENERAL] = { .n = 19, .size = sizeof(u32), .regset_get = getregs_get, .set = setregs_set, }, [REGSET_FP] = { .n = 68, .size = sizeof(u32), .regset_get = getfpregs_get, .set = setfpregs_set, }, }; static const struct user_regset_view ptrace32_view = { .regsets = ptrace32_regsets, .n = ARRAY_SIZE(ptrace32_regsets) }; static const struct user_regset_view user_sparc32_view = { .name = "sparc", .e_machine = EM_SPARC, .regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets) }; const struct user_regset_view *task_user_regset_view(struct task_struct *task) { return &user_sparc32_view; } struct fps { unsigned long regs[32]; unsigned long fsr; unsigned long flags; unsigned long extra; unsigned long fpqd; struct fq { unsigned long *insnaddr; unsigned long insn; } fpq[16]; }; long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4]; void __user *addr2p; struct pt_regs __user *pregs; struct fps __user *fps; int ret; addr2p = (void __user *) addr2; pregs = (struct pt_regs __user *) addr; fps = (struct fps __user *) addr; switch(request) { case PTRACE_GETREGS: { ret = copy_regset_to_user(child, &ptrace32_view, REGSET_GENERAL, 0, 19 * sizeof(u32), pregs); break; } case PTRACE_SETREGS: { ret = copy_regset_from_user(child, &ptrace32_view, REGSET_GENERAL, 0, 19 * sizeof(u32), pregs); break; } case PTRACE_GETFPREGS: { ret = copy_regset_to_user(child, &ptrace32_view, REGSET_FP, 0, 68 * sizeof(u32), fps); break; } case PTRACE_SETFPREGS: { ret = copy_regset_from_user(child, &ptrace32_view, REGSET_FP, 0, 33 * sizeof(u32), fps); break; } case PTRACE_READTEXT: case PTRACE_READDATA: ret = ptrace_readdata(child, addr, addr2p, data); if (ret == data) ret = 0; else if (ret >= 0) ret = -EIO; break; case PTRACE_WRITETEXT: case PTRACE_WRITEDATA: ret = ptrace_writedata(child, addr2p, addr, data); if (ret == data) ret = 0; else if (ret >= 0) ret = -EIO; break; default: if (request == PTRACE_SPARC_DETACH) request = PTRACE_DETACH; ret = ptrace_request(child, request, addr, data); break; } return ret; } asmlinkage int syscall_trace(struct pt_regs *regs, int syscall_exit_p) { int ret = 0; if (test_thread_flag(TIF_SYSCALL_TRACE)) { if (syscall_exit_p) ptrace_report_syscall_exit(regs, 0); else ret = ptrace_report_syscall_entry(regs); } return ret; }
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. * */ #include <linux/device.h> #include <linux/interconnect.h> #include <linux/interconnect-provider.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <dt-bindings/interconnect/qcom,qdu1000-rpmh.h> #include "bcm-voter.h" #include "icc-common.h" #include "icc-rpmh.h" #include "qdu1000.h" static struct qcom_icc_node qup0_core_master = { .name = "qup0_core_master", .id = QDU1000_MASTER_QUP_CORE_0, .channels = 1, .buswidth = 4, .num_links = 1, .links = { QDU1000_SLAVE_QUP_CORE_0 }, }; static struct qcom_icc_node qup1_core_master = { .name = "qup1_core_master", .id = QDU1000_MASTER_QUP_CORE_1, .channels = 1, .buswidth = 4, .num_links = 1, .links = { QDU1000_SLAVE_QUP_CORE_1 }, }; static struct qcom_icc_node alm_sys_tcu = { .name = "alm_sys_tcu", .id = QDU1000_MASTER_SYS_TCU, .channels = 1, .buswidth = 8, .num_links = 2, .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC }, }; static struct qcom_icc_node chm_apps = { .name = "chm_apps", .id = QDU1000_MASTER_APPSS_PROC, .channels = 1, .buswidth = 16, .num_links = 4, .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC, QDU1000_SLAVE_GEMNOC_MODEM_CNOC, QDU1000_SLAVE_MEM_NOC_PCIE_SNOC }, }; static struct qcom_icc_node qnm_ecpri_dma = { .name = "qnm_ecpri_dma", .id = QDU1000_MASTER_GEMNOC_ECPRI_DMA, .channels = 2, .buswidth = 32, .num_links = 2, .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC }, }; static struct qcom_icc_node qnm_fec_2_gemnoc = { .name = "qnm_fec_2_gemnoc", .id = QDU1000_MASTER_FEC_2_GEMNOC, .channels = 2, .buswidth = 32, .num_links = 2, .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC }, }; static struct qcom_icc_node qnm_pcie = { .name = "qnm_pcie", .id = QDU1000_MASTER_ANOC_PCIE_GEM_NOC, .channels = 1, .buswidth = 64, .num_links = 3, .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC, QDU1000_SLAVE_GEMNOC_MODEM_CNOC }, }; static struct qcom_icc_node qnm_snoc_gc = { .name = "qnm_snoc_gc", .id = QDU1000_MASTER_SNOC_GC_MEM_NOC, .channels = 1, .buswidth = 8, .num_links = 1, .links = { QDU1000_SLAVE_LLCC }, }; static struct qcom_icc_node qnm_snoc_sf = { .name = "qnm_snoc_sf", .id = QDU1000_MASTER_SNOC_SF_MEM_NOC, .channels = 1, .buswidth = 16, .num_links = 4, .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC, QDU1000_SLAVE_GEMNOC_MODEM_CNOC, QDU1000_SLAVE_MEM_NOC_PCIE_SNOC }, }; static struct qcom_icc_node qxm_mdsp = { .name = "qxm_mdsp", .id = QDU1000_MASTER_MSS_PROC, .channels = 1, .buswidth = 16, .num_links = 3, .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC, QDU1000_SLAVE_MEM_NOC_PCIE_SNOC }, }; static struct qcom_icc_node llcc_mc = { .name = "llcc_mc", .id = QDU1000_MASTER_LLCC, .channels = 8, .buswidth = 4, .num_links = 1, .links = { QDU1000_SLAVE_EBI1 }, }; static struct qcom_icc_node qhm_gic = { .name = "qhm_gic", .id = QDU1000_MASTER_GIC_AHB, .channels = 1, .buswidth = 4, .num_links = 1, .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF }, }; static struct qcom_icc_node qhm_qdss_bam = { .name = "qhm_qdss_bam", .id = QDU1000_MASTER_QDSS_BAM, .channels = 1, .buswidth = 4, .num_links = 1, .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF }, }; static struct qcom_icc_node qhm_qpic = { .name = "qhm_qpic", .id = QDU1000_MASTER_QPIC, .channels = 1, .buswidth = 4, .num_links = 1, .links = { QDU1000_SLAVE_A1NOC_SNOC }, }; static struct qcom_icc_node qhm_qspi = { .name = "qhm_qspi", .id = QDU1000_MASTER_QSPI_0, .channels = 
1, .buswidth = 4, .num_links = 1, .links = { QDU1000_SLAVE_A1NOC_SNOC }, }; static struct qcom_icc_node qhm_qup0 = { .name = "qhm_qup0", .id = QDU1000_MASTER_QUP_0, .channels = 1, .buswidth = 4, .num_links = 1, .links = { QDU1000_SLAVE_A1NOC_SNOC }, }; static struct qcom_icc_node qhm_qup1 = { .name = "qhm_qup1", .id = QDU1000_MASTER_QUP_1, .channels = 1, .buswidth = 4, .num_links = 1, .links = { QDU1000_SLAVE_A1NOC_SNOC }, }; static struct qcom_icc_node qhm_system_noc_cfg = { .name = "qhm_system_noc_cfg", .id = QDU1000_MASTER_SNOC_CFG, .channels = 1, .buswidth = 4, .num_links = 1, .links = { QDU1000_SLAVE_SERVICE_SNOC }, }; static struct qcom_icc_node qnm_aggre_noc = { .name = "qnm_aggre_noc", .id = QDU1000_MASTER_ANOC_SNOC, .channels = 1, .buswidth = 8, .num_links = 1, .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF }, }; static struct qcom_icc_node qnm_aggre_noc_gsi = { .name = "qnm_aggre_noc_gsi", .id = QDU1000_MASTER_ANOC_GSI, .channels = 1, .buswidth = 8, .num_links = 1, .links = { QDU1000_SLAVE_SNOC_GEM_NOC_GC }, }; static struct qcom_icc_node qnm_gemnoc_cnoc = { .name = "qnm_gemnoc_cnoc", .id = QDU1000_MASTER_GEM_NOC_CNOC, .channels = 1, .buswidth = 16, .num_links = 36, .links = { QDU1000_SLAVE_AHB2PHY_SOUTH, QDU1000_SLAVE_AHB2PHY_NORTH, QDU1000_SLAVE_AHB2PHY_EAST, QDU1000_SLAVE_AOSS, QDU1000_SLAVE_CLK_CTL, QDU1000_SLAVE_RBCPR_CX_CFG, QDU1000_SLAVE_RBCPR_MX_CFG, QDU1000_SLAVE_CRYPTO_0_CFG, QDU1000_SLAVE_ECPRI_CFG, QDU1000_SLAVE_IMEM_CFG, QDU1000_SLAVE_IPC_ROUTER_CFG, QDU1000_SLAVE_CNOC_MSS, QDU1000_SLAVE_PCIE_CFG, QDU1000_SLAVE_PDM, QDU1000_SLAVE_PIMEM_CFG, QDU1000_SLAVE_PRNG, QDU1000_SLAVE_QDSS_CFG, QDU1000_SLAVE_QPIC, QDU1000_SLAVE_QSPI_0, QDU1000_SLAVE_QUP_0, QDU1000_SLAVE_QUP_1, QDU1000_SLAVE_SDCC_2, QDU1000_SLAVE_SMBUS_CFG, QDU1000_SLAVE_SNOC_CFG, QDU1000_SLAVE_TCSR, QDU1000_SLAVE_TLMM, QDU1000_SLAVE_TME_CFG, QDU1000_SLAVE_TSC_CFG, QDU1000_SLAVE_USB3_0, QDU1000_SLAVE_VSENSE_CTRL_CFG, QDU1000_SLAVE_DDRSS_CFG, QDU1000_SLAVE_IMEM, QDU1000_SLAVE_PIMEM, QDU1000_SLAVE_ETHERNET_SS, QDU1000_SLAVE_QDSS_STM, QDU1000_SLAVE_TCU }, }; static struct qcom_icc_node qnm_gemnoc_modem_slave = { .name = "qnm_gemnoc_modem_slave", .id = QDU1000_MASTER_GEMNOC_MODEM_CNOC, .channels = 1, .buswidth = 16, .num_links = 1, .links = { QDU1000_SLAVE_MODEM_OFFLINE }, }; static struct qcom_icc_node qnm_gemnoc_pcie = { .name = "qnm_gemnoc_pcie", .id = QDU1000_MASTER_GEM_NOC_PCIE_SNOC, .channels = 1, .buswidth = 16, .num_links = 1, .links = { QDU1000_SLAVE_PCIE_0 }, }; static struct qcom_icc_node qxm_crypto = { .name = "qxm_crypto", .id = QDU1000_MASTER_CRYPTO, .channels = 1, .buswidth = 8, .num_links = 1, .links = { QDU1000_SLAVE_A1NOC_SNOC }, }; static struct qcom_icc_node qxm_ecpri_gsi = { .name = "qxm_ecpri_gsi", .id = QDU1000_MASTER_ECPRI_GSI, .channels = 1, .buswidth = 8, .num_links = 2, .links = { QDU1000_SLAVE_ANOC_SNOC_GSI, QDU1000_SLAVE_PCIE_0 }, }; static struct qcom_icc_node qxm_pimem = { .name = "qxm_pimem", .id = QDU1000_MASTER_PIMEM, .channels = 1, .buswidth = 8, .num_links = 1, .links = { QDU1000_SLAVE_SNOC_GEM_NOC_GC }, }; static struct qcom_icc_node xm_ecpri_dma = { .name = "xm_ecpri_dma", .id = QDU1000_MASTER_SNOC_ECPRI_DMA, .channels = 2, .buswidth = 32, .num_links = 2, .links = { QDU1000_SLAVE_ECPRI_GEMNOC, QDU1000_SLAVE_PCIE_0 }, }; static struct qcom_icc_node xm_gic = { .name = "xm_gic", .id = QDU1000_MASTER_GIC, .channels = 1, .buswidth = 8, .num_links = 1, .links = { QDU1000_SLAVE_SNOC_GEM_NOC_GC }, }; static struct qcom_icc_node xm_pcie = { .name = "xm_pcie", .id = QDU1000_MASTER_PCIE, 
.channels = 1, .buswidth = 64, .num_links = 1, .links = { QDU1000_SLAVE_ANOC_PCIE_GEM_NOC }, }; static struct qcom_icc_node xm_qdss_etr0 = { .name = "xm_qdss_etr0", .id = QDU1000_MASTER_QDSS_ETR, .channels = 1, .buswidth = 8, .num_links = 1, .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF }, }; static struct qcom_icc_node xm_qdss_etr1 = { .name = "xm_qdss_etr1", .id = QDU1000_MASTER_QDSS_ETR_1, .channels = 1, .buswidth = 8, .num_links = 1, .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF }, }; static struct qcom_icc_node xm_sdc = { .name = "xm_sdc", .id = QDU1000_MASTER_SDCC_1, .channels = 1, .buswidth = 8, .num_links = 1, .links = { QDU1000_SLAVE_A1NOC_SNOC }, }; static struct qcom_icc_node xm_usb3 = { .name = "xm_usb3", .id = QDU1000_MASTER_USB3, .channels = 1, .buswidth = 8, .num_links = 1, .links = { QDU1000_SLAVE_A1NOC_SNOC }, }; static struct qcom_icc_node qup0_core_slave = { .name = "qup0_core_slave", .id = QDU1000_SLAVE_QUP_CORE_0, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qup1_core_slave = { .name = "qup1_core_slave", .id = QDU1000_SLAVE_QUP_CORE_1, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qns_gem_noc_cnoc = { .name = "qns_gem_noc_cnoc", .id = QDU1000_SLAVE_GEM_NOC_CNOC, .channels = 1, .buswidth = 16, .num_links = 1, .links = { QDU1000_MASTER_GEM_NOC_CNOC }, }; static struct qcom_icc_node qns_llcc = { .name = "qns_llcc", .id = QDU1000_SLAVE_LLCC, .channels = 8, .buswidth = 16, .num_links = 1, .links = { QDU1000_MASTER_LLCC }, }; static struct qcom_icc_node qns_modem_slave = { .name = "qns_modem_slave", .id = QDU1000_SLAVE_GEMNOC_MODEM_CNOC, .channels = 1, .buswidth = 16, .num_links = 1, .links = { QDU1000_MASTER_GEMNOC_MODEM_CNOC }, }; static struct qcom_icc_node qns_pcie = { .name = "qns_pcie", .id = QDU1000_SLAVE_MEM_NOC_PCIE_SNOC, .channels = 1, .buswidth = 16, .num_links = 1, .links = { QDU1000_MASTER_GEM_NOC_PCIE_SNOC }, }; static struct qcom_icc_node ebi = { .name = "ebi", .id = QDU1000_SLAVE_EBI1, .channels = 8, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_ahb2phy0_south = { .name = "qhs_ahb2phy0_south", .id = QDU1000_SLAVE_AHB2PHY_SOUTH, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_ahb2phy1_north = { .name = "qhs_ahb2phy1_north", .id = QDU1000_SLAVE_AHB2PHY_NORTH, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_ahb2phy2_east = { .name = "qhs_ahb2phy2_east", .id = QDU1000_SLAVE_AHB2PHY_EAST, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_aoss = { .name = "qhs_aoss", .id = QDU1000_SLAVE_AOSS, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_clk_ctl = { .name = "qhs_clk_ctl", .id = QDU1000_SLAVE_CLK_CTL, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_cpr_cx = { .name = "qhs_cpr_cx", .id = QDU1000_SLAVE_RBCPR_CX_CFG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_cpr_mx = { .name = "qhs_cpr_mx", .id = QDU1000_SLAVE_RBCPR_MX_CFG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_crypto_cfg = { .name = "qhs_crypto_cfg", .id = QDU1000_SLAVE_CRYPTO_0_CFG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_ecpri_cfg = { .name = "qhs_ecpri_cfg", .id = QDU1000_SLAVE_ECPRI_CFG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_imem_cfg = { .name = "qhs_imem_cfg", .id = QDU1000_SLAVE_IMEM_CFG, .channels = 1, .buswidth = 
4, .num_links = 0, }; static struct qcom_icc_node qhs_ipc_router = { .name = "qhs_ipc_router", .id = QDU1000_SLAVE_IPC_ROUTER_CFG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_mss_cfg = { .name = "qhs_mss_cfg", .id = QDU1000_SLAVE_CNOC_MSS, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_pcie_cfg = { .name = "qhs_pcie_cfg", .id = QDU1000_SLAVE_PCIE_CFG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_pdm = { .name = "qhs_pdm", .id = QDU1000_SLAVE_PDM, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_pimem_cfg = { .name = "qhs_pimem_cfg", .id = QDU1000_SLAVE_PIMEM_CFG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_prng = { .name = "qhs_prng", .id = QDU1000_SLAVE_PRNG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_qdss_cfg = { .name = "qhs_qdss_cfg", .id = QDU1000_SLAVE_QDSS_CFG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_qpic = { .name = "qhs_qpic", .id = QDU1000_SLAVE_QPIC, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_qspi = { .name = "qhs_qspi", .id = QDU1000_SLAVE_QSPI_0, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_qup0 = { .name = "qhs_qup0", .id = QDU1000_SLAVE_QUP_0, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_qup1 = { .name = "qhs_qup1", .id = QDU1000_SLAVE_QUP_1, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_sdc2 = { .name = "qhs_sdc2", .id = QDU1000_SLAVE_SDCC_2, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_smbus_cfg = { .name = "qhs_smbus_cfg", .id = QDU1000_SLAVE_SMBUS_CFG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_system_noc_cfg = { .name = "qhs_system_noc_cfg", .id = QDU1000_SLAVE_SNOC_CFG, .channels = 1, .buswidth = 4, .num_links = 1, .links = { QDU1000_MASTER_SNOC_CFG }, }; static struct qcom_icc_node qhs_tcsr = { .name = "qhs_tcsr", .id = QDU1000_SLAVE_TCSR, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_tlmm = { .name = "qhs_tlmm", .id = QDU1000_SLAVE_TLMM, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_tme_cfg = { .name = "qhs_tme_cfg", .id = QDU1000_SLAVE_TME_CFG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_tsc_cfg = { .name = "qhs_tsc_cfg", .id = QDU1000_SLAVE_TSC_CFG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_usb3 = { .name = "qhs_usb3", .id = QDU1000_SLAVE_USB3_0, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qhs_vsense_ctrl_cfg = { .name = "qhs_vsense_ctrl_cfg", .id = QDU1000_SLAVE_VSENSE_CTRL_CFG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qns_a1noc_snoc = { .name = "qns_a1noc_snoc", .id = QDU1000_SLAVE_A1NOC_SNOC, .channels = 1, .buswidth = 8, .num_links = 1, .links = { QDU1000_MASTER_ANOC_SNOC }, }; static struct qcom_icc_node qns_anoc_snoc_gsi = { .name = "qns_anoc_snoc_gsi", .id = QDU1000_SLAVE_ANOC_SNOC_GSI, .channels = 1, .buswidth = 8, .num_links = 1, .links = { QDU1000_MASTER_ANOC_GSI }, }; static struct qcom_icc_node qns_ddrss_cfg = { .name = "qns_ddrss_cfg", .id = QDU1000_SLAVE_DDRSS_CFG, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node qns_ecpri_gemnoc = { .name = 
"qns_ecpri_gemnoc", .id = QDU1000_SLAVE_ECPRI_GEMNOC, .channels = 2, .buswidth = 32, .num_links = 1, .links = { QDU1000_MASTER_GEMNOC_ECPRI_DMA }, }; static struct qcom_icc_node qns_gemnoc_gc = { .name = "qns_gemnoc_gc", .id = QDU1000_SLAVE_SNOC_GEM_NOC_GC, .channels = 1, .buswidth = 8, .num_links = 1, .links = { QDU1000_MASTER_SNOC_GC_MEM_NOC }, }; static struct qcom_icc_node qns_gemnoc_sf = { .name = "qns_gemnoc_sf", .id = QDU1000_SLAVE_SNOC_GEM_NOC_SF, .channels = 1, .buswidth = 16, .num_links = 1, .links = { QDU1000_MASTER_SNOC_SF_MEM_NOC }, }; static struct qcom_icc_node qns_modem = { .name = "qns_modem", .id = QDU1000_SLAVE_MODEM_OFFLINE, .channels = 1, .buswidth = 32, .num_links = 0, }; static struct qcom_icc_node qns_pcie_gemnoc = { .name = "qns_pcie_gemnoc", .id = QDU1000_SLAVE_ANOC_PCIE_GEM_NOC, .channels = 1, .buswidth = 64, .num_links = 1, .links = { QDU1000_MASTER_ANOC_PCIE_GEM_NOC }, }; static struct qcom_icc_node qxs_imem = { .name = "qxs_imem", .id = QDU1000_SLAVE_IMEM, .channels = 1, .buswidth = 8, .num_links = 0, }; static struct qcom_icc_node qxs_pimem = { .name = "qxs_pimem", .id = QDU1000_SLAVE_PIMEM, .channels = 1, .buswidth = 8, .num_links = 0, }; static struct qcom_icc_node srvc_system_noc = { .name = "srvc_system_noc", .id = QDU1000_SLAVE_SERVICE_SNOC, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node xs_ethernet_ss = { .name = "xs_ethernet_ss", .id = QDU1000_SLAVE_ETHERNET_SS, .channels = 1, .buswidth = 32, .num_links = 0, }; static struct qcom_icc_node xs_pcie = { .name = "xs_pcie", .id = QDU1000_SLAVE_PCIE_0, .channels = 1, .buswidth = 64, .num_links = 0, }; static struct qcom_icc_node xs_qdss_stm = { .name = "xs_qdss_stm", .id = QDU1000_SLAVE_QDSS_STM, .channels = 1, .buswidth = 4, .num_links = 0, }; static struct qcom_icc_node xs_sys_tcu_cfg = { .name = "xs_sys_tcu_cfg", .id = QDU1000_SLAVE_TCU, .channels = 1, .buswidth = 8, .num_links = 0, }; static struct qcom_icc_bcm bcm_acv = { .name = "ACV", .enable_mask = BIT(3), .num_nodes = 1, .nodes = { &ebi }, }; static struct qcom_icc_bcm bcm_ce0 = { .name = "CE0", .num_nodes = 1, .nodes = { &qxm_crypto }, }; static struct qcom_icc_bcm bcm_cn0 = { .name = "CN0", .num_nodes = 44, .nodes = { &qhm_qpic, &qhm_qspi, &qnm_gemnoc_cnoc, &qnm_gemnoc_modem_slave, &qnm_gemnoc_pcie, &xm_sdc, &xm_usb3, &qhs_ahb2phy0_south, &qhs_ahb2phy1_north, &qhs_ahb2phy2_east, &qhs_aoss, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto_cfg, &qhs_ecpri_cfg, &qhs_imem_cfg, &qhs_ipc_router, &qhs_mss_cfg, &qhs_pcie_cfg, &qhs_pdm, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qpic, &qhs_qspi, &qhs_qup0, &qhs_qup1, &qhs_sdc2, &qhs_smbus_cfg, &qhs_system_noc_cfg, &qhs_tcsr, &qhs_tlmm, &qhs_tme_cfg, &qhs_tsc_cfg, &qhs_usb3, &qhs_vsense_ctrl_cfg, &qns_ddrss_cfg, &qns_modem, &qxs_imem, &qxs_pimem, &xs_ethernet_ss, &xs_qdss_stm, &xs_sys_tcu_cfg }, }; static struct qcom_icc_bcm bcm_mc0 = { .name = "MC0", .num_nodes = 1, .nodes = { &ebi }, }; static struct qcom_icc_bcm bcm_qup0 = { .name = "QUP0", .num_nodes = 2, .nodes = { &qup0_core_slave, &qup1_core_slave }, }; static struct qcom_icc_bcm bcm_sh0 = { .name = "SH0", .num_nodes = 1, .nodes = { &qns_llcc }, }; static struct qcom_icc_bcm bcm_sh1 = { .name = "SH1", .num_nodes = 11, .nodes = { &alm_sys_tcu, &chm_apps, &qnm_ecpri_dma, &qnm_fec_2_gemnoc, &qnm_pcie, &qnm_snoc_gc, &qnm_snoc_sf, &qxm_mdsp, &qns_gem_noc_cnoc, &qns_modem_slave, &qns_pcie }, }; static struct qcom_icc_bcm bcm_sn0 = { .name = "SN0", .num_nodes = 1, .nodes = { &qns_gemnoc_sf }, }; static struct 
qcom_icc_bcm bcm_sn1 = { .name = "SN1", .num_nodes = 6, .nodes = { &qhm_gic, &qxm_pimem, &xm_gic, &xm_qdss_etr0, &xm_qdss_etr1, &qns_gemnoc_gc }, }; static struct qcom_icc_bcm bcm_sn2 = { .name = "SN2", .num_nodes = 5, .nodes = { &qnm_aggre_noc, &qxm_ecpri_gsi, &xm_ecpri_dma, &qns_anoc_snoc_gsi, &qns_ecpri_gemnoc }, }; static struct qcom_icc_bcm bcm_sn7 = { .name = "SN7", .num_nodes = 2, .nodes = { &qns_pcie_gemnoc, &xs_pcie }, }; static struct qcom_icc_bcm * const clk_virt_bcms[] = { &bcm_qup0, }; static struct qcom_icc_node * const clk_virt_nodes[] = { [MASTER_QUP_CORE_0] = &qup0_core_master, [MASTER_QUP_CORE_1] = &qup1_core_master, [SLAVE_QUP_CORE_0] = &qup0_core_slave, [SLAVE_QUP_CORE_1] = &qup1_core_slave, }; static const struct qcom_icc_desc qdu1000_clk_virt = { .nodes = clk_virt_nodes, .num_nodes = ARRAY_SIZE(clk_virt_nodes), .bcms = clk_virt_bcms, .num_bcms = ARRAY_SIZE(clk_virt_bcms), }; static struct qcom_icc_bcm * const gem_noc_bcms[] = { &bcm_sh0, &bcm_sh1, }; static struct qcom_icc_node * const gem_noc_nodes[] = { [MASTER_SYS_TCU] = &alm_sys_tcu, [MASTER_APPSS_PROC] = &chm_apps, [MASTER_GEMNOC_ECPRI_DMA] = &qnm_ecpri_dma, [MASTER_FEC_2_GEMNOC] = &qnm_fec_2_gemnoc, [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie, [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc, [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf, [MASTER_MSS_PROC] = &qxm_mdsp, [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc, [SLAVE_LLCC] = &qns_llcc, [SLAVE_GEMNOC_MODEM_CNOC] = &qns_modem_slave, [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie, }; static const struct qcom_icc_desc qdu1000_gem_noc = { .nodes = gem_noc_nodes, .num_nodes = ARRAY_SIZE(gem_noc_nodes), .bcms = gem_noc_bcms, .num_bcms = ARRAY_SIZE(gem_noc_bcms), }; static struct qcom_icc_bcm * const mc_virt_bcms[] = { &bcm_acv, &bcm_mc0, }; static struct qcom_icc_node * const mc_virt_nodes[] = { [MASTER_LLCC] = &llcc_mc, [SLAVE_EBI1] = &ebi, }; static const struct qcom_icc_desc qdu1000_mc_virt = { .nodes = mc_virt_nodes, .num_nodes = ARRAY_SIZE(mc_virt_nodes), .bcms = mc_virt_bcms, .num_bcms = ARRAY_SIZE(mc_virt_bcms), }; static struct qcom_icc_bcm * const system_noc_bcms[] = { &bcm_ce0, &bcm_cn0, &bcm_sn0, &bcm_sn1, &bcm_sn2, &bcm_sn7, }; static struct qcom_icc_node * const system_noc_nodes[] = { [MASTER_GIC_AHB] = &qhm_gic, [MASTER_QDSS_BAM] = &qhm_qdss_bam, [MASTER_QPIC] = &qhm_qpic, [MASTER_QSPI_0] = &qhm_qspi, [MASTER_QUP_0] = &qhm_qup0, [MASTER_QUP_1] = &qhm_qup1, [MASTER_SNOC_CFG] = &qhm_system_noc_cfg, [MASTER_ANOC_SNOC] = &qnm_aggre_noc, [MASTER_ANOC_GSI] = &qnm_aggre_noc_gsi, [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc, [MASTER_GEMNOC_MODEM_CNOC] = &qnm_gemnoc_modem_slave, [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie, [MASTER_CRYPTO] = &qxm_crypto, [MASTER_ECPRI_GSI] = &qxm_ecpri_gsi, [MASTER_PIMEM] = &qxm_pimem, [MASTER_SNOC_ECPRI_DMA] = &xm_ecpri_dma, [MASTER_GIC] = &xm_gic, [MASTER_PCIE] = &xm_pcie, [MASTER_QDSS_ETR] = &xm_qdss_etr0, [MASTER_QDSS_ETR_1] = &xm_qdss_etr1, [MASTER_SDCC_1] = &xm_sdc, [MASTER_USB3] = &xm_usb3, [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0_south, [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1_north, [SLAVE_AHB2PHY_EAST] = &qhs_ahb2phy2_east, [SLAVE_AOSS] = &qhs_aoss, [SLAVE_CLK_CTL] = &qhs_clk_ctl, [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx, [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx, [SLAVE_CRYPTO_0_CFG] = &qhs_crypto_cfg, [SLAVE_ECPRI_CFG] = &qhs_ecpri_cfg, [SLAVE_IMEM_CFG] = &qhs_imem_cfg, [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router, [SLAVE_CNOC_MSS] = &qhs_mss_cfg, [SLAVE_PCIE_CFG] = &qhs_pcie_cfg, [SLAVE_PDM] = &qhs_pdm, [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg, [SLAVE_PRNG] = &qhs_prng, 
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg, [SLAVE_QPIC] = &qhs_qpic, [SLAVE_QSPI_0] = &qhs_qspi, [SLAVE_QUP_0] = &qhs_qup0, [SLAVE_QUP_1] = &qhs_qup1, [SLAVE_SDCC_2] = &qhs_sdc2, [SLAVE_SMBUS_CFG] = &qhs_smbus_cfg, [SLAVE_SNOC_CFG] = &qhs_system_noc_cfg, [SLAVE_TCSR] = &qhs_tcsr, [SLAVE_TLMM] = &qhs_tlmm, [SLAVE_TME_CFG] = &qhs_tme_cfg, [SLAVE_TSC_CFG] = &qhs_tsc_cfg, [SLAVE_USB3_0] = &qhs_usb3, [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg, [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc, [SLAVE_ANOC_SNOC_GSI] = &qns_anoc_snoc_gsi, [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg, [SLAVE_ECPRI_GEMNOC] = &qns_ecpri_gemnoc, [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc, [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf, [SLAVE_MODEM_OFFLINE] = &qns_modem, [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_gemnoc, [SLAVE_IMEM] = &qxs_imem, [SLAVE_PIMEM] = &qxs_pimem, [SLAVE_SERVICE_SNOC] = &srvc_system_noc, [SLAVE_ETHERNET_SS] = &xs_ethernet_ss, [SLAVE_PCIE_0] = &xs_pcie, [SLAVE_QDSS_STM] = &xs_qdss_stm, [SLAVE_TCU] = &xs_sys_tcu_cfg, }; static const struct qcom_icc_desc qdu1000_system_noc = { .nodes = system_noc_nodes, .num_nodes = ARRAY_SIZE(system_noc_nodes), .bcms = system_noc_bcms, .num_bcms = ARRAY_SIZE(system_noc_bcms), }; static int qnoc_probe(struct platform_device *pdev) { int ret; ret = qcom_icc_rpmh_probe(pdev); if (ret) dev_err(&pdev->dev, "failed to register ICC provider\n"); return ret; } static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,qdu1000-clk-virt", .data = &qdu1000_clk_virt }, { .compatible = "qcom,qdu1000-gem-noc", .data = &qdu1000_gem_noc }, { .compatible = "qcom,qdu1000-mc-virt", .data = &qdu1000_mc_virt }, { .compatible = "qcom,qdu1000-system-noc", .data = &qdu1000_system_noc }, { } }; MODULE_DEVICE_TABLE(of, qnoc_of_match); static struct platform_driver qnoc_driver = { .probe = qnoc_probe, .remove = qcom_icc_rpmh_remove, .driver = { .name = "qnoc-qdu1000", .of_match_table = qnoc_of_match, }, }; static int __init qnoc_driver_init(void) { return platform_driver_register(&qnoc_driver); } core_initcall(qnoc_driver_init); static void __exit qnoc_driver_exit(void) { platform_driver_unregister(&qnoc_driver); } module_exit(qnoc_driver_exit); MODULE_DESCRIPTION("QDU1000 NoC driver"); MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright © 2000-2010 David Woodhouse <[email protected]> * Steven J. Hill <[email protected]> * Thomas Gleixner <[email protected]> * * Contains all ONFI related definitions */ #ifndef __LINUX_MTD_ONFI_H #define __LINUX_MTD_ONFI_H #include <linux/types.h> #include <linux/bitfield.h> /* ONFI version bits */ #define ONFI_VERSION_1_0 BIT(1) #define ONFI_VERSION_2_0 BIT(2) #define ONFI_VERSION_2_1 BIT(3) #define ONFI_VERSION_2_2 BIT(4) #define ONFI_VERSION_2_3 BIT(5) #define ONFI_VERSION_3_0 BIT(6) #define ONFI_VERSION_3_1 BIT(7) #define ONFI_VERSION_3_2 BIT(8) #define ONFI_VERSION_4_0 BIT(9) /* ONFI features */ #define ONFI_FEATURE_16_BIT_BUS BIT(0) #define ONFI_FEATURE_NV_DDR BIT(5) #define ONFI_FEATURE_EXT_PARAM_PAGE BIT(7) /* ONFI timing mode, used in both asynchronous and synchronous mode */ #define ONFI_DATA_INTERFACE_SDR 0 #define ONFI_DATA_INTERFACE_NVDDR BIT(4) #define ONFI_DATA_INTERFACE_NVDDR2 BIT(5) #define ONFI_TIMING_MODE_0 BIT(0) #define ONFI_TIMING_MODE_1 BIT(1) #define ONFI_TIMING_MODE_2 BIT(2) #define ONFI_TIMING_MODE_3 BIT(3) #define ONFI_TIMING_MODE_4 BIT(4) #define ONFI_TIMING_MODE_5 BIT(5) #define ONFI_TIMING_MODE_UNKNOWN BIT(6) #define ONFI_TIMING_MODE_PARAM(x) FIELD_GET(GENMASK(3, 0), (x)) /* ONFI feature number/address */ #define ONFI_FEATURE_NUMBER 256 #define ONFI_FEATURE_ADDR_TIMING_MODE 0x1 /* Vendor-specific feature address (Micron) */ #define ONFI_FEATURE_ADDR_READ_RETRY 0x89 #define ONFI_FEATURE_ON_DIE_ECC 0x90 #define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3) /* ONFI subfeature parameters length */ #define ONFI_SUBFEATURE_PARAM_LEN 4 /* ONFI optional commands SET/GET FEATURES supported? */ #define ONFI_OPT_CMD_READ_CACHE BIT(1) #define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2) struct nand_onfi_params { /* rev info and features block */ /* 'O' 'N' 'F' 'I' */ u8 sig[4]; __le16 revision; __le16 features; __le16 opt_cmd; u8 reserved0[2]; __le16 ext_param_page_length; /* since ONFI 2.1 */ u8 num_of_param_pages; /* since ONFI 2.1 */ u8 reserved1[17]; /* manufacturer information block */ char manufacturer[12]; char model[20]; u8 jedec_id; __le16 date_code; u8 reserved2[13]; /* memory organization block */ __le32 byte_per_page; __le16 spare_bytes_per_page; __le32 data_bytes_per_ppage; __le16 spare_bytes_per_ppage; __le32 pages_per_block; __le32 blocks_per_lun; u8 lun_count; u8 addr_cycles; u8 bits_per_cell; __le16 bb_per_lun; __le16 block_endurance; u8 guaranteed_good_blocks; __le16 guaranteed_block_endurance; u8 programs_per_page; u8 ppage_attr; u8 ecc_bits; u8 interleaved_bits; u8 interleaved_ops; u8 reserved3[13]; /* electrical parameter block */ u8 io_pin_capacitance_max; __le16 sdr_timing_modes; __le16 program_cache_timing_mode; __le16 t_prog; __le16 t_bers; __le16 t_r; __le16 t_ccs; u8 nvddr_timing_modes; u8 nvddr2_timing_modes; u8 nvddr_nvddr2_features; __le16 clk_pin_capacitance_typ; __le16 io_pin_capacitance_typ; __le16 input_pin_capacitance_typ; u8 input_pin_capacitance_max; u8 driver_strength_support; __le16 t_int_r; __le16 t_adl; u8 reserved4[8]; /* vendor */ __le16 vendor_revision; u8 vendor[88]; __le16 crc; } __packed; #define ONFI_CRC_BASE 0x4F4E /* Extended ECC information Block Definition (since ONFI 2.1) */ struct onfi_ext_ecc_info { u8 ecc_bits; u8 codeword_size; __le16 bb_per_lun; __le16 block_endurance; u8 reserved[2]; } __packed; #define ONFI_SECTION_TYPE_0 0 /* Unused section. */ #define ONFI_SECTION_TYPE_1 1 /* for additional sections. */ #define ONFI_SECTION_TYPE_2 2 /* for ECC information. 
*/ struct onfi_ext_section { u8 type; u8 length; } __packed; #define ONFI_EXT_SECTION_MAX 8 /* Extended Parameter Page Definition (since ONFI 2.1) */ struct onfi_ext_param_page { __le16 crc; u8 sig[4]; /* 'E' 'P' 'P' 'S' */ u8 reserved0[10]; struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX]; /* * The actual size of the Extended Parameter Page is in * @ext_param_page_length of nand_onfi_params{}. * The following are the variable length sections. * So we do not add any fields below. Please see the ONFI spec. */ } __packed; /** * struct onfi_params - ONFI specific parameters that will be reused * @version: ONFI version (BCD encoded), 0 if ONFI is not supported * @tPROG: Page program time * @tBERS: Block erase time * @tR: Page read time * @tCCS: Change column setup time * @fast_tCAD: Command/Address/Data slow or fast delay (NV-DDR only) * @sdr_timing_modes: Supported asynchronous/SDR timing modes * @nvddr_timing_modes: Supported source synchronous/NV-DDR timing modes * @vendor_revision: Vendor specific revision number * @vendor: Vendor specific data */ struct onfi_params { int version; u16 tPROG; u16 tBERS; u16 tR; u16 tCCS; bool fast_tCAD; u16 sdr_timing_modes; u16 nvddr_timing_modes; u16 vendor_revision; u8 vendor[88]; }; #endif /* __LINUX_MTD_ONFI_H */
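/*
 * Example (editor's sketch, not part of the header above): validating a
 * parameter page read from the chip. The checksum is the standard ONFI
 * CRC-16 (polynomial 0x8005, initial value ONFI_CRC_BASE) computed over the
 * first 254 bytes of the page, excluding the crc field itself; this mirrors
 * the check the raw NAND core performs when probing an ONFI chip.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mtd/onfi.h>

static u16 example_onfi_crc16(u16 crc, const u8 *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++ << 8;
		for (i = 0; i < 8; i++)
			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
	}
	return crc;
}

static bool example_onfi_param_page_valid(const struct nand_onfi_params *p)
{
	/* The signature must read 'O' 'N' 'F' 'I'. */
	if (memcmp(p->sig, "ONFI", 4))
		return false;

	return le16_to_cpu(p->crc) ==
	       example_onfi_crc16(ONFI_CRC_BASE, (const u8 *)p, 254);
}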