// SPDX-License-Identifier: GPL-2.0
/*
* Versatile Express SPC CPUFreq Interface driver
*
* Copyright (C) 2013 - 2019 ARM Ltd.
* Sudeep Holla <[email protected]>
*
* Copyright (C) 2013 Linaro.
* Viresh Kumar <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>
/* Currently we support only two clusters */
#define A15_CLUSTER 0
#define A7_CLUSTER 1
#define MAX_CLUSTERS 2
#ifdef CONFIG_BL_SWITCHER
#include <asm/bL_switcher.h>
static bool bL_switching_enabled;
#define is_bL_switching_enabled() bL_switching_enabled
#define set_switching_enabled(x) (bL_switching_enabled = (x))
#else
#define is_bL_switching_enabled() false
#define set_switching_enabled(x) do { } while (0)
#define bL_switch_request(...) do { } while (0)
#define bL_switcher_put_enabled() do { } while (0)
#define bL_switcher_get_enabled() do { } while (0)
#endif
#define ACTUAL_FREQ(cluster, freq)	(((cluster) == A7_CLUSTER) ? (freq) << 1 : (freq))
#define VIRT_FREQ(cluster, freq)	(((cluster) == A7_CLUSTER) ? (freq) >> 1 : (freq))
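/*
 * Worked example for the two macros above (values illustrative): with
 * bL switching enabled, a virtual request of 500000 kHz on the A7
 * cluster maps to an actual clock rate of 1000000 kHz via ACTUAL_FREQ,
 * and an actual A7 rate of 1000000 kHz reads back as a virtual
 * 500000 kHz via VIRT_FREQ. A15 frequencies pass through unchanged.
 */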
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];
static unsigned int clk_big_min; /* Minimum clock frequency (Big) */
static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
static DEFINE_PER_CPU(unsigned int, physical_cluster);
static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
static struct mutex cluster_lock[MAX_CLUSTERS];
static inline int raw_cpu_to_cluster(int cpu)
{
return topology_physical_package_id(cpu);
}
static inline int cpu_to_cluster(int cpu)
{
return is_bL_switching_enabled() ?
MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}
static unsigned int find_cluster_maxfreq(int cluster)
{
int j;
u32 max_freq = 0, cpu_freq;
for_each_online_cpu(j) {
cpu_freq = per_cpu(cpu_last_req_freq, j);
if (cluster == per_cpu(physical_cluster, j) &&
max_freq < cpu_freq)
max_freq = cpu_freq;
}
return max_freq;
}
static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
u32 cur_cluster = per_cpu(physical_cluster, cpu);
u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
/* For switcher we use virtual A7 clock rates */
if (is_bL_switching_enabled())
rate = VIRT_FREQ(cur_cluster, rate);
return rate;
}
static unsigned int ve_spc_cpufreq_get_rate(unsigned int cpu)
{
if (is_bL_switching_enabled())
return per_cpu(cpu_last_req_freq, cpu);
else
return clk_get_cpu_rate(cpu);
}
static unsigned int
ve_spc_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
{
u32 new_rate, prev_rate;
int ret;
bool bLs = is_bL_switching_enabled();
mutex_lock(&cluster_lock[new_cluster]);
if (bLs) {
prev_rate = per_cpu(cpu_last_req_freq, cpu);
per_cpu(cpu_last_req_freq, cpu) = rate;
per_cpu(physical_cluster, cpu) = new_cluster;
new_rate = find_cluster_maxfreq(new_cluster);
new_rate = ACTUAL_FREQ(new_cluster, new_rate);
} else {
new_rate = rate;
}
ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
if (!ret) {
/*
* FIXME: clk_set_rate hasn't returned an error here however it
* may be that clk_change_rate failed due to hardware or
* firmware issues and wasn't able to report that due to the
* current design of the clk core layer. To work around this
* problem we will read back the clock rate and check it is
* correct. This needs to be removed once clk core is fixed.
*/
if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
ret = -EIO;
}
if (WARN_ON(ret)) {
if (bLs) {
per_cpu(cpu_last_req_freq, cpu) = prev_rate;
per_cpu(physical_cluster, cpu) = old_cluster;
}
mutex_unlock(&cluster_lock[new_cluster]);
return ret;
}
mutex_unlock(&cluster_lock[new_cluster]);
/* Recalc freq for old cluster when switching clusters */
if (old_cluster != new_cluster) {
/* Switch cluster */
bL_switch_request(cpu, new_cluster);
mutex_lock(&cluster_lock[old_cluster]);
/* Set freq of old cluster if there are cpus left on it */
new_rate = find_cluster_maxfreq(old_cluster);
new_rate = ACTUAL_FREQ(old_cluster, new_rate);
		if (new_rate) {
			ret = clk_set_rate(clk[old_cluster], new_rate * 1000);
			if (ret)
				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
				       __func__, ret, old_cluster);
		}
mutex_unlock(&cluster_lock[old_cluster]);
}
return 0;
}
/* Set clock frequency */
static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
unsigned int freqs_new;
cur_cluster = cpu_to_cluster(cpu);
new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
freqs_new = freq_table[cur_cluster][index].frequency;
if (is_bL_switching_enabled()) {
if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min)
new_cluster = A7_CLUSTER;
else if (actual_cluster == A7_CLUSTER &&
freqs_new > clk_little_max)
new_cluster = A15_CLUSTER;
}
return ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
freqs_new);
}
static inline u32 get_table_count(struct cpufreq_frequency_table *table)
{
int count;
for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
;
return count;
}
/* get the minimum frequency in the cpufreq_frequency_table */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
u32 min_freq = ~0;
cpufreq_for_each_entry(pos, table)
if (pos->frequency < min_freq)
min_freq = pos->frequency;
return min_freq;
}
/* get the maximum frequency in the cpufreq_frequency_table */
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
u32 max_freq = 0;
cpufreq_for_each_entry(pos, table)
if (pos->frequency > max_freq)
max_freq = pos->frequency;
return max_freq;
}
static bool search_frequency(struct cpufreq_frequency_table *table, int size,
unsigned int freq)
{
int count;
for (count = 0; count < size; count++) {
if (table[count].frequency == freq)
return true;
}
return false;
}
static int merge_cluster_tables(void)
{
int i, j, k = 0, count = 1;
struct cpufreq_frequency_table *table;
for (i = 0; i < MAX_CLUSTERS; i++)
count += get_table_count(freq_table[i]);
table = kcalloc(count, sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
freq_table[MAX_CLUSTERS] = table;
/* Add in reverse order to get freqs in increasing order */
for (i = MAX_CLUSTERS - 1; i >= 0; i--, count = k) {
for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
j++) {
if (i == A15_CLUSTER &&
search_frequency(table, count, freq_table[i][j].frequency))
continue; /* skip duplicates */
table[k++].frequency =
VIRT_FREQ(i, freq_table[i][j].frequency);
}
}
table[k].driver_data = k;
table[k].frequency = CPUFREQ_TABLE_END;
return 0;
}
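/*
 * Illustration of the merge above with hypothetical tables (kHz):
 * A7 = {350000, 400000, 500000} and A15 = {500000, 600000, 700000}.
 * The A7 entries are halved by VIRT_FREQ to {175000, 200000, 250000}
 * and, because the clusters are walked in reverse order, the merged
 * virtual table comes out ascending:
 * {175000, 200000, 250000, 500000, 600000, 700000}. A15 frequencies
 * already present in the table are skipped as duplicates.
 */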
static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
const struct cpumask *cpumask)
{
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
if (!freq_table[cluster])
return;
clk_put(clk[cluster]);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
}
static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
const struct cpumask *cpumask)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
int i;
if (atomic_dec_return(&cluster_usage[cluster]))
return;
if (cluster < MAX_CLUSTERS)
return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
for_each_present_cpu(i) {
struct device *cdev = get_cpu_device(i);
if (!cdev)
return;
_put_cluster_clk_and_freq_table(cdev, cpumask);
}
/* free virtual table */
kfree(freq_table[cluster]);
}
static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
const struct cpumask *cpumask)
{
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
int ret;
if (freq_table[cluster])
return 0;
	/*
	 * platform specific SPC code must initialise the opp table
	 * so just check that the OPP count is non-zero
	 */
	if (dev_pm_opp_get_opp_count(cpu_dev) <= 0) {
		ret = -ENODEV;
		goto out;
	}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
if (ret)
goto out;
clk[cluster] = clk_get(cpu_dev, NULL);
if (!IS_ERR(clk[cluster]))
return 0;
dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
__func__, cpu_dev->id, cluster);
ret = PTR_ERR(clk[cluster]);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
out:
dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
cluster);
return ret;
}
static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
const struct cpumask *cpumask)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
int i, ret;
if (atomic_inc_return(&cluster_usage[cluster]) != 1)
return 0;
if (cluster < MAX_CLUSTERS) {
ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
if (ret)
atomic_dec(&cluster_usage[cluster]);
return ret;
}
/*
* Get data for all clusters and fill virtual cluster with a merge of
* both
*/
for_each_present_cpu(i) {
struct device *cdev = get_cpu_device(i);
if (!cdev)
return -ENODEV;
ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
if (ret)
goto put_clusters;
}
ret = merge_cluster_tables();
if (ret)
goto put_clusters;
/* Assuming 2 cluster, set clk_big_min and clk_little_max */
clk_big_min = get_table_min(freq_table[A15_CLUSTER]);
clk_little_max = VIRT_FREQ(A7_CLUSTER,
get_table_max(freq_table[A7_CLUSTER]));
return 0;
put_clusters:
for_each_present_cpu(i) {
struct device *cdev = get_cpu_device(i);
if (!cdev)
return -ENODEV;
_put_cluster_clk_and_freq_table(cdev, cpumask);
}
atomic_dec(&cluster_usage[cluster]);
return ret;
}
/* Per-CPU initialization */
static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
{
u32 cur_cluster = cpu_to_cluster(policy->cpu);
struct device *cpu_dev;
int ret;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
policy->cpu);
return -ENODEV;
}
if (cur_cluster < MAX_CLUSTERS) {
int cpu;
dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
for_each_cpu(cpu, policy->cpus)
per_cpu(physical_cluster, cpu) = cur_cluster;
} else {
/* Assumption: during init, we are always running on A15 */
per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
}
ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
if (ret)
return ret;
policy->freq_table = freq_table[cur_cluster];
policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
if (is_bL_switching_enabled())
per_cpu(cpu_last_req_freq, policy->cpu) =
clk_get_cpu_rate(policy->cpu);
dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
return 0;
}
static void ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
policy->cpu);
return;
}
put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
}
static struct cpufreq_driver ve_spc_cpufreq_driver = {
.name = "vexpress-spc",
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = ve_spc_cpufreq_set_target,
.get = ve_spc_cpufreq_get_rate,
.init = ve_spc_cpufreq_init,
.exit = ve_spc_cpufreq_exit,
.register_em = cpufreq_register_em_with_opp,
.attr = cpufreq_generic_attr,
};
#ifdef CONFIG_BL_SWITCHER
static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
unsigned long action, void *_arg)
{
pr_debug("%s: action: %ld\n", __func__, action);
switch (action) {
case BL_NOTIFY_PRE_ENABLE:
case BL_NOTIFY_PRE_DISABLE:
cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
break;
case BL_NOTIFY_POST_ENABLE:
set_switching_enabled(true);
cpufreq_register_driver(&ve_spc_cpufreq_driver);
break;
case BL_NOTIFY_POST_DISABLE:
set_switching_enabled(false);
cpufreq_register_driver(&ve_spc_cpufreq_driver);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static struct notifier_block bL_switcher_notifier = {
.notifier_call = bL_cpufreq_switcher_notifier,
};
static int __bLs_register_notifier(void)
{
return bL_switcher_register_notifier(&bL_switcher_notifier);
}
static int __bLs_unregister_notifier(void)
{
return bL_switcher_unregister_notifier(&bL_switcher_notifier);
}
#else
static int __bLs_register_notifier(void) { return 0; }
static int __bLs_unregister_notifier(void) { return 0; }
#endif
static int ve_spc_cpufreq_probe(struct platform_device *pdev)
{
int ret, i;
set_switching_enabled(bL_switcher_get_enabled());
for (i = 0; i < MAX_CLUSTERS; i++)
mutex_init(&cluster_lock[i]);
if (!is_bL_switching_enabled())
ve_spc_cpufreq_driver.flags |= CPUFREQ_IS_COOLING_DEV;
ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
if (ret) {
pr_info("%s: Failed registering platform driver: %s, err: %d\n",
__func__, ve_spc_cpufreq_driver.name, ret);
} else {
ret = __bLs_register_notifier();
if (ret)
cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
else
pr_info("%s: Registered platform driver: %s\n",
__func__, ve_spc_cpufreq_driver.name);
}
bL_switcher_put_enabled();
return ret;
}
static void ve_spc_cpufreq_remove(struct platform_device *pdev)
{
bL_switcher_get_enabled();
__bLs_unregister_notifier();
cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
bL_switcher_put_enabled();
pr_info("%s: Un-registered platform driver: %s\n", __func__,
ve_spc_cpufreq_driver.name);
}
static struct platform_driver ve_spc_cpufreq_platdrv = {
.driver = {
.name = "vexpress-spc-cpufreq",
},
.probe = ve_spc_cpufreq_probe,
.remove = ve_spc_cpufreq_remove,
};
module_platform_driver(ve_spc_cpufreq_platdrv);
MODULE_ALIAS("platform:vexpress-spc-cpufreq");
MODULE_AUTHOR("Viresh Kumar <[email protected]>");
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver");
MODULE_LICENSE("GPL v2");
#ifndef _PERF_RWSEM_H
#define _PERF_RWSEM_H
#include <pthread.h>
#include "mutex.h"
/*
* Mutexes have additional error checking. Enable to use a mutex rather than a
* rwlock for debugging.
*/
#define RWS_ERRORCHECK 0
struct rw_semaphore {
#if RWS_ERRORCHECK
struct mutex mtx;
#else
pthread_rwlock_t lock;
#endif
};
int init_rwsem(struct rw_semaphore *sem);
int exit_rwsem(struct rw_semaphore *sem);
int down_read(struct rw_semaphore *sem);
int up_read(struct rw_semaphore *sem);
int down_write(struct rw_semaphore *sem);
int up_write(struct rw_semaphore *sem);
#endif /* _PERF_RWSEM_H */
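/*
 * Illustration only: a minimal sketch of how the declarations above could
 * be backed by pthreads when RWS_ERRORCHECK is 0. This is an assumption
 * for clarity, not the canonical tools/perf/util/rwsem.c.
 */
#include "rwsem.h"

int init_rwsem(struct rw_semaphore *sem)
{
	return pthread_rwlock_init(&sem->lock, NULL);
}

int exit_rwsem(struct rw_semaphore *sem)
{
	return pthread_rwlock_destroy(&sem->lock);
}

int down_read(struct rw_semaphore *sem)
{
	return pthread_rwlock_rdlock(&sem->lock);
}

int up_read(struct rw_semaphore *sem)
{
	return pthread_rwlock_unlock(&sem->lock);
}

int down_write(struct rw_semaphore *sem)
{
	return pthread_rwlock_wrlock(&sem->lock);
}

int up_write(struct rw_semaphore *sem)
{
	return pthread_rwlock_unlock(&sem->lock);
}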
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
int count = 0;
SEC("freplace")
int entry_freplace(struct __sk_buff *skb)
{
count++;
bpf_tail_call_static(skb, &jmp_table, 0);
return count;
}
char __license[] SEC("license") = "GPL";
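/*
 * Illustration only (not part of the kernel selftest): a userspace sketch
 * showing how slot 0 of jmp_table could be filled so the tail call above
 * has a target. The bpf_object and target program fd are assumed to come
 * from the caller; the surrounding load/attach flow is omitted.
 */
#include <bpf/libbpf.h>
#include <bpf/bpf.h>

static int populate_jmp_table(struct bpf_object *obj, int target_prog_fd)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "jmp_table");
	__u32 key = 0;

	if (!map)
		return -1;
	/*
	 * After this update, bpf_tail_call_static(skb, &jmp_table, 0) in
	 * entry_freplace() jumps to the target program and, on success,
	 * never returns to the caller.
	 */
	return bpf_map_update_elem(bpf_map__fd(map), &key, &target_prog_fd, 0);
}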
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
#include "rt3050.dtsi"
/ {
compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc";
model = "Ralink RT3052 evaluation board";
memory@0 {
device_type = "memory";
reg = <0x0 0x2000000>;
};
chosen {
bootargs = "console=ttyS0,57600";
};
cfi@1f000000 {
compatible = "cfi-flash";
reg = <0x1f000000 0x800000>;
bank-width = <2>;
device-width = <2>;
#address-cells = <1>;
#size-cells = <1>;
partition@0 {
label = "uboot";
reg = <0x0 0x30000>;
read-only;
};
partition@30000 {
label = "uboot-env";
reg = <0x30000 0x10000>;
read-only;
};
partition@40000 {
label = "calibration";
reg = <0x40000 0x10000>;
read-only;
};
partition@50000 {
label = "linux";
reg = <0x50000 0x7b0000>;
};
};
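	/*
	 * Partition layout check (derived from the reg values above):
	 * 0x30000 + 0x10000 + 0x10000 + 0x7b0000 = 0x800000, so the four
	 * partitions exactly cover the 8 MiB CFI flash window.
	 */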
usb@101c0000 {
status = "okay";
};
};
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
* All rights reserved.
*/
#include <linux/bitfield.h>
#include "wlan_if.h"
#include "wlan.h"
#include "wlan_cfg.h"
#include "netdev.h"
enum cfg_cmd_type {
CFG_BYTE_CMD = 0,
CFG_HWORD_CMD = 1,
CFG_WORD_CMD = 2,
CFG_STR_CMD = 3,
CFG_BIN_CMD = 4
};
static const struct wilc_cfg_byte g_cfg_byte[] = {
{WID_STATUS, 0},
{WID_RSSI, 0},
{WID_LINKSPEED, 0},
{WID_TX_POWER, 0},
{WID_WOWLAN_TRIGGER, 0},
{WID_NIL, 0}
};
static const struct wilc_cfg_hword g_cfg_hword[] = {
{WID_NIL, 0}
};
static const struct wilc_cfg_word g_cfg_word[] = {
{WID_FAILED_COUNT, 0},
{WID_RECEIVED_FRAGMENT_COUNT, 0},
{WID_SUCCESS_FRAME_COUNT, 0},
{WID_GET_INACTIVE_TIME, 0},
{WID_NIL, 0}
};
static const struct wilc_cfg_str g_cfg_str[] = {
{WID_FIRMWARE_VERSION, NULL},
{WID_MAC_ADDR, NULL},
{WID_ASSOC_RES_INFO, NULL},
{WID_NIL, NULL}
};
#define WILC_RESP_MSG_TYPE_CONFIG_REPLY 'R'
#define WILC_RESP_MSG_TYPE_STATUS_INFO 'I'
#define WILC_RESP_MSG_TYPE_NETWORK_INFO 'N'
#define WILC_RESP_MSG_TYPE_SCAN_COMPLETE 'S'
/********************************************
*
* Configuration Functions
*
********************************************/
static int wilc_wlan_cfg_set_byte(u8 *frame, u32 offset, u16 id, u8 val8)
{
if ((offset + 4) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
put_unaligned_le16(id, &frame[offset]);
put_unaligned_le16(1, &frame[offset + 2]);
frame[offset + 4] = val8;
return 5;
}
static int wilc_wlan_cfg_set_hword(u8 *frame, u32 offset, u16 id, u16 val16)
{
if ((offset + 5) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
put_unaligned_le16(id, &frame[offset]);
put_unaligned_le16(2, &frame[offset + 2]);
put_unaligned_le16(val16, &frame[offset + 4]);
return 6;
}
static int wilc_wlan_cfg_set_word(u8 *frame, u32 offset, u16 id, u32 val32)
{
if ((offset + 7) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
put_unaligned_le16(id, &frame[offset]);
put_unaligned_le16(4, &frame[offset + 2]);
put_unaligned_le32(val32, &frame[offset + 4]);
return 8;
}
static int wilc_wlan_cfg_set_str(u8 *frame, u32 offset, u16 id, u8 *str,
u32 size)
{
if ((offset + size + 4) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
put_unaligned_le16(id, &frame[offset]);
put_unaligned_le16(size, &frame[offset + 2]);
if (str && size != 0)
memcpy(&frame[offset + 4], str, size);
return (size + 4);
}
static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
{
u32 i;
u8 checksum = 0;
if ((offset + size + 5) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
put_unaligned_le16(id, &frame[offset]);
put_unaligned_le16(size, &frame[offset + 2]);
if ((b) && size != 0) {
memcpy(&frame[offset + 4], b, size);
for (i = 0; i < size; i++)
checksum += frame[offset + i + 4];
}
frame[offset + size + 4] = checksum;
return (size + 5);
}
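/*
 * For reference, the little-endian TLV layout the helpers above emit
 * (derived from the code):
 *
 *   offset + 0 : WID id        (le16)
 *   offset + 2 : value length  (le16; 1, 2, 4 or `size`)
 *   offset + 4 : value bytes
 *
 * Binary WIDs additionally append a one-byte additive checksum of the
 * value bytes, which is why set_bin returns size + 5 instead of size + 4.
 */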
/********************************************
*
* Configuration Response Functions
*
********************************************/
static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
{
u16 wid;
u32 len = 0, i = 0;
struct wilc_cfg *cfg = &wl->cfg;
while (size > 0) {
i = 0;
wid = get_unaligned_le16(info);
switch (FIELD_GET(WILC_WID_TYPE, wid)) {
case WID_CHAR:
while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
i++;
if (cfg->b[i].id == wid)
cfg->b[i].val = info[4];
len = 3;
break;
case WID_SHORT:
while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
i++;
if (cfg->hw[i].id == wid)
cfg->hw[i].val = get_unaligned_le16(&info[4]);
len = 4;
break;
case WID_INT:
while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
i++;
if (cfg->w[i].id == wid)
cfg->w[i].val = get_unaligned_le32(&info[4]);
len = 6;
break;
case WID_STR:
while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
i++;
if (cfg->s[i].id == wid)
memcpy(cfg->s[i].str, &info[2],
get_unaligned_le16(&info[2]) + 2);
len = 2 + get_unaligned_le16(&info[2]);
break;
default:
break;
}
size -= (2 + len);
info += (2 + len);
}
}
static void wilc_wlan_parse_info_frame(struct wilc *wl, u8 *info)
{
u32 wid, len;
wid = get_unaligned_le16(info);
len = info[2];
if (len == 1 && wid == WID_STATUS) {
int i = 0;
while (wl->cfg.b[i].id != WID_NIL &&
wl->cfg.b[i].id != wid)
i++;
if (wl->cfg.b[i].id == wid)
wl->cfg.b[i].val = info[3];
}
}
/********************************************
*
* Configuration Exported Functions
*
********************************************/
int wilc_wlan_cfg_set_wid(u8 *frame, u32 offset, u16 id, u8 *buf, int size)
{
u8 type = FIELD_GET(WILC_WID_TYPE, id);
int ret = 0;
switch (type) {
case CFG_BYTE_CMD:
if (size >= 1)
ret = wilc_wlan_cfg_set_byte(frame, offset, id, *buf);
break;
case CFG_HWORD_CMD:
if (size >= 2)
ret = wilc_wlan_cfg_set_hword(frame, offset, id,
*((u16 *)buf));
break;
case CFG_WORD_CMD:
if (size >= 4)
ret = wilc_wlan_cfg_set_word(frame, offset, id,
*((u32 *)buf));
break;
case CFG_STR_CMD:
ret = wilc_wlan_cfg_set_str(frame, offset, id, buf, size);
break;
case CFG_BIN_CMD:
ret = wilc_wlan_cfg_set_bin(frame, offset, id, buf, size);
break;
}
return ret;
}
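/*
 * Illustration only: packing one byte-sized WID into a frame with the
 * dispatcher above. WID_TX_POWER comes from g_cfg_byte; the caller and
 * the send path are omitted, and the frame buffer is assumed to be
 * WILC_MAX_CFG_FRAME_SIZE bytes.
 */
static int example_pack_tx_power(u8 *frame, u32 offset)
{
	u8 power = 10;	/* hypothetical value */
	int len;

	len = wilc_wlan_cfg_set_wid(frame, offset, WID_TX_POWER, &power,
				    sizeof(power));
	return len ? len : -EINVAL;	/* 0 means the frame was full */
}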
int wilc_wlan_cfg_get_wid(u8 *frame, u32 offset, u16 id)
{
if ((offset + 2) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
put_unaligned_le16(id, &frame[offset]);
return 2;
}
int wilc_wlan_cfg_get_val(struct wilc *wl, u16 wid, u8 *buffer,
u32 buffer_size)
{
u8 type = FIELD_GET(WILC_WID_TYPE, wid);
int i, ret = 0;
struct wilc_cfg *cfg = &wl->cfg;
i = 0;
if (type == CFG_BYTE_CMD) {
while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
i++;
if (cfg->b[i].id == wid) {
memcpy(buffer, &cfg->b[i].val, 1);
ret = 1;
}
} else if (type == CFG_HWORD_CMD) {
while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
i++;
if (cfg->hw[i].id == wid) {
memcpy(buffer, &cfg->hw[i].val, 2);
ret = 2;
}
} else if (type == CFG_WORD_CMD) {
while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
i++;
if (cfg->w[i].id == wid) {
memcpy(buffer, &cfg->w[i].val, 4);
ret = 4;
}
} else if (type == CFG_STR_CMD) {
while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
i++;
if (cfg->s[i].id == wid) {
u16 size = get_unaligned_le16(cfg->s[i].str);
if (buffer_size >= size) {
memcpy(buffer, &cfg->s[i].str[2], size);
ret = size;
}
}
}
return ret;
}
void wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size,
struct wilc_cfg_rsp *rsp)
{
u8 msg_type;
u8 msg_id;
msg_type = frame[0];
msg_id = frame[1]; /* seq no */
frame += 4;
size -= 4;
rsp->type = 0;
switch (msg_type) {
case WILC_RESP_MSG_TYPE_CONFIG_REPLY:
wilc_wlan_parse_response_frame(wilc, frame, size);
rsp->type = WILC_CFG_RSP;
rsp->seq_no = msg_id;
break;
case WILC_RESP_MSG_TYPE_STATUS_INFO:
wilc_wlan_parse_info_frame(wilc, frame);
rsp->type = WILC_CFG_RSP_STATUS;
rsp->seq_no = msg_id;
/* call host interface info parse as well */
wilc_gnrl_async_info_received(wilc, frame - 4, size + 4);
break;
case WILC_RESP_MSG_TYPE_NETWORK_INFO:
wilc_network_info_received(wilc, frame - 4, size + 4);
break;
case WILC_RESP_MSG_TYPE_SCAN_COMPLETE:
wilc_scan_complete_received(wilc, frame - 4, size + 4);
break;
default:
rsp->seq_no = msg_id;
break;
}
}
int wilc_wlan_cfg_init(struct wilc *wl)
{
struct wilc_cfg_str_vals *str_vals;
int i = 0;
wl->cfg.b = kmemdup(g_cfg_byte, sizeof(g_cfg_byte), GFP_KERNEL);
if (!wl->cfg.b)
return -ENOMEM;
wl->cfg.hw = kmemdup(g_cfg_hword, sizeof(g_cfg_hword), GFP_KERNEL);
if (!wl->cfg.hw)
goto out_b;
wl->cfg.w = kmemdup(g_cfg_word, sizeof(g_cfg_word), GFP_KERNEL);
if (!wl->cfg.w)
goto out_hw;
wl->cfg.s = kmemdup(g_cfg_str, sizeof(g_cfg_str), GFP_KERNEL);
if (!wl->cfg.s)
goto out_w;
str_vals = kzalloc(sizeof(*str_vals), GFP_KERNEL);
if (!str_vals)
goto out_s;
wl->cfg.str_vals = str_vals;
/* store the string cfg parameters */
wl->cfg.s[i].id = WID_FIRMWARE_VERSION;
wl->cfg.s[i].str = str_vals->firmware_version;
i++;
wl->cfg.s[i].id = WID_MAC_ADDR;
wl->cfg.s[i].str = str_vals->mac_address;
i++;
wl->cfg.s[i].id = WID_ASSOC_RES_INFO;
wl->cfg.s[i].str = str_vals->assoc_rsp;
i++;
wl->cfg.s[i].id = WID_NIL;
wl->cfg.s[i].str = NULL;
return 0;
out_s:
kfree(wl->cfg.s);
out_w:
kfree(wl->cfg.w);
out_hw:
kfree(wl->cfg.hw);
out_b:
kfree(wl->cfg.b);
return -ENOMEM;
}
void wilc_wlan_cfg_deinit(struct wilc *wl)
{
kfree(wl->cfg.b);
kfree(wl->cfg.hw);
kfree(wl->cfg.w);
kfree(wl->cfg.s);
kfree(wl->cfg.str_vals);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012 Linutronix GmbH
* Copyright (c) 2014 sigma star gmbh
* Author: Richard Weinberger <[email protected]>
*/
#include <linux/crc32.h>
#include <linux/bitmap.h>
#include "ubi.h"
/**
 * init_seen - allocate a bitmap used for fastmap debugging.
* @ubi: UBI device description object
*/
static inline unsigned long *init_seen(struct ubi_device *ubi)
{
unsigned long *ret;
if (!ubi_dbg_chk_fastmap(ubi))
return NULL;
ret = bitmap_zalloc(ubi->peb_count, GFP_NOFS);
if (!ret)
return ERR_PTR(-ENOMEM);
return ret;
}
/**
 * free_seen - free the seen bitmap.
 * @seen: bitmap of @ubi->peb_count bits
*/
static inline void free_seen(unsigned long *seen)
{
bitmap_free(seen);
}
/**
* set_seen - mark a PEB as seen.
* @ubi: UBI device description object
 * @pnum: the PEB to be marked as seen
 * @seen: bitmap of @ubi->peb_count bits
*/
static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
{
if (!ubi_dbg_chk_fastmap(ubi) || !seen)
return;
set_bit(pnum, seen);
}
/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: bitmap of @ubi->peb_count bits
*/
static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
{
int pnum, ret = 0;
if (!ubi_dbg_chk_fastmap(ubi) || !seen)
return 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++) {
if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
ret = -EINVAL;
}
}
return ret;
}
/**
* ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
* @ubi: UBI device description object
*/
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
size_t size;
size = sizeof(struct ubi_fm_sb) +
sizeof(struct ubi_fm_hdr) +
sizeof(struct ubi_fm_scan_pool) +
sizeof(struct ubi_fm_scan_pool) +
(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
((sizeof(struct ubi_fm_eba) +
sizeof(struct ubi_fm_volhdr)) *
(UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) +
(ubi->peb_count * sizeof(__be32));
return roundup(size, ubi->leb_size);
}
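/*
 * Hypothetical sizing note (numbers illustrative): each PEB contributes
 * one struct ubi_fm_ec to the EC area and one __be32 to the EBA area, so
 * the total above grows roughly linearly with @ubi->peb_count on top of
 * the fixed superblock, header, pool and per-volume headers. roundup()
 * then pads the result to a whole number of LEBs, which is what
 * used_blocks counts when the fastmap is read back.
 */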
/**
* new_fm_vbuf() - allocate a new volume header for fastmap usage.
* @ubi: UBI device description object
* @vol_id: the VID of the new header
*
* Returns a new struct ubi_vid_hdr on success.
* NULL indicates out of memory.
*/
static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
{
struct ubi_vid_io_buf *new;
struct ubi_vid_hdr *vh;
new = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!new)
goto out;
vh = ubi_get_vid_hdr(new);
vh->vol_type = UBI_VID_DYNAMIC;
vh->vol_id = cpu_to_be32(vol_id);
/* UBI implementations without fastmap support have to delete the
* fastmap.
*/
vh->compat = UBI_COMPAT_DELETE;
out:
return new;
}
/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new PEB
* @scrub: scrub this PEB after attaching
*
* Returns 0 on success, < 0 indicates an internal error.
*/
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
int pnum, int ec, int scrub)
{
struct ubi_ainf_peb *aeb;
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
aeb->lnum = -1;
aeb->scrub = scrub;
aeb->copy_flag = aeb->sqnum = 0;
ai->ec_sum += aeb->ec;
ai->ec_count++;
if (ai->max_ec < aeb->ec)
ai->max_ec = aeb->ec;
if (ai->min_ec > aeb->ec)
ai->min_ec = aeb->ec;
list_add_tail(&aeb->u.list, list);
return 0;
}
/**
* add_vol - create and add a new volume to ubi_attach_info.
* @ai: ubi_attach_info object
* @vol_id: VID of the new volume
* @used_ebs: number of used EBS
* @data_pad: data padding value of the new volume
* @vol_type: volume type
* @last_eb_bytes: number of bytes in the last LEB
*
* Returns the new struct ubi_ainf_volume on success.
* NULL indicates an error.
*/
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
int used_ebs, int data_pad, u8 vol_type,
int last_eb_bytes)
{
struct ubi_ainf_volume *av;
av = ubi_add_av(ai, vol_id);
if (IS_ERR(av))
return av;
av->data_pad = data_pad;
av->last_data_size = last_eb_bytes;
av->compat = 0;
av->vol_type = vol_type;
if (av->vol_type == UBI_STATIC_VOLUME)
av->used_ebs = used_ebs;
dbg_bld("found volume (ID %i)", vol_id);
return av;
}
/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the SEB to be assigned
* @av: target scan volume
*/
static void assign_aeb_to_av(struct ubi_attach_info *ai,
struct ubi_ainf_peb *aeb,
struct ubi_ainf_volume *av)
{
struct ubi_ainf_peb *tmp_aeb;
struct rb_node **p = &av->root.rb_node, *parent = NULL;
while (*p) {
parent = *p;
tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
if (aeb->lnum != tmp_aeb->lnum) {
if (aeb->lnum < tmp_aeb->lnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
continue;
} else
break;
}
list_del(&aeb->u.list);
av->leb_count++;
rb_link_node(&aeb->u.rb, parent, p);
rb_insert_color(&aeb->u.rb, &av->root);
}
/**
 * update_vol - inserts or updates a LEB which was found in a pool.
* @ubi: the UBI device object
* @ai: attach info object
* @av: the volume this LEB belongs to
* @new_vh: the volume header derived from new_aeb
* @new_aeb: the AEB to be examined
*
* Returns 0 on success, < 0 indicates an internal error.
*/
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
struct ubi_ainf_peb *new_aeb)
{
struct rb_node **p = &av->root.rb_node, *parent = NULL;
struct ubi_ainf_peb *aeb, *victim;
int cmp_res;
while (*p) {
parent = *p;
aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
continue;
}
/* This case can happen if the fastmap gets written
* because of a volume change (creation, deletion, ..).
* Then a PEB can be within the persistent EBA and the pool.
*/
if (aeb->pnum == new_aeb->pnum) {
ubi_assert(aeb->lnum == new_aeb->lnum);
ubi_free_aeb(ai, new_aeb);
return 0;
}
cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
if (cmp_res < 0)
return cmp_res;
/* new_aeb is newer */
if (cmp_res & 1) {
victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
if (!victim)
return -ENOMEM;
list_add_tail(&victim->u.list, &ai->erase);
if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
av->last_data_size =
be32_to_cpu(new_vh->data_size);
dbg_bld("vol %i: AEB %i's PEB %i is the newer",
av->vol_id, aeb->lnum, new_aeb->pnum);
aeb->ec = new_aeb->ec;
aeb->pnum = new_aeb->pnum;
aeb->copy_flag = new_vh->copy_flag;
aeb->scrub = new_aeb->scrub;
aeb->sqnum = new_aeb->sqnum;
ubi_free_aeb(ai, new_aeb);
/* new_aeb is older */
} else {
dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
av->vol_id, aeb->lnum, new_aeb->pnum);
list_add_tail(&new_aeb->u.list, &ai->erase);
}
return 0;
}
/* This LEB is new, let's add it to the volume */
if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
av->highest_lnum = be32_to_cpu(new_vh->lnum);
av->last_data_size = be32_to_cpu(new_vh->data_size);
}
if (av->vol_type == UBI_STATIC_VOLUME)
av->used_ebs = be32_to_cpu(new_vh->used_ebs);
av->leb_count++;
rb_link_node(&new_aeb->u.rb, parent, p);
rb_insert_color(&new_aeb->u.rb, &av->root);
return 0;
}
/**
 * process_pool_aeb - handle a non-empty PEB found in a pool.
* @ubi: UBI device object
* @ai: attach info object
* @new_vh: the volume header derived from new_aeb
* @new_aeb: the AEB to be examined
*
* Returns 0 on success, < 0 indicates an internal error.
*/
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_vid_hdr *new_vh,
struct ubi_ainf_peb *new_aeb)
{
int vol_id = be32_to_cpu(new_vh->vol_id);
struct ubi_ainf_volume *av;
if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
ubi_free_aeb(ai, new_aeb);
return 0;
}
/* Find the volume this SEB belongs to */
av = ubi_find_av(ai, vol_id);
if (!av) {
ubi_err(ubi, "orphaned volume in fastmap pool!");
ubi_free_aeb(ai, new_aeb);
return UBI_BAD_FASTMAP;
}
ubi_assert(vol_id == av->vol_id);
return update_vol(ubi, ai, av, new_vh, new_aeb);
}
/**
* unmap_peb - unmap a PEB.
* If fastmap detects a free PEB in the pool it has to check whether
* this PEB has been unmapped after writing the fastmap.
*
* @ai: UBI attach info object
* @pnum: The PEB to be unmapped
*/
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
struct ubi_ainf_volume *av;
struct rb_node *node, *node2;
struct ubi_ainf_peb *aeb;
ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
if (aeb->pnum == pnum) {
rb_erase(&aeb->u.rb, &av->root);
av->leb_count--;
ubi_free_aeb(ai, aeb);
return;
}
}
}
}
/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the pool to be scanned
* @pool_size: size of the pool (number of entries in @pebs)
* @max_sqnum: pointer to the maximal sequence number
* @free: list of PEBs which are most likely free (and go into @ai->free)
*
* Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
* < 0 indicates an internal error.
*/
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
__be32 *pebs, int pool_size, unsigned long long *max_sqnum,
struct list_head *free)
{
struct ubi_vid_io_buf *vb;
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_ainf_peb *new_aeb;
int i, pnum, err, ret = 0;
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
return -ENOMEM;
vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!vb) {
kfree(ech);
return -ENOMEM;
}
vh = ubi_get_vid_hdr(vb);
dbg_bld("scanning fastmap pool: size = %i", pool_size);
/*
* Now scan all PEBs in the pool to find changes which have been made
* after the creation of the fastmap
*/
for (i = 0; i < pool_size; i++) {
int scrub = 0;
int image_seq;
pnum = be32_to_cpu(pebs[i]);
if (ubi_io_is_bad(ubi, pnum)) {
ubi_err(ubi, "bad PEB in fastmap pool!");
ret = UBI_BAD_FASTMAP;
goto out;
}
err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
if (err && err != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
pnum, err);
ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out;
} else if (err == UBI_IO_BITFLIPS)
scrub = 1;
/*
* Older UBI implementations have image_seq set to zero, so
* we shouldn't fail if image_seq == 0.
*/
image_seq = be32_to_cpu(ech->image_seq);
if (image_seq && (image_seq != ubi->image_seq)) {
ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
be32_to_cpu(ech->image_seq), ubi->image_seq);
ret = UBI_BAD_FASTMAP;
goto out;
}
err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
unsigned long long ec = be64_to_cpu(ech->ec);
unmap_peb(ai, pnum);
dbg_bld("Adding PEB to free: %i", pnum);
if (err == UBI_IO_FF_BITFLIPS)
scrub = 1;
ret = add_aeb(ai, free, pnum, ec, scrub);
if (ret)
goto out;
continue;
} else if (err == 0 || err == UBI_IO_BITFLIPS) {
dbg_bld("Found non empty PEB:%i in pool", pnum);
if (err == UBI_IO_BITFLIPS)
scrub = 1;
new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
if (!new_aeb) {
ret = -ENOMEM;
goto out;
}
new_aeb->lnum = be32_to_cpu(vh->lnum);
new_aeb->sqnum = be64_to_cpu(vh->sqnum);
new_aeb->copy_flag = vh->copy_flag;
new_aeb->scrub = scrub;
if (*max_sqnum < new_aeb->sqnum)
*max_sqnum = new_aeb->sqnum;
err = process_pool_aeb(ubi, ai, vh, new_aeb);
if (err) {
ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out;
}
} else {
/* We are paranoid and fall back to scanning mode */
ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out;
}
}
out:
ubi_free_vid_buf(vb);
kfree(ech);
return ret;
}
/**
* count_fastmap_pebs - Counts the PEBs found by fastmap.
* @ai: The UBI attach info object
*/
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
struct ubi_ainf_peb *aeb;
struct ubi_ainf_volume *av;
struct rb_node *rb1, *rb2;
int n = 0;
list_for_each_entry(aeb, &ai->erase, u.list)
n++;
list_for_each_entry(aeb, &ai->free, u.list)
n++;
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
n++;
return n;
}
/**
* ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
* @ubi: UBI device object
* @ai: UBI attach info object
* @fm: the fastmap to be attached
*
* Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
* < 0 indicates an internal error.
*/
static int ubi_attach_fastmap(struct ubi_device *ubi,
struct ubi_attach_info *ai,
struct ubi_fastmap_layout *fm)
{
struct list_head used, free;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
struct ubi_fm_sb *fmsb;
struct ubi_fm_hdr *fmhdr;
struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
struct ubi_fm_ec *fmec;
struct ubi_fm_volhdr *fmvhdr;
struct ubi_fm_eba *fm_eba;
int ret, i, j, pool_size, wl_pool_size;
size_t fm_pos = 0, fm_size = ubi->fm_size;
unsigned long long max_sqnum = 0;
void *fm_raw = ubi->fm_buf;
INIT_LIST_HEAD(&used);
INIT_LIST_HEAD(&free);
ai->min_ec = UBI_MAX_ERASECOUNTER;
fmsb = (struct ubi_fm_sb *)(fm_raw);
ai->max_sqnum = fmsb->sqnum;
fm_pos += sizeof(struct ubi_fm_sb);
if (fm_pos >= fm_size)
goto fail_bad;
fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmhdr);
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
goto fail_bad;
}
fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmpl);
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
goto fail_bad;
}
fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmpl_wl);
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
goto fail_bad;
}
pool_size = be16_to_cpu(fmpl->size);
wl_pool_size = be16_to_cpu(fmpl_wl->size);
fm->max_pool_size = be16_to_cpu(fmpl->max_size);
fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
ubi_err(ubi, "bad pool size: %i", pool_size);
goto fail_bad;
}
if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
goto fail_bad;
}
if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
fm->max_pool_size < 0) {
ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
goto fail_bad;
}
if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
fm->max_wl_pool_size < 0) {
ubi_err(ubi, "bad maximal WL pool size: %i",
fm->max_wl_pool_size);
goto fail_bad;
}
/* read EC values from free list */
for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec);
if (fm_pos >= fm_size)
goto fail_bad;
ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 0);
if (ret)
goto fail;
}
/* read EC values from used list */
for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec);
if (fm_pos >= fm_size)
goto fail_bad;
ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 0);
if (ret)
goto fail;
}
/* read EC values from scrub list */
for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec);
if (fm_pos >= fm_size)
goto fail_bad;
ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 1);
if (ret)
goto fail;
}
/* read EC values from erase list */
for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec);
if (fm_pos >= fm_size)
goto fail_bad;
ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 1);
if (ret)
goto fail;
}
ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
/* Iterate over all volumes and read their EBA table */
for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmvhdr);
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
goto fail_bad;
}
av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
be32_to_cpu(fmvhdr->used_ebs),
be32_to_cpu(fmvhdr->data_pad),
fmvhdr->vol_type,
be32_to_cpu(fmvhdr->last_eb_bytes));
if (IS_ERR(av)) {
if (PTR_ERR(av) == -EEXIST)
ubi_err(ubi, "volume (ID %i) already exists",
fmvhdr->vol_id);
goto fail_bad;
}
ai->vols_found++;
if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
fm_pos += sizeof(*fm_eba);
fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
goto fail_bad;
}
for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
int pnum = be32_to_cpu(fm_eba->pnum[j]);
if (pnum < 0)
continue;
aeb = NULL;
list_for_each_entry(tmp_aeb, &used, u.list) {
if (tmp_aeb->pnum == pnum) {
aeb = tmp_aeb;
break;
}
}
if (!aeb) {
ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
goto fail_bad;
}
aeb->lnum = j;
if (av->highest_lnum <= aeb->lnum)
av->highest_lnum = aeb->lnum;
assign_aeb_to_av(ai, aeb, av);
dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
aeb->pnum, aeb->lnum, av->vol_id);
}
}
ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
if (ret)
goto fail;
ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
if (ret)
goto fail;
if (max_sqnum > ai->max_sqnum)
ai->max_sqnum = max_sqnum;
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
list_move_tail(&tmp_aeb->u.list, &ai->free);
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
list_move_tail(&tmp_aeb->u.list, &ai->erase);
ubi_assert(list_empty(&free));
/*
* If fastmap is leaking PEBs (must not happen), raise a
* fat warning and fall back to scanning mode.
* We do this here because in ubi_wl_init() it's too late
* and we cannot fall back to scanning.
*/
if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
ai->bad_peb_count - fm->used_blocks))
goto fail_bad;
return 0;
fail_bad:
ret = UBI_BAD_FASTMAP;
fail:
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
list_del(&tmp_aeb->u.list);
ubi_free_aeb(ai, tmp_aeb);
}
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
list_del(&tmp_aeb->u.list);
ubi_free_aeb(ai, tmp_aeb);
}
return ret;
}
/**
* find_fm_anchor - find the most recent Fastmap superblock (anchor)
* @ai: UBI attach info to be filled
*/
static int find_fm_anchor(struct ubi_attach_info *ai)
{
int ret = -1;
struct ubi_ainf_peb *aeb;
unsigned long long max_sqnum = 0;
list_for_each_entry(aeb, &ai->fastmap, u.list) {
if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
max_sqnum = aeb->sqnum;
ret = aeb->pnum;
}
}
return ret;
}
static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
struct ubi_ainf_peb *old)
{
struct ubi_ainf_peb *new;
new = ubi_alloc_aeb(ai, old->pnum, old->ec);
if (!new)
return NULL;
new->vol_id = old->vol_id;
new->sqnum = old->sqnum;
new->lnum = old->lnum;
new->scrub = old->scrub;
new->copy_flag = old->copy_flag;
return new;
}
/**
* ubi_scan_fastmap - scan the fastmap.
* @ubi: UBI device object
* @ai: UBI attach info to be filled
* @scan_ai: UBI attach info from the first 64 PEBs,
* used to find the most recent Fastmap data structure
*
* Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
* UBI_BAD_FASTMAP if one was found but is not usable.
* < 0 indicates an internal error.
*/
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_attach_info *scan_ai)
{
struct ubi_fm_sb *fmsb, *fmsb2;
struct ubi_vid_io_buf *vb;
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_fastmap_layout *fm;
struct ubi_ainf_peb *aeb;
int i, used_blocks, pnum, fm_anchor, ret = 0;
size_t fm_size;
__be32 crc, tmp_crc;
unsigned long long sqnum = 0;
fm_anchor = find_fm_anchor(scan_ai);
if (fm_anchor < 0)
return UBI_NO_FASTMAP;
/* Copy all (possible) fastmap blocks into our new attach structure. */
list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
struct ubi_ainf_peb *new;
new = clone_aeb(ai, aeb);
if (!new)
return -ENOMEM;
list_add(&new->u.list, &ai->fastmap);
}
down_write(&ubi->fm_protect);
memset(ubi->fm_buf, 0, ubi->fm_size);
fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
if (!fmsb) {
ret = -ENOMEM;
goto out;
}
fm = kzalloc(sizeof(*fm), GFP_KERNEL);
if (!fm) {
ret = -ENOMEM;
kfree(fmsb);
goto out;
}
ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
if (ret && ret != UBI_IO_BITFLIPS)
goto free_fm_sb;
else if (ret == UBI_IO_BITFLIPS)
fm->to_be_tortured[0] = 1;
if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
if (fmsb->version != UBI_FM_FMT_VERSION) {
ubi_err(ubi, "bad fastmap version: %i, expected: %i",
fmsb->version, UBI_FM_FMT_VERSION);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
used_blocks = be32_to_cpu(fmsb->used_blocks);
if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
ubi_err(ubi, "number of fastmap blocks is invalid: %i",
used_blocks);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
fm_size = ubi->leb_size * used_blocks;
if (fm_size != ubi->fm_size) {
ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
fm_size, ubi->fm_size);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech) {
ret = -ENOMEM;
goto free_fm_sb;
}
vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!vb) {
ret = -ENOMEM;
goto free_hdr;
}
vh = ubi_get_vid_hdr(vb);
for (i = 0; i < used_blocks; i++) {
int image_seq;
pnum = be32_to_cpu(fmsb->block_loc[i]);
if (ubi_io_is_bad(ubi, pnum)) {
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
if (i == 0 && pnum != fm_anchor) {
ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
pnum, fm_anchor);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
i, pnum);
if (ret > 0)
ret = UBI_BAD_FASTMAP;
goto free_hdr;
} else if (ret == UBI_IO_BITFLIPS)
fm->to_be_tortured[i] = 1;
image_seq = be32_to_cpu(ech->image_seq);
if (!ubi->image_seq)
ubi->image_seq = image_seq;
/*
* Older UBI implementations have image_seq set to zero, so
* we shouldn't fail if image_seq == 0.
*/
if (image_seq && (image_seq != ubi->image_seq)) {
ubi_err(ubi, "wrong image seq:%d instead of %d",
be32_to_cpu(ech->image_seq), ubi->image_seq);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
i, pnum);
goto free_hdr;
}
if (i == 0) {
if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
be32_to_cpu(vh->vol_id),
UBI_FM_SB_VOLUME_ID);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
} else {
if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
be32_to_cpu(vh->vol_id),
UBI_FM_DATA_VOLUME_ID);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
}
if (sqnum < be64_to_cpu(vh->sqnum))
sqnum = be64_to_cpu(vh->sqnum);
ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
pnum, 0, ubi->leb_size);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
"err: %i)", i, pnum, ret);
goto free_hdr;
}
}
kfree(fmsb);
fmsb = NULL;
fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
tmp_crc = be32_to_cpu(fmsb2->data_crc);
fmsb2->data_crc = 0;
crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
if (crc != tmp_crc) {
ubi_err(ubi, "fastmap data CRC is invalid");
ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
tmp_crc, crc);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
fmsb2->sqnum = sqnum;
fm->used_blocks = used_blocks;
ret = ubi_attach_fastmap(ubi, ai, fm);
if (ret) {
if (ret > 0)
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
for (i = 0; i < used_blocks; i++) {
struct ubi_wl_entry *e;
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e) {
while (i--)
kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
ret = -ENOMEM;
goto free_hdr;
}
e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
e->ec = be32_to_cpu(fmsb2->block_ec[i]);
fm->e[i] = e;
}
ubi->fm = fm;
ubi->fm_pool.max_size = ubi->fm->max_pool_size;
ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
ubi_msg(ubi, "attached by fastmap");
ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
ubi_msg(ubi, "fastmap WL pool size: %d",
ubi->fm_wl_pool.max_size);
ubi->fm_disabled = 0;
ubi->fast_attach = 1;
ubi_free_vid_buf(vb);
kfree(ech);
out:
up_write(&ubi->fm_protect);
if (ret == UBI_BAD_FASTMAP)
ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
return ret;
free_hdr:
ubi_free_vid_buf(vb);
kfree(ech);
free_fm_sb:
kfree(fmsb);
kfree(fm);
goto out;
}
int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
{
struct ubi_device *ubi = vol->ubi;
if (!ubi->fast_attach)
return 0;
vol->checkmap = bitmap_zalloc(leb_count, GFP_KERNEL);
if (!vol->checkmap)
return -ENOMEM;
return 0;
}
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
{
bitmap_free(vol->checkmap);
}
/**
* ubi_write_fastmap - writes a fastmap.
* @ubi: UBI device object
 * @new_fm: the fastmap to be written
*
* Returns 0 on success, < 0 indicates an internal error.
*/
static int ubi_write_fastmap(struct ubi_device *ubi,
struct ubi_fastmap_layout *new_fm)
{
size_t fm_pos = 0;
void *fm_raw;
struct ubi_fm_sb *fmsb;
struct ubi_fm_hdr *fmh;
struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
struct ubi_fm_ec *fec;
struct ubi_fm_volhdr *fvh;
struct ubi_fm_eba *feba;
struct ubi_wl_entry *wl_e;
struct ubi_volume *vol;
struct ubi_vid_io_buf *avbuf, *dvbuf;
struct ubi_vid_hdr *avhdr, *dvhdr;
struct ubi_work *ubi_wrk;
struct rb_node *tmp_rb;
int ret, i, j, free_peb_count, used_peb_count, vol_count;
int scrub_peb_count, erase_peb_count;
unsigned long *seen_pebs;
fm_raw = ubi->fm_buf;
memset(ubi->fm_buf, 0, ubi->fm_size);
avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
if (!avbuf) {
ret = -ENOMEM;
goto out;
}
dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
if (!dvbuf) {
ret = -ENOMEM;
goto out_free_avbuf;
}
avhdr = ubi_get_vid_hdr(avbuf);
dvhdr = ubi_get_vid_hdr(dvbuf);
seen_pebs = init_seen(ubi);
if (IS_ERR(seen_pebs)) {
ret = PTR_ERR(seen_pebs);
goto out_free_dvbuf;
}
spin_lock(&ubi->volumes_lock);
spin_lock(&ubi->wl_lock);
fmsb = (struct ubi_fm_sb *)fm_raw;
fm_pos += sizeof(*fmsb);
ubi_assert(fm_pos <= ubi->fm_size);
fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmh);
ubi_assert(fm_pos <= ubi->fm_size);
fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
fmsb->version = UBI_FM_FMT_VERSION;
fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
/* the max sqnum will be filled in while *reading* the fastmap */
fmsb->sqnum = 0;
fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
free_peb_count = 0;
used_peb_count = 0;
scrub_peb_count = 0;
erase_peb_count = 0;
vol_count = 0;
fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmpl);
fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
fmpl->size = cpu_to_be16(ubi->fm_pool.size);
fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
for (i = 0; i < ubi->fm_pool.size; i++) {
fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
}
fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmpl_wl);
fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
for (i = 0; i < ubi->fm_wl_pool.size; i++) {
fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
}
ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
free_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
fmh->free_peb_count = cpu_to_be32(free_peb_count);
ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
used_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
ubi_for_each_protected_peb(ubi, i, wl_e) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
used_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
fmh->used_peb_count = cpu_to_be32(used_peb_count);
ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
scrub_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
list_for_each_entry(ubi_wrk, &ubi->works, list) {
if (ubi_is_erase_work(ubi_wrk)) {
wl_e = ubi_wrk->e;
ubi_assert(wl_e);
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
erase_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
}
fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
vol = ubi->volumes[i];
if (!vol)
continue;
vol_count++;
fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
fm_pos += sizeof(*fvh);
ubi_assert(fm_pos <= ubi->fm_size);
fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
fvh->vol_id = cpu_to_be32(vol->vol_id);
fvh->vol_type = vol->vol_type;
fvh->used_ebs = cpu_to_be32(vol->used_ebs);
fvh->data_pad = cpu_to_be32(vol->data_pad);
fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
vol->vol_type == UBI_STATIC_VOLUME);
feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
ubi_assert(fm_pos <= ubi->fm_size);
for (j = 0; j < vol->reserved_pebs; j++) {
struct ubi_eba_leb_desc ldesc;
ubi_eba_get_ldesc(vol, j, &ldesc);
feba->pnum[j] = cpu_to_be32(ldesc.pnum);
}
feba->reserved_pebs = cpu_to_be32(j);
feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
}
fmh->vol_count = cpu_to_be32(vol_count);
fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
avhdr->lnum = 0;
spin_unlock(&ubi->wl_lock);
spin_unlock(&ubi->volumes_lock);
dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
goto out_free_seen;
}
for (i = 0; i < new_fm->used_blocks; i++) {
fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
}
fmsb->data_crc = 0;
fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
ubi->fm_size));
for (i = 1; i < new_fm->used_blocks; i++) {
dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
dvhdr->lnum = cpu_to_be32(i);
dbg_bld("writing fastmap data to PEB %i sqnum %llu",
new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
new_fm->e[i]->pnum);
goto out_free_seen;
}
}
for (i = 0; i < new_fm->used_blocks; i++) {
ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
new_fm->e[i]->pnum, 0, ubi->leb_size);
if (ret) {
ubi_err(ubi, "unable to write fastmap to PEB %i!",
new_fm->e[i]->pnum);
goto out_free_seen;
}
}
ubi_assert(new_fm);
ubi->fm = new_fm;
ret = self_check_seen(ubi, seen_pebs);
dbg_bld("fastmap written!");
out_free_seen:
free_seen(seen_pebs);
out_free_dvbuf:
ubi_free_vid_buf(dvbuf);
out_free_avbuf:
ubi_free_vid_buf(avbuf);
out:
return ret;
}
/**
* invalidate_fastmap - destroys a fastmap.
* @ubi: UBI device object
*
* This function ensures that upon next UBI attach a full scan
* is issued. We need this if UBI is about to write a new fastmap
* but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
* current fastmap in a valid state.
* Returns 0 on success, < 0 indicates an internal error.
*/
static int invalidate_fastmap(struct ubi_device *ubi)
{
int ret;
struct ubi_fastmap_layout *fm;
struct ubi_wl_entry *e;
struct ubi_vid_io_buf *vb = NULL;
struct ubi_vid_hdr *vh;
if (!ubi->fm)
return 0;
ubi->fm = NULL;
ret = -ENOMEM;
fm = kzalloc(sizeof(*fm), GFP_NOFS);
if (!fm)
goto out;
vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
if (!vb)
goto out_free_fm;
vh = ubi_get_vid_hdr(vb);
ret = -ENOSPC;
e = ubi_wl_get_fm_peb(ubi, 1);
if (!e)
goto out_free_fm;
/*
 * Create a fake fastmap such that UBI will fall back
* to scanning mode.
*/
vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
if (ret < 0) {
ubi_wl_put_fm_peb(ubi, e, 0, 0);
goto out_free_fm;
}
fm->used_blocks = 1;
fm->e[0] = e;
ubi->fm = fm;
out:
ubi_free_vid_buf(vb);
return ret;
out_free_fm:
kfree(fm);
goto out;
}
/**
* return_fm_pebs - returns all PEBs used by a fastmap back to the
* WL sub-system.
* @ubi: UBI device object
* @fm: fastmap layout object
*/
static void return_fm_pebs(struct ubi_device *ubi,
struct ubi_fastmap_layout *fm)
{
int i;
if (!fm)
return;
for (i = 0; i < fm->used_blocks; i++) {
if (fm->e[i]) {
ubi_wl_put_fm_peb(ubi, fm->e[i], i,
fm->to_be_tortured[i]);
fm->e[i] = NULL;
}
}
}
/**
* ubi_update_fastmap - will be called by UBI if a volume changes or
* a fastmap pool becomes full.
* @ubi: UBI device object
*
* Returns 0 on success, < 0 indicates an internal error.
*/
int ubi_update_fastmap(struct ubi_device *ubi)
{
int ret, i, j;
struct ubi_fastmap_layout *new_fm, *old_fm;
struct ubi_wl_entry *tmp_e;
ubi_refill_pools_and_lock(ubi);
if (ubi->ro_mode || ubi->fm_disabled) {
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return 0;
}
new_fm = kzalloc(sizeof(*new_fm), GFP_NOFS);
if (!new_fm) {
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return -ENOMEM;
}
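	/* the fastmap spans fm_size bytes, i.e. one or more whole LEBs */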
new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
old_fm = ubi->fm;
ubi->fm = NULL;
if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
ubi_err(ubi, "fastmap too large");
ret = -ENOSPC;
goto err;
}
for (i = 1; i < new_fm->used_blocks; i++) {
spin_lock(&ubi->wl_lock);
tmp_e = ubi_wl_get_fm_peb(ubi, 0);
spin_unlock(&ubi->wl_lock);
if (!tmp_e) {
if (old_fm && old_fm->e[i]) {
ret = ubi_sync_erase(ubi, old_fm->e[i], 0);
if (ret < 0) {
ubi_err(ubi, "could not erase old fastmap PEB");
for (j = 1; j < i; j++) {
ubi_wl_put_fm_peb(ubi, new_fm->e[j],
j, 0);
new_fm->e[j] = NULL;
}
goto err;
}
new_fm->e[i] = old_fm->e[i];
old_fm->e[i] = NULL;
} else {
ubi_err(ubi, "could not get any free erase block");
for (j = 1; j < i; j++) {
ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
new_fm->e[j] = NULL;
}
ret = -ENOSPC;
goto err;
}
} else {
new_fm->e[i] = tmp_e;
if (old_fm && old_fm->e[i]) {
ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
old_fm->to_be_tortured[i]);
old_fm->e[i] = NULL;
}
}
}
/* Old fastmap is larger than the new one */
if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
old_fm->to_be_tortured[i]);
old_fm->e[i] = NULL;
}
}
spin_lock(&ubi->wl_lock);
tmp_e = ubi->fm_anchor;
ubi->fm_anchor = NULL;
spin_unlock(&ubi->wl_lock);
if (old_fm) {
/* no fresh anchor PEB was found, reuse the old one */
if (!tmp_e) {
ret = ubi_sync_erase(ubi, old_fm->e[0], 0);
if (ret < 0) {
ubi_err(ubi, "could not erase old anchor PEB");
for (i = 1; i < new_fm->used_blocks; i++) {
ubi_wl_put_fm_peb(ubi, new_fm->e[i],
i, 0);
new_fm->e[i] = NULL;
}
goto err;
}
new_fm->e[0] = old_fm->e[0];
old_fm->e[0] = NULL;
} else {
/* we've got a new anchor PEB, return the old one */
ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
old_fm->to_be_tortured[0]);
new_fm->e[0] = tmp_e;
old_fm->e[0] = NULL;
}
} else {
if (!tmp_e) {
ubi_err(ubi, "could not find any anchor PEB");
for (i = 1; i < new_fm->used_blocks; i++) {
ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
new_fm->e[i] = NULL;
}
ret = -ENOSPC;
goto err;
}
new_fm->e[0] = tmp_e;
}
ret = ubi_write_fastmap(ubi, new_fm);
if (ret)
goto err;
out_unlock:
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
kfree(old_fm);
ubi_ensure_anchor_pebs(ubi);
return ret;
err:
ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
ret = invalidate_fastmap(ubi);
if (ret < 0) {
ubi_err(ubi, "Unable to invalidate current fastmap!");
ubi_ro_mode(ubi);
} else {
return_fm_pebs(ubi, old_fm);
return_fm_pebs(ubi, new_fm);
ret = 0;
}
kfree(new_fm);
goto out_unlock;
}
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Bridge between MCE and APEI
*
 * On some machines, corrected memory errors are reported via APEI
 * generic hardware error source (GHES) instead of corrected Machine
 * Check. These corrected memory errors can be reported to user space
 * through /dev/mcelog by faking a corrected Machine Check, so that
 * the error memory page can be offlined by /sbin/mcelog if the error
 * count for one page is beyond the threshold.
*
* For fatal MCE, save MCE record into persistent storage via ERST, so
* that the MCE record can be logged after reboot via ERST.
*
* Copyright 2010 Intel Corp.
* Author: Huang Ying <[email protected]>
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/cper.h>
#include <acpi/apei.h>
#include <acpi/ghes.h>
#include <asm/mce.h>
#include "internal.h"
void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
{
struct mce_hw_err err;
struct mce *m;
int lsb;
if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
return;
/*
* Even if the ->validation_bits are set for address mask,
* to be extra safe, check and reject an error radius '0',
* and fall back to the default page size.
*/
if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
lsb = find_first_bit((void *)&mem_err->physical_addr_mask, PAGE_SHIFT);
else
lsb = PAGE_SHIFT;
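	/*
	 * Example: a physical_addr_mask of 0xfffffffffffff000 yields
	 * lsb == 12, i.e. a 4K error granularity.
	 */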
mce_prep_record(&err);
m = &err.m;
m->bank = -1;
/* Fake a memory read error with unknown channel */
m->status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | MCI_STATUS_MISCV | 0x9f;
m->misc = (MCI_MISC_ADDR_PHYS << 6) | lsb;
if (severity >= GHES_SEV_RECOVERABLE)
m->status |= MCI_STATUS_UC;
if (severity >= GHES_SEV_PANIC) {
m->status |= MCI_STATUS_PCC;
m->tsc = rdtsc();
}
m->addr = mem_err->physical_addr;
mce_log(&err);
}
EXPORT_SYMBOL_GPL(apei_mce_report_mem_error);
int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id)
{
const u64 *i_mce = ((const u64 *) (ctx_info + 1));
unsigned int cpu, num_regs;
bool apicid_found = false;
struct mce_hw_err err;
struct mce *m;
if (!boot_cpu_has(X86_FEATURE_SMCA))
return -EINVAL;
/*
 * The starting address of the register array extracted from BERT must
 * match the first expected register in the register layout of the
 * SMCA address space. This address corresponds to the bank's MCA_STATUS
* register.
*
* Match any MCi_STATUS register by turning off bank numbers.
*/
if ((ctx_info->msr_addr & MSR_AMD64_SMCA_MC0_STATUS) !=
MSR_AMD64_SMCA_MC0_STATUS)
return -EINVAL;
/*
* The number of registers in the register array is determined by
* Register Array Size/8 as defined in UEFI spec v2.8, sec N.2.4.2.2.
 * Sanity-check the register array size.
*/
num_regs = ctx_info->reg_arr_size >> 3;
if (!num_regs)
return -EINVAL;
for_each_possible_cpu(cpu) {
if (cpu_data(cpu).topo.initial_apicid == lapic_id) {
apicid_found = true;
break;
}
}
if (!apicid_found)
return -EINVAL;
m = &err.m;
memset(&err, 0, sizeof(struct mce_hw_err));
mce_prep_record_common(m);
mce_prep_record_per_cpu(cpu, m);
m->bank = (ctx_info->msr_addr >> 4) & 0xFF;
/*
* The SMCA register layout is fixed and includes 16 registers.
* The end of the array may be variable, but the beginning is known.
* Cap the number of registers to expected max (15).
*/
if (num_regs > 15)
num_regs = 15;
switch (num_regs) {
/* MCA_SYND2 */
case 15:
err.vendor.amd.synd2 = *(i_mce + 14);
fallthrough;
/* MCA_SYND1 */
case 14:
err.vendor.amd.synd1 = *(i_mce + 13);
fallthrough;
/* MCA_MISC4 */
case 13:
/* MCA_MISC3 */
case 12:
/* MCA_MISC2 */
case 11:
/* MCA_MISC1 */
case 10:
/* MCA_DEADDR */
case 9:
/* MCA_DESTAT */
case 8:
/* reserved */
case 7:
/* MCA_SYND */
case 6:
m->synd = *(i_mce + 5);
fallthrough;
/* MCA_IPID */
case 5:
m->ipid = *(i_mce + 4);
fallthrough;
/* MCA_CONFIG */
case 4:
/* MCA_MISC0 */
case 3:
m->misc = *(i_mce + 2);
fallthrough;
/* MCA_ADDR */
case 2:
m->addr = *(i_mce + 1);
fallthrough;
/* MCA_STATUS */
case 1:
m->status = *i_mce;
}
mce_log(&err);
return 0;
}
#define CPER_CREATOR_MCE \
GUID_INIT(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \
0x64, 0x90, 0xb8, 0x9d)
#define CPER_SECTION_TYPE_MCE \
GUID_INIT(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \
0x04, 0x4a, 0x38, 0xfc)
/*
 * The CPER specification (UEFI specification 2.3, appendix N) requires
 * these records to be byte-packed.
*/
struct cper_mce_record {
struct cper_record_header hdr;
struct cper_section_descriptor sec_hdr;
struct mce mce;
} __packed;
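/*
 * Illustrative compile-time check (not in the original file): because the
 * record is __packed, its size is exactly the sum of its members' sizes.
 */
static_assert(sizeof(struct cper_mce_record) ==
	      sizeof(struct cper_record_header) +
	      sizeof(struct cper_section_descriptor) +
	      sizeof(struct mce));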
int apei_write_mce(struct mce *m)
{
struct cper_mce_record rcd;
memset(&rcd, 0, sizeof(rcd));
memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
rcd.hdr.revision = CPER_RECORD_REV;
rcd.hdr.signature_end = CPER_SIG_END;
rcd.hdr.section_count = 1;
rcd.hdr.error_severity = CPER_SEV_FATAL;
/* timestamp, platform_id, partition_id are all invalid */
rcd.hdr.validation_bits = 0;
rcd.hdr.record_length = sizeof(rcd);
rcd.hdr.creator_id = CPER_CREATOR_MCE;
rcd.hdr.notification_type = CPER_NOTIFY_MCE;
rcd.hdr.record_id = cper_next_record_id();
rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;
rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd;
rcd.sec_hdr.section_length = sizeof(rcd.mce);
rcd.sec_hdr.revision = CPER_SEC_REV;
	/* fru_id and fru_text are invalid */
rcd.sec_hdr.validation_bits = 0;
rcd.sec_hdr.flags = CPER_SEC_PRIMARY;
rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
rcd.sec_hdr.section_severity = CPER_SEV_FATAL;
memcpy(&rcd.mce, m, sizeof(*m));
return erst_write(&rcd.hdr);
}
ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
struct cper_mce_record rcd;
int rc, pos;
rc = erst_get_record_id_begin(&pos);
if (rc)
return rc;
retry:
rc = erst_get_record_id_next(&pos, record_id);
if (rc)
goto out;
	/* no more records */
if (*record_id == APEI_ERST_INVALID_RECORD_ID)
goto out;
rc = erst_read_record(*record_id, &rcd.hdr, sizeof(rcd), sizeof(rcd),
&CPER_CREATOR_MCE);
/* someone else has cleared the record, try next one */
if (rc == -ENOENT)
goto retry;
else if (rc < 0)
goto out;
memcpy(m, &rcd.mce, sizeof(*m));
rc = sizeof(*m);
out:
erst_get_record_id_end();
return rc;
}
/* Check whether there are records in ERST */
int apei_check_mce(void)
{
return erst_get_record_count();
}
int apei_clear_mce(u64 record_id)
{
return erst_clear(record_id);
}
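/*
 * Minimal usage sketch (illustrative, not part of the original file):
 * a consumer could drain the ERST-backed MCE records with the helpers
 * above. Real users (e.g. the mcelog interface) add more bookkeeping.
 */
static inline void apei_drain_mce_example(void)
{
	struct mce m;
	u64 record_id;

	while (apei_check_mce() > 0) {
		if (apei_read_mce(&m, &record_id) <= 0)
			break;	/* read error or no more records */
		/* ... hand 'm' to the MCE decoder/logger here ... */
		apei_clear_mce(record_id);
	}
}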
|
// SPDX-License-Identifier: GPL-2.0
/*
* Ioctl to enable verity on a file
*
* Copyright 2019 Google LLC
*/
#include "fsverity_private.h"
#include <crypto/hash.h>
#include <linux/mount.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
struct block_buffer {
u32 filled;
bool is_root_hash;
u8 *data;
};
/* Hash a block, writing the result to the next level's pending block buffer. */
static int hash_one_block(struct inode *inode,
const struct merkle_tree_params *params,
struct block_buffer *cur)
{
struct block_buffer *next = cur + 1;
int err;
/*
* Safety check to prevent a buffer overflow in case of a filesystem bug
* that allows the file size to change despite deny_write_access(), or a
* bug in the Merkle tree logic itself
*/
if (WARN_ON_ONCE(next->is_root_hash && next->filled != 0))
return -EINVAL;
/* Zero-pad the block if it's shorter than the block size. */
memset(&cur->data[cur->filled], 0, params->block_size - cur->filled);
err = fsverity_hash_block(params, inode, cur->data,
&next->data[next->filled]);
if (err)
return err;
next->filled += params->digest_size;
cur->filled = 0;
return 0;
}
static int write_merkle_tree_block(struct inode *inode, const u8 *buf,
unsigned long index,
const struct merkle_tree_params *params)
{
u64 pos = (u64)index << params->log_blocksize;
int err;
err = inode->i_sb->s_vop->write_merkle_tree_block(inode, buf, pos,
params->block_size);
if (err)
fsverity_err(inode, "Error %d writing Merkle tree block %lu",
err, index);
return err;
}
/*
* Build the Merkle tree for the given file using the given parameters, and
* return the root hash in @root_hash.
*
* The tree is written to a filesystem-specific location as determined by the
* ->write_merkle_tree_block() method. However, the blocks that comprise the
* tree are the same for all filesystems.
*/
static int build_merkle_tree(struct file *filp,
const struct merkle_tree_params *params,
u8 *root_hash)
{
struct inode *inode = file_inode(filp);
const u64 data_size = inode->i_size;
const int num_levels = params->num_levels;
struct block_buffer _buffers[1 + FS_VERITY_MAX_LEVELS + 1] = {};
struct block_buffer *buffers = &_buffers[1];
unsigned long level_offset[FS_VERITY_MAX_LEVELS];
int level;
u64 offset;
int err;
if (data_size == 0) {
/* Empty file is a special case; root hash is all 0's */
memset(root_hash, 0, params->digest_size);
return 0;
}
/*
* Allocate the block buffers. Buffer "-1" is for data blocks.
* Buffers 0 <= level < num_levels are for the actual tree levels.
* Buffer 'num_levels' is for the root hash.
*/
for (level = -1; level < num_levels; level++) {
buffers[level].data = kzalloc(params->block_size, GFP_KERNEL);
if (!buffers[level].data) {
err = -ENOMEM;
goto out;
}
}
buffers[num_levels].data = root_hash;
buffers[num_levels].is_root_hash = true;
BUILD_BUG_ON(sizeof(level_offset) != sizeof(params->level_start));
memcpy(level_offset, params->level_start, sizeof(level_offset));
/* Hash each data block, also hashing the tree blocks as they fill up */
for (offset = 0; offset < data_size; offset += params->block_size) {
ssize_t bytes_read;
loff_t pos = offset;
buffers[-1].filled = min_t(u64, params->block_size,
data_size - offset);
bytes_read = __kernel_read(filp, buffers[-1].data,
buffers[-1].filled, &pos);
if (bytes_read < 0) {
err = bytes_read;
fsverity_err(inode, "Error %d reading file data", err);
goto out;
}
if (bytes_read != buffers[-1].filled) {
err = -EINVAL;
fsverity_err(inode, "Short read of file data");
goto out;
}
err = hash_one_block(inode, params, &buffers[-1]);
if (err)
goto out;
for (level = 0; level < num_levels; level++) {
if (buffers[level].filled + params->digest_size <=
params->block_size) {
/* Next block at @level isn't full yet */
break;
}
/* Next block at @level is full */
err = hash_one_block(inode, params, &buffers[level]);
if (err)
goto out;
err = write_merkle_tree_block(inode,
buffers[level].data,
level_offset[level],
params);
if (err)
goto out;
level_offset[level]++;
}
if (fatal_signal_pending(current)) {
err = -EINTR;
goto out;
}
cond_resched();
}
/* Finish all nonempty pending tree blocks. */
for (level = 0; level < num_levels; level++) {
if (buffers[level].filled != 0) {
err = hash_one_block(inode, params, &buffers[level]);
if (err)
goto out;
err = write_merkle_tree_block(inode,
buffers[level].data,
level_offset[level],
params);
if (err)
goto out;
}
}
/* The root hash was filled by the last call to hash_one_block(). */
if (WARN_ON_ONCE(buffers[num_levels].filled != params->digest_size)) {
err = -EINVAL;
goto out;
}
err = 0;
out:
for (level = -1; level < num_levels; level++)
kfree(buffers[level].data);
return err;
}
static int enable_verity(struct file *filp,
const struct fsverity_enable_arg *arg)
{
struct inode *inode = file_inode(filp);
const struct fsverity_operations *vops = inode->i_sb->s_vop;
struct merkle_tree_params params = { };
struct fsverity_descriptor *desc;
size_t desc_size = struct_size(desc, signature, arg->sig_size);
struct fsverity_info *vi;
int err;
/* Start initializing the fsverity_descriptor */
desc = kzalloc(desc_size, GFP_KERNEL);
if (!desc)
return -ENOMEM;
desc->version = 1;
desc->hash_algorithm = arg->hash_algorithm;
desc->log_blocksize = ilog2(arg->block_size);
/* Get the salt if the user provided one */
if (arg->salt_size &&
copy_from_user(desc->salt, u64_to_user_ptr(arg->salt_ptr),
arg->salt_size)) {
err = -EFAULT;
goto out;
}
desc->salt_size = arg->salt_size;
/* Get the builtin signature if the user provided one */
if (arg->sig_size &&
copy_from_user(desc->signature, u64_to_user_ptr(arg->sig_ptr),
arg->sig_size)) {
err = -EFAULT;
goto out;
}
desc->sig_size = cpu_to_le32(arg->sig_size);
desc->data_size = cpu_to_le64(inode->i_size);
/* Prepare the Merkle tree parameters */
	err = fsverity_init_merkle_tree_params(&params, inode,
arg->hash_algorithm,
desc->log_blocksize,
desc->salt, desc->salt_size);
if (err)
goto out;
/*
* Start enabling verity on this file, serialized by the inode lock.
* Fail if verity is already enabled or is already being enabled.
*/
inode_lock(inode);
if (IS_VERITY(inode))
err = -EEXIST;
else
err = vops->begin_enable_verity(filp);
inode_unlock(inode);
if (err)
goto out;
/*
* Build the Merkle tree. Don't hold the inode lock during this, since
* on huge files this may take a very long time and we don't want to
* force unrelated syscalls like chown() to block forever. We don't
* need the inode lock here because deny_write_access() already prevents
* the file from being written to or truncated, and we still serialize
* ->begin_enable_verity() and ->end_enable_verity() using the inode
* lock and only allow one process to be here at a time on a given file.
*/
BUILD_BUG_ON(sizeof(desc->root_hash) < FS_VERITY_MAX_DIGEST_SIZE);
	err = build_merkle_tree(filp, &params, desc->root_hash);
if (err) {
fsverity_err(inode, "Error %d building Merkle tree", err);
goto rollback;
}
/*
* Create the fsverity_info. Don't bother trying to save work by
* reusing the merkle_tree_params from above. Instead, just create the
* fsverity_info from the fsverity_descriptor as if it were just loaded
* from disk. This is simpler, and it serves as an extra check that the
* metadata we're writing is valid before actually enabling verity.
*/
vi = fsverity_create_info(inode, desc);
if (IS_ERR(vi)) {
err = PTR_ERR(vi);
goto rollback;
}
/*
* Tell the filesystem to finish enabling verity on the file.
* Serialized with ->begin_enable_verity() by the inode lock.
*/
inode_lock(inode);
err = vops->end_enable_verity(filp, desc, desc_size, params.tree_size);
inode_unlock(inode);
if (err) {
fsverity_err(inode, "%ps() failed with err %d",
vops->end_enable_verity, err);
fsverity_free_info(vi);
} else if (WARN_ON_ONCE(!IS_VERITY(inode))) {
err = -EINVAL;
fsverity_free_info(vi);
} else {
/* Successfully enabled verity */
/*
* Readers can start using ->i_verity_info immediately, so it
* can't be rolled back once set. So don't set it until just
* after the filesystem has successfully enabled verity.
*/
fsverity_set_info(inode, vi);
}
out:
kfree(params.hashstate);
kfree(desc);
return err;
rollback:
inode_lock(inode);
(void)vops->end_enable_verity(filp, NULL, 0, params.tree_size);
inode_unlock(inode);
goto out;
}
/**
* fsverity_ioctl_enable() - enable verity on a file
* @filp: file to enable verity on
* @uarg: user pointer to fsverity_enable_arg
*
* Enable fs-verity on a file. See the "FS_IOC_ENABLE_VERITY" section of
* Documentation/filesystems/fsverity.rst for the documentation.
*
* Return: 0 on success, -errno on failure
*/
int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
{
struct inode *inode = file_inode(filp);
struct fsverity_enable_arg arg;
int err;
if (copy_from_user(&arg, uarg, sizeof(arg)))
return -EFAULT;
if (arg.version != 1)
return -EINVAL;
if (arg.__reserved1 ||
memchr_inv(arg.__reserved2, 0, sizeof(arg.__reserved2)))
return -EINVAL;
if (!is_power_of_2(arg.block_size))
return -EINVAL;
if (arg.salt_size > sizeof_field(struct fsverity_descriptor, salt))
return -EMSGSIZE;
if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE)
return -EMSGSIZE;
/*
* Require a regular file with write access. But the actual fd must
* still be readonly so that we can lock out all writers. This is
* needed to guarantee that no writable fds exist to the file once it
* has verity enabled, and to stabilize the data being hashed.
*/
err = file_permission(filp, MAY_WRITE);
if (err)
return err;
/*
* __kernel_read() is used while building the Merkle tree. So, we can't
* allow file descriptors that were opened for ioctl access only, using
* the special nonstandard access mode 3. O_RDONLY only, please!
*/
if (!(filp->f_mode & FMODE_READ))
return -EBADF;
if (IS_APPEND(inode))
return -EPERM;
if (S_ISDIR(inode->i_mode))
return -EISDIR;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
err = mnt_want_write_file(filp);
if (err) /* -EROFS */
return err;
err = deny_write_access(filp);
if (err) /* -ETXTBSY */
goto out_drop_write;
err = enable_verity(filp, &arg);
/*
* We no longer drop the inode's pagecache after enabling verity. This
* used to be done to try to avoid a race condition where pages could be
* evicted after being used in the Merkle tree construction, then
* re-instantiated by a concurrent read. Such pages are unverified, and
* the backing storage could have filled them with different content, so
* they shouldn't be used to fulfill reads once verity is enabled.
*
* But, dropping the pagecache has a big performance impact, and it
* doesn't fully solve the race condition anyway. So for those reasons,
* and also because this race condition isn't very important relatively
* speaking (especially for small-ish files, where the chance of a page
* being used, evicted, *and* re-instantiated all while enabling verity
* is quite small), we no longer drop the inode's pagecache.
*/
/*
* allow_write_access() is needed to pair with deny_write_access().
* Regardless, the filesystem won't allow writing to verity files.
*/
allow_write_access(filp);
out_drop_write:
mnt_drop_write_file(filp);
return err;
}
EXPORT_SYMBOL_GPL(fsverity_ioctl_enable);
|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2015-2020 Intel Corporation.
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
#include "bus.h"
#include "sysfs_local.h"
/*
* Slave sysfs
*/
/*
* The sysfs for Slave reflects the MIPI description as given
* in the MIPI DisCo spec.
* status and device_number come directly from the MIPI SoundWire
* 1.x specification.
*
* Base file is device
* |---- status
* |---- device_number
* |---- modalias
* |---- dev-properties
* |---- mipi_revision
* |---- wake_capable
* |---- test_mode_capable
* |---- clk_stop_mode1
* |---- simple_clk_stop_capable
* |---- clk_stop_timeout
* |---- ch_prep_timeout
* |---- reset_behave
* |---- high_PHY_capable
* |---- paging_support
* |---- bank_delay_support
* |---- p15_behave
* |---- master_count
* |---- source_ports
* |---- sink_ports
* |---- dp0
* |---- max_word
* |---- min_word
* |---- words
* |---- BRA_flow_controlled
* |---- simple_ch_prep_sm
* |---- imp_def_interrupts
* |---- dpN_<sink/src>
* |---- max_word
* |---- min_word
* |---- words
* |---- type
* |---- max_grouping
* |---- simple_ch_prep_sm
* |---- ch_prep_timeout
* |---- imp_def_interrupts
* |---- min_ch
* |---- max_ch
* |---- channels
* |---- ch_combinations
* |---- max_async_buffer
* |---- block_pack_mode
* |---- port_encoding
*
*/
#define sdw_slave_attr(field, format_string) \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct sdw_slave *slave = dev_to_sdw_dev(dev); \
return sprintf(buf, format_string, slave->prop.field); \
} \
static DEVICE_ATTR_RO(field)
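/*
 * For illustration (not part of the original file): the invocation
 * "sdw_slave_attr(wake_capable, "%d\n");" below expands to roughly
 *
 *	static ssize_t wake_capable_show(struct device *dev,
 *					 struct device_attribute *attr,
 *					 char *buf)
 *	{
 *		struct sdw_slave *slave = dev_to_sdw_dev(dev);
 *		return sprintf(buf, "%d\n", slave->prop.wake_capable);
 *	}
 *	static DEVICE_ATTR_RO(wake_capable);
 */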
sdw_slave_attr(mipi_revision, "0x%x\n");
sdw_slave_attr(wake_capable, "%d\n");
sdw_slave_attr(test_mode_capable, "%d\n");
sdw_slave_attr(clk_stop_mode1, "%d\n");
sdw_slave_attr(simple_clk_stop_capable, "%d\n");
sdw_slave_attr(clk_stop_timeout, "%d\n");
sdw_slave_attr(ch_prep_timeout, "%d\n");
sdw_slave_attr(reset_behave, "%d\n");
sdw_slave_attr(high_PHY_capable, "%d\n");
sdw_slave_attr(paging_support, "%d\n");
sdw_slave_attr(bank_delay_support, "%d\n");
sdw_slave_attr(p15_behave, "%d\n");
sdw_slave_attr(master_count, "%d\n");
sdw_slave_attr(source_ports, "0x%x\n");
sdw_slave_attr(sink_ports, "0x%x\n");
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sdw_slave *slave = dev_to_sdw_dev(dev);
return sdw_slave_modalias(slave, buf, 256);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *slave_attrs[] = {
&dev_attr_modalias.attr,
NULL,
};
static const struct attribute_group slave_attr_group = {
.attrs = slave_attrs,
};
static struct attribute *slave_dev_attrs[] = {
&dev_attr_mipi_revision.attr,
&dev_attr_wake_capable.attr,
&dev_attr_test_mode_capable.attr,
&dev_attr_clk_stop_mode1.attr,
&dev_attr_simple_clk_stop_capable.attr,
&dev_attr_clk_stop_timeout.attr,
&dev_attr_ch_prep_timeout.attr,
&dev_attr_reset_behave.attr,
&dev_attr_high_PHY_capable.attr,
&dev_attr_paging_support.attr,
&dev_attr_bank_delay_support.attr,
&dev_attr_p15_behave.attr,
&dev_attr_master_count.attr,
&dev_attr_source_ports.attr,
&dev_attr_sink_ports.attr,
NULL,
};
static const struct attribute_group sdw_slave_dev_attr_group = {
.attrs = slave_dev_attrs,
.name = "dev-properties",
};
/*
* DP0 sysfs
*/
#define sdw_dp0_attr(field, format_string) \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct sdw_slave *slave = dev_to_sdw_dev(dev); \
return sprintf(buf, format_string, slave->prop.dp0_prop->field);\
} \
static DEVICE_ATTR_RO(field)
sdw_dp0_attr(max_word, "%d\n");
sdw_dp0_attr(min_word, "%d\n");
sdw_dp0_attr(BRA_flow_controlled, "%d\n");
sdw_dp0_attr(simple_ch_prep_sm, "%d\n");
sdw_dp0_attr(imp_def_interrupts, "0x%x\n");
static ssize_t words_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sdw_slave *slave = dev_to_sdw_dev(dev);
ssize_t size = 0;
int i;
for (i = 0; i < slave->prop.dp0_prop->num_words; i++)
size += sprintf(buf + size, "%d ",
slave->prop.dp0_prop->words[i]);
size += sprintf(buf + size, "\n");
return size;
}
static DEVICE_ATTR_RO(words);
static struct attribute *dp0_attrs[] = {
&dev_attr_max_word.attr,
&dev_attr_min_word.attr,
&dev_attr_words.attr,
&dev_attr_BRA_flow_controlled.attr,
&dev_attr_simple_ch_prep_sm.attr,
&dev_attr_imp_def_interrupts.attr,
NULL,
};
static umode_t dp0_attr_visible(struct kobject *kobj, struct attribute *attr,
int n)
{
struct sdw_slave *slave = dev_to_sdw_dev(kobj_to_dev(kobj));
if (slave->prop.dp0_prop)
return attr->mode;
return 0;
}
static bool dp0_group_visible(struct kobject *kobj)
{
struct sdw_slave *slave = dev_to_sdw_dev(kobj_to_dev(kobj));
if (slave->prop.dp0_prop)
return true;
return false;
}
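/*
 * DEFINE_SYSFS_GROUP_VISIBLE(dp0) ties the two predicates above together:
 * roughly, it emits a sysfs_group_visible_dp0() helper that hides the whole
 * group when dp0_group_visible() returns false and otherwise defers to
 * dp0_attr_visible() for each attribute.
 */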
DEFINE_SYSFS_GROUP_VISIBLE(dp0);
static const struct attribute_group dp0_group = {
.attrs = dp0_attrs,
.is_visible = SYSFS_GROUP_VISIBLE(dp0),
.name = "dp0",
};
const struct attribute_group *sdw_attr_groups[] = {
&slave_attr_group,
&sdw_slave_dev_attr_group,
&dp0_group,
NULL,
};
/*
 * The status is shown in capital letters for UNATTACHED and RESERVED
 * on purpose, to alert users that these status values are not
 * expected.
*/
static const char *const slave_status[] = {
[SDW_SLAVE_UNATTACHED] = "UNATTACHED",
[SDW_SLAVE_ATTACHED] = "Attached",
[SDW_SLAVE_ALERT] = "Alert",
[SDW_SLAVE_RESERVED] = "RESERVED",
};
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sdw_slave *slave = dev_to_sdw_dev(dev);
return sprintf(buf, "%s\n", slave_status[slave->status]);
}
static DEVICE_ATTR_RO(status);
static ssize_t device_number_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sdw_slave *slave = dev_to_sdw_dev(dev);
if (slave->status == SDW_SLAVE_UNATTACHED)
return sprintf(buf, "%s", "N/A");
else
return sprintf(buf, "%d", slave->dev_num);
}
static DEVICE_ATTR_RO(device_number);
static struct attribute *slave_status_attrs[] = {
&dev_attr_status.attr,
&dev_attr_device_number.attr,
NULL,
};
/*
 * We don't use ATTRIBUTE_GROUPS() here since the group is used in a
 * separate file and can't be handled as a static.
*/
static const struct attribute_group sdw_slave_status_attr_group = {
.attrs = slave_status_attrs,
};
const struct attribute_group *sdw_slave_status_attr_groups[] = {
&sdw_slave_status_attr_group,
NULL
};
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dts file for AppliedMicro (APM) Merlin Board
*
* Copyright (C) 2015, Applied Micro Circuits Corporation
*/
/dts-v1/;
/include/ "apm-shadowcat.dtsi"
/ {
model = "APM X-Gene Merlin board";
compatible = "apm,merlin", "apm,xgene-shadowcat";
chosen { };
memory@100000000 {
device_type = "memory";
reg = < 0x1 0x00000000 0x0 0x80000000 >;
};
gpio-keys {
compatible = "gpio-keys";
button {
label = "POWER";
			linux,code = <116>; /* KEY_POWER */
linux,input-type = <0x1>;
interrupt-parent = <&sbgpio>;
interrupts = <0x0 0x1>;
};
};
poweroff_mbox: poweroff_mbox@10548000 {
compatible = "apm,merlin-poweroff-mailbox", "syscon";
reg = <0x0 0x10548000 0x0 0x30>;
};
poweroff: poweroff@10548010 {
compatible = "syscon-poweroff";
regmap = <&poweroff_mbox>;
offset = <0x10>;
mask = <0x1>;
};
};
&serial0 {
status = "okay";
};
&sata1 {
status = "okay";
};
&sata2 {
status = "okay";
};
&sata3 {
status = "okay";
};
&sgenet0 {
status = "okay";
};
&xgenet1 {
status = "okay";
};
&mmc0 {
status = "okay";
};
&i2c4 {
rtc68: rtc@68 {
compatible = "dallas,ds1337";
reg = <0x68>;
status = "okay";
};
};
&mdio {
sgenet0phy: phy@0 {
reg = <0x0>;
};
};
|
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include "speakup.h"
#include "spk_types.h"
#include "spk_priv.h"
struct spk_ldisc_data {
char buf;
struct completion completion;
bool buf_free;
struct spk_synth *synth;
};
/*
 * This lets spk_ttyio_ldisc_open() detect whether the line discipline is
 * being set on a speakup-driven device.
*/
static struct tty_struct *speakup_tty;
/* This mutex serializes the use of the global speakup_tty variable */
static DEFINE_MUTEX(speakup_tty_mutex);
static int ser_to_dev(int ser, dev_t *dev_no)
{
if (ser < 0 || ser > (255 - 64)) {
pr_err("speakup: Invalid ser param. Must be between 0 and 191 inclusive.\n");
return -EINVAL;
}
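	/* ttyS<ser> is character device major 4, minor 64 + ser */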
*dev_no = MKDEV(4, (64 + ser));
return 0;
}
static int get_dev_to_use(struct spk_synth *synth, dev_t *dev_no)
{
/* use ser only when dev is not specified */
if (strcmp(synth->dev_name, SYNTH_DEFAULT_DEV) ||
synth->ser == SYNTH_DEFAULT_SER)
return tty_dev_name_to_number(synth->dev_name, dev_no);
return ser_to_dev(synth->ser, dev_no);
}
static int spk_ttyio_ldisc_open(struct tty_struct *tty)
{
struct spk_ldisc_data *ldisc_data;
if (tty != speakup_tty)
/* Somebody tried to use this line discipline outside speakup */
return -ENODEV;
if (!tty->ops->write)
return -EOPNOTSUPP;
ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL);
if (!ldisc_data)
return -ENOMEM;
init_completion(&ldisc_data->completion);
ldisc_data->buf_free = true;
tty->disc_data = ldisc_data;
return 0;
}
static void spk_ttyio_ldisc_close(struct tty_struct *tty)
{
kfree(tty->disc_data);
}
static size_t spk_ttyio_receive_buf2(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
struct spk_ldisc_data *ldisc_data = tty->disc_data;
struct spk_synth *synth = ldisc_data->synth;
if (synth->read_buff_add) {
unsigned int i;
for (i = 0; i < count; i++)
synth->read_buff_add(cp[i]);
return count;
}
if (!ldisc_data->buf_free)
		/* ttyio_in() will call tty_flip_buffer_push() */
return 0;
	/* Make sure the consumer has read buf before we observe
	 * buf_free == true and overwrite buf.
	 */
mb();
ldisc_data->buf = cp[0];
ldisc_data->buf_free = false;
complete(&ldisc_data->completion);
return 1;
}
static struct tty_ldisc_ops spk_ttyio_ldisc_ops = {
.owner = THIS_MODULE,
.num = N_SPEAKUP,
.name = "speakup_ldisc",
.open = spk_ttyio_ldisc_open,
.close = spk_ttyio_ldisc_close,
.receive_buf2 = spk_ttyio_receive_buf2,
};
static int spk_ttyio_out(struct spk_synth *in_synth, const char ch);
static int spk_ttyio_out_unicode(struct spk_synth *in_synth, u16 ch);
static void spk_ttyio_send_xchar(struct spk_synth *in_synth, char ch);
static void spk_ttyio_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear);
static unsigned char spk_ttyio_in(struct spk_synth *in_synth);
static unsigned char spk_ttyio_in_nowait(struct spk_synth *in_synth);
static void spk_ttyio_flush_buffer(struct spk_synth *in_synth);
static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth);
struct spk_io_ops spk_ttyio_ops = {
.synth_out = spk_ttyio_out,
.synth_out_unicode = spk_ttyio_out_unicode,
.send_xchar = spk_ttyio_send_xchar,
.tiocmset = spk_ttyio_tiocmset,
.synth_in = spk_ttyio_in,
.synth_in_nowait = spk_ttyio_in_nowait,
.flush_buffer = spk_ttyio_flush_buffer,
.wait_for_xmitr = spk_ttyio_wait_for_xmitr,
};
EXPORT_SYMBOL_GPL(spk_ttyio_ops);
static inline void get_termios(struct tty_struct *tty,
struct ktermios *out_termios)
{
down_read(&tty->termios_rwsem);
*out_termios = tty->termios;
up_read(&tty->termios_rwsem);
}
static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
{
int ret = 0;
struct tty_struct *tty;
struct ktermios tmp_termios;
dev_t dev;
ret = get_dev_to_use(synth, &dev);
if (ret)
return ret;
tty = tty_kopen_exclusive(dev);
if (IS_ERR(tty))
return PTR_ERR(tty);
if (tty->ops->open)
ret = tty->ops->open(tty, NULL);
else
ret = -ENODEV;
if (ret) {
tty_unlock(tty);
return ret;
}
clear_bit(TTY_HUPPED, &tty->flags);
/* ensure hardware flow control is enabled */
get_termios(tty, &tmp_termios);
if (!(tmp_termios.c_cflag & CRTSCTS)) {
tmp_termios.c_cflag |= CRTSCTS;
tty_set_termios(tty, &tmp_termios);
/*
		 * Check c_cflag to see if it was updated, as tty_set_termios()
		 * may not return an error even when none of the requested bits
		 * were changed.
*/
get_termios(tty, &tmp_termios);
if (!(tmp_termios.c_cflag & CRTSCTS))
pr_warn("speakup: Failed to set hardware flow control\n");
}
tty_unlock(tty);
mutex_lock(&speakup_tty_mutex);
speakup_tty = tty;
ret = tty_set_ldisc(tty, N_SPEAKUP);
speakup_tty = NULL;
mutex_unlock(&speakup_tty_mutex);
if (!ret) {
/* Success */
struct spk_ldisc_data *ldisc_data = tty->disc_data;
ldisc_data->synth = synth;
synth->dev = tty;
return 0;
}
pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
tty_lock(tty);
if (tty->ops->close)
tty->ops->close(tty, NULL);
tty_unlock(tty);
tty_kclose(tty);
return ret;
}
void spk_ttyio_register_ldisc(void)
{
if (tty_register_ldisc(&spk_ttyio_ldisc_ops))
pr_warn("speakup: Error registering line discipline. Most synths won't work.\n");
}
void spk_ttyio_unregister_ldisc(void)
{
tty_unregister_ldisc(&spk_ttyio_ldisc_ops);
}
static int spk_ttyio_out(struct spk_synth *in_synth, const char ch)
{
struct tty_struct *tty = in_synth->dev;
int ret;
if (!in_synth->alive || !tty->ops->write)
return 0;
ret = tty->ops->write(tty, &ch, 1);
if (ret == 0)
/* No room */
return 0;
if (ret > 0)
/* Success */
return 1;
pr_warn("%s: I/O error, deactivating speakup\n",
in_synth->long_name);
	/* No synth any more, so nobody will restart TTYs, and we thus
	 * need to do it ourselves. Now that there is no synth we can
	 * let applications flood anyway.
	 */
in_synth->alive = 0;
speakup_start_ttys();
return 0;
}
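/*
 * Encode a 16-bit code point as UTF-8: one byte below U+0080, two bytes
 * below U+0800, otherwise three (a u16 never needs a 4-byte sequence).
 */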
static int spk_ttyio_out_unicode(struct spk_synth *in_synth, u16 ch)
{
int ret;
if (ch < 0x80) {
ret = spk_ttyio_out(in_synth, ch);
} else if (ch < 0x800) {
ret = spk_ttyio_out(in_synth, 0xc0 | (ch >> 6));
ret &= spk_ttyio_out(in_synth, 0x80 | (ch & 0x3f));
} else {
ret = spk_ttyio_out(in_synth, 0xe0 | (ch >> 12));
ret &= spk_ttyio_out(in_synth, 0x80 | ((ch >> 6) & 0x3f));
ret &= spk_ttyio_out(in_synth, 0x80 | (ch & 0x3f));
}
return ret;
}
static void spk_ttyio_send_xchar(struct spk_synth *in_synth, char ch)
{
struct tty_struct *tty = in_synth->dev;
if (tty->ops->send_xchar)
tty->ops->send_xchar(tty, ch);
}
static void spk_ttyio_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear)
{
struct tty_struct *tty = in_synth->dev;
if (tty->ops->tiocmset)
tty->ops->tiocmset(tty, set, clear);
}
static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth)
{
return 1;
}
static unsigned char ttyio_in(struct spk_synth *in_synth, int timeout)
{
struct tty_struct *tty = in_synth->dev;
struct spk_ldisc_data *ldisc_data = tty->disc_data;
char rv;
if (!timeout) {
if (!try_wait_for_completion(&ldisc_data->completion))
return 0xff;
} else if (wait_for_completion_timeout(&ldisc_data->completion,
usecs_to_jiffies(timeout)) == 0) {
pr_warn("spk_ttyio: timeout (%d) while waiting for input\n",
timeout);
return 0xff;
}
rv = ldisc_data->buf;
/* Make sure we have read buf before we set buf_free to let
* the producer overwrite it
*/
mb();
ldisc_data->buf_free = true;
/* Let TTY push more characters */
tty_flip_buffer_push(tty->port);
return rv;
}
static unsigned char spk_ttyio_in(struct spk_synth *in_synth)
{
return ttyio_in(in_synth, SPK_SYNTH_TIMEOUT);
}
static unsigned char spk_ttyio_in_nowait(struct spk_synth *in_synth)
{
u8 rv = ttyio_in(in_synth, 0);
return (rv == 0xff) ? 0 : rv;
}
static void spk_ttyio_flush_buffer(struct spk_synth *in_synth)
{
struct tty_struct *tty = in_synth->dev;
if (tty->ops->flush_buffer)
tty->ops->flush_buffer(tty);
}
int spk_ttyio_synth_probe(struct spk_synth *synth)
{
int rv = spk_ttyio_initialise_ldisc(synth);
if (rv)
return rv;
synth->alive = 1;
return 0;
}
EXPORT_SYMBOL_GPL(spk_ttyio_synth_probe);
void spk_ttyio_release(struct spk_synth *in_synth)
{
struct tty_struct *tty = in_synth->dev;
if (tty == NULL)
return;
tty_lock(tty);
if (tty->ops->close)
tty->ops->close(tty, NULL);
tty_ldisc_flush(tty);
tty_unlock(tty);
tty_kclose(tty);
in_synth->dev = NULL;
}
EXPORT_SYMBOL_GPL(spk_ttyio_release);
const char *spk_ttyio_synth_immediate(struct spk_synth *in_synth, const char *buff)
{
struct tty_struct *tty = in_synth->dev;
u_char ch;
while ((ch = *buff)) {
if (ch == '\n')
ch = in_synth->procspeech;
if (tty_write_room(tty) < 1 ||
!in_synth->io_ops->synth_out(in_synth, ch))
return buff;
buff++;
}
return NULL;
}
EXPORT_SYMBOL_GPL(spk_ttyio_synth_immediate);
|
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_SPXX_DEFS_H__
#define __CVMX_SPXX_DEFS_H__
#define CVMX_SPXX_BCKPRS_CNT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000340ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_BIST_STAT(block_id) (CVMX_ADD_IO_SEG(0x00011800900007F8ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_CLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000348ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_CLK_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000350ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_DBG_DESKEW_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000368ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_DBG_DESKEW_STATE(block_id) (CVMX_ADD_IO_SEG(0x0001180090000370ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_DRV_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000358ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_ERR_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000320ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_INT_DAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000318ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_INT_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180090000308ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180090000300ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_INT_SYNC(block_id) (CVMX_ADD_IO_SEG(0x0001180090000310ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_TPA_ACC(block_id) (CVMX_ADD_IO_SEG(0x0001180090000338ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_TPA_MAX(block_id) (CVMX_ADD_IO_SEG(0x0001180090000330ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_TPA_SEL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000328ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_TRN4_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000360ull) + ((block_id) & 1) * 0x8000000ull)
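/*
 * The two SPX interface blocks are 0x8000000 bytes apart; for example,
 * CVMX_SPXX_INT_REG(0) is 0x0001180090000300 while CVMX_SPXX_INT_REG(1)
 * is 0x0001180098000300.
 */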
void __cvmx_interrupt_spxx_int_msk_enable(int index);
union cvmx_spxx_bckprs_cnt {
uint64_t u64;
struct cvmx_spxx_bckprs_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t cnt:32;
#else
uint64_t cnt:32;
uint64_t reserved_32_63:32;
#endif
} s;
};
union cvmx_spxx_bist_stat {
uint64_t u64;
struct cvmx_spxx_bist_stat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_3_63:61;
uint64_t stat2:1;
uint64_t stat1:1;
uint64_t stat0:1;
#else
uint64_t stat0:1;
uint64_t stat1:1;
uint64_t stat2:1;
uint64_t reserved_3_63:61;
#endif
} s;
};
union cvmx_spxx_clk_ctl {
uint64_t u64;
struct cvmx_spxx_clk_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
uint64_t seetrn:1;
uint64_t reserved_12_15:4;
uint64_t clkdly:5;
uint64_t runbist:1;
uint64_t statdrv:1;
uint64_t statrcv:1;
uint64_t sndtrn:1;
uint64_t drptrn:1;
uint64_t rcvtrn:1;
uint64_t srxdlck:1;
#else
uint64_t srxdlck:1;
uint64_t rcvtrn:1;
uint64_t drptrn:1;
uint64_t sndtrn:1;
uint64_t statrcv:1;
uint64_t statdrv:1;
uint64_t runbist:1;
uint64_t clkdly:5;
uint64_t reserved_12_15:4;
uint64_t seetrn:1;
uint64_t reserved_17_63:47;
#endif
} s;
};
union cvmx_spxx_clk_stat {
uint64_t u64;
struct cvmx_spxx_clk_stat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_11_63:53;
uint64_t stxcal:1;
uint64_t reserved_9_9:1;
uint64_t srxtrn:1;
uint64_t s4clk1:1;
uint64_t s4clk0:1;
uint64_t d4clk1:1;
uint64_t d4clk0:1;
uint64_t reserved_0_3:4;
#else
uint64_t reserved_0_3:4;
uint64_t d4clk0:1;
uint64_t d4clk1:1;
uint64_t s4clk0:1;
uint64_t s4clk1:1;
uint64_t srxtrn:1;
uint64_t reserved_9_9:1;
uint64_t stxcal:1;
uint64_t reserved_11_63:53;
#endif
} s;
};
union cvmx_spxx_dbg_deskew_ctl {
uint64_t u64;
struct cvmx_spxx_dbg_deskew_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_30_63:34;
uint64_t fallnop:1;
uint64_t fall8:1;
uint64_t reserved_26_27:2;
uint64_t sstep_go:1;
uint64_t sstep:1;
uint64_t reserved_22_23:2;
uint64_t clrdly:1;
uint64_t dec:1;
uint64_t inc:1;
uint64_t mux:1;
uint64_t offset:5;
uint64_t bitsel:5;
uint64_t offdly:6;
uint64_t dllfrc:1;
uint64_t dlldis:1;
#else
uint64_t dlldis:1;
uint64_t dllfrc:1;
uint64_t offdly:6;
uint64_t bitsel:5;
uint64_t offset:5;
uint64_t mux:1;
uint64_t inc:1;
uint64_t dec:1;
uint64_t clrdly:1;
uint64_t reserved_22_23:2;
uint64_t sstep:1;
uint64_t sstep_go:1;
uint64_t reserved_26_27:2;
uint64_t fall8:1;
uint64_t fallnop:1;
uint64_t reserved_30_63:34;
#endif
} s;
};
union cvmx_spxx_dbg_deskew_state {
uint64_t u64;
struct cvmx_spxx_dbg_deskew_state_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
uint64_t testres:1;
uint64_t unxterm:1;
uint64_t muxsel:2;
uint64_t offset:5;
#else
uint64_t offset:5;
uint64_t muxsel:2;
uint64_t unxterm:1;
uint64_t testres:1;
uint64_t reserved_9_63:55;
#endif
} s;
};
union cvmx_spxx_drv_ctl {
uint64_t u64;
struct cvmx_spxx_drv_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_0_63:64;
#else
uint64_t reserved_0_63:64;
#endif
} s;
struct cvmx_spxx_drv_ctl_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t stx4ncmp:4;
uint64_t stx4pcmp:4;
uint64_t srx4cmp:8;
#else
uint64_t srx4cmp:8;
uint64_t stx4pcmp:4;
uint64_t stx4ncmp:4;
uint64_t reserved_16_63:48;
#endif
} cn38xx;
struct cvmx_spxx_drv_ctl_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63:40;
uint64_t stx4ncmp:4;
uint64_t stx4pcmp:4;
uint64_t reserved_10_15:6;
uint64_t srx4cmp:10;
#else
uint64_t srx4cmp:10;
uint64_t reserved_10_15:6;
uint64_t stx4pcmp:4;
uint64_t stx4ncmp:4;
uint64_t reserved_24_63:40;
#endif
} cn58xx;
};
union cvmx_spxx_err_ctl {
uint64_t u64;
struct cvmx_spxx_err_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
uint64_t prtnxa:1;
uint64_t dipcls:1;
uint64_t dippay:1;
uint64_t reserved_4_5:2;
uint64_t errcnt:4;
#else
uint64_t errcnt:4;
uint64_t reserved_4_5:2;
uint64_t dippay:1;
uint64_t dipcls:1;
uint64_t prtnxa:1;
uint64_t reserved_9_63:55;
#endif
} s;
};
union cvmx_spxx_int_dat {
uint64_t u64;
struct cvmx_spxx_int_dat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t mul:1;
uint64_t reserved_14_30:17;
uint64_t calbnk:2;
uint64_t rsvop:4;
uint64_t prt:8;
#else
uint64_t prt:8;
uint64_t rsvop:4;
uint64_t calbnk:2;
uint64_t reserved_14_30:17;
uint64_t mul:1;
uint64_t reserved_32_63:32;
#endif
} s;
};
union cvmx_spxx_int_msk {
uint64_t u64;
struct cvmx_spxx_int_msk_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
uint64_t calerr:1;
uint64_t syncerr:1;
uint64_t diperr:1;
uint64_t tpaovr:1;
uint64_t rsverr:1;
uint64_t drwnng:1;
uint64_t clserr:1;
uint64_t spiovr:1;
uint64_t reserved_2_3:2;
uint64_t abnorm:1;
uint64_t prtnxa:1;
#else
uint64_t prtnxa:1;
uint64_t abnorm:1;
uint64_t reserved_2_3:2;
uint64_t spiovr:1;
uint64_t clserr:1;
uint64_t drwnng:1;
uint64_t rsverr:1;
uint64_t tpaovr:1;
uint64_t diperr:1;
uint64_t syncerr:1;
uint64_t calerr:1;
uint64_t reserved_12_63:52;
#endif
} s;
};
union cvmx_spxx_int_reg {
uint64_t u64;
struct cvmx_spxx_int_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t spf:1;
uint64_t reserved_12_30:19;
uint64_t calerr:1;
uint64_t syncerr:1;
uint64_t diperr:1;
uint64_t tpaovr:1;
uint64_t rsverr:1;
uint64_t drwnng:1;
uint64_t clserr:1;
uint64_t spiovr:1;
uint64_t reserved_2_3:2;
uint64_t abnorm:1;
uint64_t prtnxa:1;
#else
uint64_t prtnxa:1;
uint64_t abnorm:1;
uint64_t reserved_2_3:2;
uint64_t spiovr:1;
uint64_t clserr:1;
uint64_t drwnng:1;
uint64_t rsverr:1;
uint64_t tpaovr:1;
uint64_t diperr:1;
uint64_t syncerr:1;
uint64_t calerr:1;
uint64_t reserved_12_30:19;
uint64_t spf:1;
uint64_t reserved_32_63:32;
#endif
} s;
};
union cvmx_spxx_int_sync {
uint64_t u64;
struct cvmx_spxx_int_sync_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
uint64_t calerr:1;
uint64_t syncerr:1;
uint64_t diperr:1;
uint64_t tpaovr:1;
uint64_t rsverr:1;
uint64_t drwnng:1;
uint64_t clserr:1;
uint64_t spiovr:1;
uint64_t reserved_2_3:2;
uint64_t abnorm:1;
uint64_t prtnxa:1;
#else
uint64_t prtnxa:1;
uint64_t abnorm:1;
uint64_t reserved_2_3:2;
uint64_t spiovr:1;
uint64_t clserr:1;
uint64_t drwnng:1;
uint64_t rsverr:1;
uint64_t tpaovr:1;
uint64_t diperr:1;
uint64_t syncerr:1;
uint64_t calerr:1;
uint64_t reserved_12_63:52;
#endif
} s;
};
union cvmx_spxx_tpa_acc {
uint64_t u64;
struct cvmx_spxx_tpa_acc_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t cnt:32;
#else
uint64_t cnt:32;
uint64_t reserved_32_63:32;
#endif
} s;
};
union cvmx_spxx_tpa_max {
uint64_t u64;
struct cvmx_spxx_tpa_max_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t max:32;
#else
uint64_t max:32;
uint64_t reserved_32_63:32;
#endif
} s;
};
union cvmx_spxx_tpa_sel {
uint64_t u64;
struct cvmx_spxx_tpa_sel_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t prtsel:4;
#else
uint64_t prtsel:4;
uint64_t reserved_4_63:60;
#endif
} s;
};
union cvmx_spxx_trn4_ctl {
uint64_t u64;
struct cvmx_spxx_trn4_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_13_63:51;
uint64_t trntest:1;
uint64_t jitter:3;
uint64_t clr_boot:1;
uint64_t set_boot:1;
uint64_t maxdist:5;
uint64_t macro_en:1;
uint64_t mux_en:1;
#else
uint64_t mux_en:1;
uint64_t macro_en:1;
uint64_t maxdist:5;
uint64_t set_boot:1;
uint64_t clr_boot:1;
uint64_t jitter:3;
uint64_t trntest:1;
uint64_t reserved_13_63:51;
#endif
} s;
};
#endif
|
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/debugfs.h>
#include <linux/efi.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pgtable.h>
static int ptdump_show(struct seq_file *m, void *v)
{
ptdump_walk_pgd_level_debugfs(m, &init_mm, false);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptdump);
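/* DEFINE_SHOW_ATTRIBUTE(ptdump) generates ptdump_open() and the ptdump_fops used below */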
static int ptdump_curknl_show(struct seq_file *m, void *v)
{
if (current->mm->pgd)
ptdump_walk_pgd_level_debugfs(m, current->mm, false);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptdump_curknl);
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
static int ptdump_curusr_show(struct seq_file *m, void *v)
{
if (current->mm->pgd)
ptdump_walk_pgd_level_debugfs(m, current->mm, true);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptdump_curusr);
#endif
#if defined(CONFIG_EFI) && defined(CONFIG_X86_64)
static int ptdump_efi_show(struct seq_file *m, void *v)
{
if (efi_mm.pgd)
ptdump_walk_pgd_level_debugfs(m, &efi_mm, false);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptdump_efi);
#endif
static struct dentry *dir;
static int __init pt_dump_debug_init(void)
{
dir = debugfs_create_dir("page_tables", NULL);
debugfs_create_file("kernel", 0400, dir, NULL, &ptdump_fops);
debugfs_create_file("current_kernel", 0400, dir, NULL,
&ptdump_curknl_fops);
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
debugfs_create_file("current_user", 0400, dir, NULL,
&ptdump_curusr_fops);
#endif
#if defined(CONFIG_EFI) && defined(CONFIG_X86_64)
debugfs_create_file("efi", 0400, dir, NULL, &ptdump_efi_fops);
#endif
return 0;
}
static void __exit pt_dump_debug_exit(void)
{
debugfs_remove_recursive(dir);
}
module_init(pt_dump_debug_init);
module_exit(pt_dump_debug_exit);
MODULE_AUTHOR("Arjan van de Ven <[email protected]>");
MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables");
|
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sh/kernel/adc.c -- SH3 on-chip ADC support
*
* Copyright (C) 2004 Andriy Skulysh <[email protected]>
*/
#include <linux/module.h>
#include <asm/adc.h>
#include <asm/io.h>
int adc_single(unsigned int channel)
{
int off;
unsigned char csr;
	if (channel >= 8)
		return -1;
	off = (channel & 0x03) << 2;
	/* select the channel, start conversion (ADST) and set the clock (CKS) */
	csr = __raw_readb(ADCSR);
	csr = channel | ADCSR_ADST | ADCSR_CKS;
	__raw_writeb(csr, ADCSR);
do {
csr = __raw_readb(ADCSR);
} while ((csr & ADCSR_ADF) == 0);
csr &= ~(ADCSR_ADF | ADCSR_ADST);
__raw_writeb(csr, ADCSR);
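	/*
	 * ADDRAH:ADDRAL hold the 10-bit result left-justified in a 16-bit
	 * word; shift right by 6 to right-justify it.
	 */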
return (((__raw_readb(ADDRAH + off) << 8) |
__raw_readb(ADDRAL + off)) >> 6);
}
EXPORT_SYMBOL(adc_single);
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Reset driver for the StarFive JH71X0 SoCs
*
* Copyright (C) 2021 Emil Renner Berthing <[email protected]>
*/
#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/reset-controller.h>
#include <linux/spinlock.h>
#include "reset-starfive-jh71x0.h"
struct jh71x0_reset {
struct reset_controller_dev rcdev;
/* protect registers against concurrent read-modify-write */
spinlock_t lock;
void __iomem *assert;
void __iomem *status;
const u32 *asserted;
};
static inline struct jh71x0_reset *
jh71x0_reset_from(struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct jh71x0_reset, rcdev);
}
static int jh71x0_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct jh71x0_reset *data = jh71x0_reset_from(rcdev);
unsigned long offset = id / 32;
u32 mask = BIT(id % 32);
void __iomem *reg_assert = data->assert + offset * sizeof(u32);
void __iomem *reg_status = data->status + offset * sizeof(u32);
u32 done = data->asserted ? data->asserted[offset] & mask : 0;
u32 value;
unsigned long flags;
int ret;
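	/*
	 * 'done' holds the status-register value that signals "asserted" for
	 * this line; flip it when deasserting so we poll for the released
	 * state instead.
	 */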
if (!assert)
done ^= mask;
spin_lock_irqsave(&data->lock, flags);
value = readl(reg_assert);
if (assert)
value |= mask;
else
value &= ~mask;
writel(value, reg_assert);
/* if the associated clock is gated, deasserting might otherwise hang forever */
ret = readl_poll_timeout_atomic(reg_status, value, (value & mask) == done, 0, 1000);
spin_unlock_irqrestore(&data->lock, flags);
return ret;
}
static int jh71x0_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return jh71x0_reset_update(rcdev, id, true);
}
static int jh71x0_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return jh71x0_reset_update(rcdev, id, false);
}
static int jh71x0_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
int ret;
ret = jh71x0_reset_assert(rcdev, id);
if (ret)
return ret;
return jh71x0_reset_deassert(rcdev, id);
}
static int jh71x0_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct jh71x0_reset *data = jh71x0_reset_from(rcdev);
unsigned long offset = id / 32;
u32 mask = BIT(id % 32);
void __iomem *reg_status = data->status + offset * sizeof(u32);
u32 value = readl(reg_status);
if (!data->asserted)
return !(value & mask);
return !((value ^ data->asserted[offset]) & mask);
}
static const struct reset_control_ops jh71x0_reset_ops = {
.assert = jh71x0_reset_assert,
.deassert = jh71x0_reset_deassert,
.reset = jh71x0_reset_reset,
.status = jh71x0_reset_status,
};
int reset_starfive_jh71x0_register(struct device *dev, struct device_node *of_node,
void __iomem *assert, void __iomem *status,
const u32 *asserted, unsigned int nr_resets,
struct module *owner)
{
struct jh71x0_reset *data;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->rcdev.ops = &jh71x0_reset_ops;
data->rcdev.owner = owner;
data->rcdev.nr_resets = nr_resets;
data->rcdev.dev = dev;
data->rcdev.of_node = of_node;
spin_lock_init(&data->lock);
data->assert = assert;
data->status = status;
data->asserted = asserted;
return devm_reset_controller_register(dev, &data->rcdev);
}
EXPORT_SYMBOL_GPL(reset_starfive_jh71x0_register);
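/*
 * Usage sketch (hypothetical caller, not part of this file): a SoC-specific
 * platform driver would map its reset register window in probe and register
 * the controller, e.g.:
 *
 *	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);
 *
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *	return reset_starfive_jh71x0_register(&pdev->dev, pdev->dev.of_node,
 *					      base + ASSERT_OFFSET,
 *					      base + STATUS_OFFSET,
 *					      NULL, NR_RESETS, THIS_MODULE);
 *
 * ASSERT_OFFSET, STATUS_OFFSET and NR_RESETS are SoC-specific placeholders.
 */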
|
/* SPDX-License-Identifier: LGPL-2.1 */
/*
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French ([email protected])
* Jeremy Allison ([email protected])
*
*/
#ifndef _CIFS_GLOB_H
#define _CIFS_GLOB_H
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/utsname.h>
#include <linux/sched/mm.h>
#include <linux/netfs.h>
#include "cifs_fs_sb.h"
#include "cifsacl.h"
#include <crypto/internal/hash.h>
#include <uapi/linux/cifs/cifs_mount.h>
#include "../common/smb2pdu.h"
#include "smb2pdu.h"
#include <linux/filelock.h>
#define SMB_PATH_MAX 260
#define CIFS_PORT 445
#define RFC1001_PORT 139
/*
* The sizes of various internal tables and strings
*/
#define MAX_UID_INFO 16
#define MAX_SES_INFO 2
#define MAX_TCON_INFO 4
#define MAX_TREE_SIZE (2 + CIFS_NI_MAXHOST + 1 + CIFS_MAX_SHARE_LEN + 1)
#define CIFS_MIN_RCV_POOL 4
#define MAX_REOPEN_ATT 5 /* maximum number of attempts to reopen a file */
/*
* default attribute cache timeout (jiffies)
*/
#define CIFS_DEF_ACTIMEO (1 * HZ)
/*
* max sleep time before retry to server
*/
#define CIFS_MAX_SLEEP 2000
/*
* max attribute cache timeout (jiffies) - 2^30
*/
#define CIFS_MAX_ACTIMEO (1 << 30)
/*
* Max persistent and resilient handle timeout (milliseconds).
* Windows durable max was 960000 (16 minutes)
*/
#define SMB3_MAX_HANDLE_TIMEOUT 960000
/*
* MAX_REQ is the maximum number of requests that WE will send
* on one socket concurrently.
*/
#define CIFS_MAX_REQ 32767
#define RFC1001_NAME_LEN 15
#define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1)
/* maximum length of ip addr as a string (including ipv6 and sctp) */
#define SERVER_NAME_LENGTH 80
#define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1)
/* echo interval in seconds */
#define SMB_ECHO_INTERVAL_MIN 1
#define SMB_ECHO_INTERVAL_MAX 600
#define SMB_ECHO_INTERVAL_DEFAULT 60
/* smb multichannel query server interfaces interval in seconds */
#define SMB_INTERFACE_POLL_INTERVAL 600
/* maximum number of PDUs in one compound */
#define MAX_COMPOUND 7
/*
* Default number of credits to keep available for SMB3.
* This value is chosen somewhat arbitrarily. The Windows client
* defaults to 128 credits, the Windows server allows clients up to
* 512 credits (or 8K for later versions), and the NetApp server
* does not limit clients at all. Choose a high enough default value
* such that the client shouldn't limit performance, but allow mount
* to override (until you approach 64K, where we limit credits to 65000
 * to reduce the possibility of seeing more server credit overflow bugs).
*/
#define SMB2_MAX_CREDITS_AVAILABLE 32000
#include "cifspdu.h"
#ifndef XATTR_DOS_ATTRIB
#define XATTR_DOS_ATTRIB "user.DOSATTRIB"
#endif
#define CIFS_MAX_WORKSTATION_LEN (__NEW_UTS_LEN + 1) /* reasonable max for client */
#define CIFS_DFS_ROOT_SES(ses) ((ses)->dfs_root_ses ?: (ses))
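/* i.e. use ses->dfs_root_ses when it is set, otherwise fall back to ses itself */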
/*
 * CIFS vfs client status information (based on what we know).
*/
/* associated with each connection */
enum statusEnum {
CifsNew = 0,
CifsGood,
CifsExiting,
CifsNeedReconnect,
CifsNeedNegotiate,
CifsInNegotiate,
};
/* associated with each smb session */
enum ses_status_enum {
SES_NEW = 0,
SES_GOOD,
SES_EXITING,
SES_NEED_RECON,
SES_IN_SETUP
};
/* associated with each tree connection to the server */
enum tid_status_enum {
TID_NEW = 0,
TID_GOOD,
TID_EXITING,
TID_NEED_RECON,
TID_NEED_TCON,
TID_IN_TCON,
TID_NEED_FILES_INVALIDATE, /* currently unused */
TID_IN_FILES_INVALIDATE
};
enum securityEnum {
Unspecified = 0, /* not specified */
NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */
RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */
Kerberos, /* Kerberos via SPNEGO */
};
enum upcall_target_enum {
UPTARGET_UNSPECIFIED, /* not specified, defaults to app */
UPTARGET_MOUNT, /* upcall to the mount namespace */
UPTARGET_APP, /* upcall to the application namespace which did the mount */
};
enum cifs_reparse_type {
CIFS_REPARSE_TYPE_NFS,
CIFS_REPARSE_TYPE_WSL,
CIFS_REPARSE_TYPE_DEFAULT = CIFS_REPARSE_TYPE_NFS,
};
static inline const char *cifs_reparse_type_str(enum cifs_reparse_type type)
{
switch (type) {
case CIFS_REPARSE_TYPE_NFS:
return "nfs";
case CIFS_REPARSE_TYPE_WSL:
return "wsl";
default:
return "unknown";
}
}
struct session_key {
unsigned int len;
char *response;
};
/* crypto hashing related structure/fields, not specific to a sec mech */
struct cifs_secmech {
struct shash_desc *md5; /* md5 hash function, for CIFS/SMB1 signatures */
struct shash_desc *hmacsha256; /* hmac-sha256 hash function, for SMB2 signatures */
struct shash_desc *sha512; /* sha512 hash function, for SMB3.1.1 preauth hash */
struct shash_desc *aes_cmac; /* block-cipher based MAC function, for SMB3 signatures */
struct crypto_aead *enc; /* smb3 encryption AEAD TFM (AES-CCM and AES-GCM) */
struct crypto_aead *dec; /* smb3 decryption AEAD TFM (AES-CCM and AES-GCM) */
};
/* per smb session structure/fields */
struct ntlmssp_auth {
bool sesskey_per_smbsess; /* whether session key is per smb session */
	__u32 client_flags; /* sent by client in type 1 ntlmssp exchange */
__u32 server_flags; /* sent by server in type 2 ntlmssp exchange */
unsigned char ciphertext[CIFS_CPHTXT_SIZE]; /* sent to server */
char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlmssp */
};
struct cifs_cred {
int uid;
int gid;
int mode;
int cecount;
struct smb_sid osid;
struct smb_sid gsid;
struct cifs_ntace *ntaces;
struct smb_ace *aces;
};
struct cifs_open_info_data {
bool adjust_tz;
union {
bool reparse_point;
bool symlink;
};
struct {
/* ioctl response buffer */
struct {
int buftype;
struct kvec iov;
} io;
__u32 tag;
union {
struct reparse_data_buffer *buf;
struct reparse_posix_data *posix;
};
} reparse;
struct {
__u8 eas[SMB2_WSL_MAX_QUERY_EA_RESP_SIZE];
unsigned int eas_len;
} wsl;
char *symlink_target;
struct smb_sid posix_owner;
struct smb_sid posix_group;
union {
struct smb2_file_all_info fi;
struct smb311_posix_qinfo posix_fi;
};
};
/*
*****************************************************************
* Except the CIFS PDUs themselves all the
* globally interesting structs should go here
*****************************************************************
*/
/*
 * A smb_rqst represents a complete request to be issued to a server. It is
 * formed by a kvec array holding the request, followed by a data payload
 * described by the rq_iter iov_iter.
*/
struct smb_rqst {
struct kvec *rq_iov; /* array of kvecs */
unsigned int rq_nvec; /* number of kvecs in array */
struct iov_iter rq_iter; /* Data iterator */
struct folio_queue *rq_buffer; /* Buffer for encryption */
};
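/*
 * A minimal, illustrative way to build an smb_rqst with a single header
 * kvec and no data payload (hdr and hdr_len are hypothetical locals):
 *
 *	struct kvec iov = { .iov_base = hdr, .iov_len = hdr_len };
 *	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
 */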
struct mid_q_entry;
struct TCP_Server_Info;
struct cifsFileInfo;
struct cifs_ses;
struct cifs_tcon;
struct dfs_info3_param;
struct cifs_fattr;
struct smb3_fs_context;
struct cifs_fid;
struct cifs_io_subrequest;
struct cifs_io_parms;
struct cifs_search_info;
struct cifsInodeInfo;
struct cifs_open_parms;
struct cifs_credits;
struct smb_version_operations {
int (*send_cancel)(struct TCP_Server_Info *, struct smb_rqst *,
struct mid_q_entry *);
bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *);
/* setup request: allocate mid, sign message */
struct mid_q_entry *(*setup_request)(struct cifs_ses *,
struct TCP_Server_Info *,
struct smb_rqst *);
/* setup async request: allocate mid, sign message */
struct mid_q_entry *(*setup_async_request)(struct TCP_Server_Info *,
struct smb_rqst *);
/* check response: verify signature, map error */
int (*check_receive)(struct mid_q_entry *, struct TCP_Server_Info *,
bool);
void (*add_credits)(struct TCP_Server_Info *server,
struct cifs_credits *credits,
const int optype);
void (*set_credits)(struct TCP_Server_Info *, const int);
int * (*get_credits_field)(struct TCP_Server_Info *, const int);
unsigned int (*get_credits)(struct mid_q_entry *);
__u64 (*get_next_mid)(struct TCP_Server_Info *);
void (*revert_current_mid)(struct TCP_Server_Info *server,
const unsigned int val);
/* data offset from read response message */
unsigned int (*read_data_offset)(char *);
/*
* Data length from read response message
* When in_remaining is true, the returned data length is in
	 * message field DataRemaining for out-of-band data read (e.g. through
* Memory Registration RDMA write in SMBD).
* Otherwise, the returned data length is in message field DataLength.
*/
unsigned int (*read_data_length)(char *, bool in_remaining);
/* map smb to linux error */
int (*map_error)(char *, bool);
/* find mid corresponding to the response message */
struct mid_q_entry * (*find_mid)(struct TCP_Server_Info *, char *);
void (*dump_detail)(void *buf, struct TCP_Server_Info *ptcp_info);
void (*clear_stats)(struct cifs_tcon *);
void (*print_stats)(struct seq_file *m, struct cifs_tcon *);
void (*dump_share_caps)(struct seq_file *, struct cifs_tcon *);
/* verify the message */
int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
int (*handle_cancelled_mid)(struct mid_q_entry *, struct TCP_Server_Info *);
void (*downgrade_oplock)(struct TCP_Server_Info *server,
struct cifsInodeInfo *cinode, __u32 oplock,
unsigned int epoch, bool *purge_cache);
/* process transaction2 response */
bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
char *, int);
/* check if we need to negotiate */
bool (*need_neg)(struct TCP_Server_Info *);
/* negotiate to the server */
int (*negotiate)(const unsigned int xid,
struct cifs_ses *ses,
struct TCP_Server_Info *server);
/* set negotiated write size */
unsigned int (*negotiate_wsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx);
/* set negotiated read size */
unsigned int (*negotiate_rsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx);
	/* set up SMB session */
int (*sess_setup)(const unsigned int, struct cifs_ses *,
struct TCP_Server_Info *server,
const struct nls_table *);
/* close smb session */
int (*logoff)(const unsigned int, struct cifs_ses *);
/* connect to a server share */
int (*tree_connect)(const unsigned int, struct cifs_ses *, const char *,
struct cifs_tcon *, const struct nls_table *);
/* close tree connection */
int (*tree_disconnect)(const unsigned int, struct cifs_tcon *);
/* get DFS referrals */
int (*get_dfs_refer)(const unsigned int, struct cifs_ses *,
const char *, struct dfs_info3_param **,
unsigned int *, const struct nls_table *, int);
/* informational QFS call */
void (*qfs_tcon)(const unsigned int, struct cifs_tcon *,
struct cifs_sb_info *);
/* query for server interfaces */
int (*query_server_interfaces)(const unsigned int, struct cifs_tcon *,
bool);
/* check if a path is accessible or not */
int (*is_path_accessible)(const unsigned int, struct cifs_tcon *,
struct cifs_sb_info *, const char *);
/* query path data from the server */
int (*query_path_info)(const unsigned int xid,
struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const char *full_path,
struct cifs_open_info_data *data);
/* query file data from the server */
int (*query_file_info)(const unsigned int xid, struct cifs_tcon *tcon,
struct cifsFileInfo *cfile, struct cifs_open_info_data *data);
/* query reparse point to determine which type of special file */
int (*query_reparse_point)(const unsigned int xid,
struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const char *full_path,
u32 *tag, struct kvec *rsp,
int *rsp_buftype);
/* get server index number */
int (*get_srv_inum)(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path, u64 *uniqueid,
struct cifs_open_info_data *data);
/* set size by path */
int (*set_path_size)(const unsigned int, struct cifs_tcon *,
const char *, __u64, struct cifs_sb_info *, bool,
struct dentry *);
/* set size by file handle */
int (*set_file_size)(const unsigned int, struct cifs_tcon *,
struct cifsFileInfo *, __u64, bool);
/* set attributes */
int (*set_file_info)(struct inode *, const char *, FILE_BASIC_INFO *,
const unsigned int);
int (*set_compression)(const unsigned int, struct cifs_tcon *,
struct cifsFileInfo *);
	/* check if we can send an echo or not */
bool (*can_echo)(struct TCP_Server_Info *);
/* send echo request */
int (*echo)(struct TCP_Server_Info *);
/* create directory */
int (*posix_mkdir)(const unsigned int xid, struct inode *inode,
umode_t mode, struct cifs_tcon *tcon,
const char *full_path,
struct cifs_sb_info *cifs_sb);
int (*mkdir)(const unsigned int xid, struct inode *inode, umode_t mode,
struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *sb);
/* set info on created directory */
void (*mkdir_setinfo)(struct inode *, const char *,
struct cifs_sb_info *, struct cifs_tcon *,
const unsigned int);
/* remove directory */
int (*rmdir)(const unsigned int, struct cifs_tcon *, const char *,
struct cifs_sb_info *);
/* unlink file */
int (*unlink)(const unsigned int, struct cifs_tcon *, const char *,
struct cifs_sb_info *, struct dentry *);
/* open, rename and delete file */
int (*rename_pending_delete)(const char *, struct dentry *,
const unsigned int);
/* send rename request */
int (*rename)(const unsigned int xid,
struct cifs_tcon *tcon,
struct dentry *source_dentry,
const char *from_name, const char *to_name,
struct cifs_sb_info *cifs_sb);
/* send create hardlink request */
int (*create_hardlink)(const unsigned int xid,
struct cifs_tcon *tcon,
struct dentry *source_dentry,
const char *from_name, const char *to_name,
struct cifs_sb_info *cifs_sb);
/* query symlink target */
int (*query_symlink)(const unsigned int xid,
struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const char *full_path,
char **target_path);
/* open a file for non-posix mounts */
int (*open)(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
void *buf);
/* set fid protocol-specific info */
void (*set_fid)(struct cifsFileInfo *, struct cifs_fid *, __u32);
/* close a file */
int (*close)(const unsigned int, struct cifs_tcon *,
struct cifs_fid *);
/* close a file, returning file attributes and timestamps */
int (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon,
struct cifsFileInfo *pfile_info);
/* send a flush request to the server */
int (*flush)(const unsigned int, struct cifs_tcon *, struct cifs_fid *);
/* async read from the server */
int (*async_readv)(struct cifs_io_subrequest *);
/* async write to the server */
void (*async_writev)(struct cifs_io_subrequest *);
/* sync read from the server */
int (*sync_read)(const unsigned int, struct cifs_fid *,
struct cifs_io_parms *, unsigned int *, char **,
int *);
/* sync write to the server */
int (*sync_write)(const unsigned int, struct cifs_fid *,
struct cifs_io_parms *, unsigned int *, struct kvec *,
unsigned long);
/* open dir, start readdir */
int (*query_dir_first)(const unsigned int, struct cifs_tcon *,
const char *, struct cifs_sb_info *,
struct cifs_fid *, __u16,
struct cifs_search_info *);
/* continue readdir */
int (*query_dir_next)(const unsigned int, struct cifs_tcon *,
struct cifs_fid *,
__u16, struct cifs_search_info *srch_inf);
/* close dir */
int (*close_dir)(const unsigned int, struct cifs_tcon *,
struct cifs_fid *);
/* calculate a size of SMB message */
unsigned int (*calc_smb_size)(void *buf);
/* check for STATUS_PENDING and process the response if yes */
bool (*is_status_pending)(char *buf, struct TCP_Server_Info *server);
/* check for STATUS_NETWORK_SESSION_EXPIRED */
bool (*is_session_expired)(char *);
/* send oplock break response */
int (*oplock_response)(struct cifs_tcon *tcon, __u64 persistent_fid, __u64 volatile_fid,
__u16 net_fid, struct cifsInodeInfo *cifs_inode);
/* query remote filesystem */
int (*queryfs)(const unsigned int, struct cifs_tcon *,
const char *, struct cifs_sb_info *, struct kstatfs *);
/* send mandatory brlock to the server */
int (*mand_lock)(const unsigned int, struct cifsFileInfo *, __u64,
__u64, __u32, int, int, bool);
/* unlock range of mandatory locks */
int (*mand_unlock_range)(struct cifsFileInfo *, struct file_lock *,
const unsigned int);
/* push brlocks from the cache to the server */
int (*push_mand_locks)(struct cifsFileInfo *);
/* get lease key of the inode */
void (*get_lease_key)(struct inode *, struct cifs_fid *);
/* set lease key of the inode */
void (*set_lease_key)(struct inode *, struct cifs_fid *);
/* generate new lease key */
void (*new_lease_key)(struct cifs_fid *);
int (*generate_signingkey)(struct cifs_ses *ses,
struct TCP_Server_Info *server);
int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *,
bool allocate_crypto);
int (*set_integrity)(const unsigned int, struct cifs_tcon *tcon,
struct cifsFileInfo *src_file);
int (*enum_snapshots)(const unsigned int xid, struct cifs_tcon *tcon,
struct cifsFileInfo *src_file, void __user *);
int (*notify)(const unsigned int xid, struct file *pfile,
void __user *pbuf, bool return_changes);
int (*query_mf_symlink)(unsigned int, struct cifs_tcon *,
struct cifs_sb_info *, const unsigned char *,
char *, unsigned int *);
int (*create_mf_symlink)(unsigned int, struct cifs_tcon *,
struct cifs_sb_info *, const unsigned char *,
char *, unsigned int *);
/* if we can do cache read operations */
bool (*is_read_op)(__u32);
/* set oplock level for the inode */
void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int,
bool *);
/* create lease context buffer for CREATE request */
char * (*create_lease_buf)(u8 *lease_key, u8 oplock);
/* parse lease context buffer and return oplock/epoch info */
__u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey);
ssize_t (*copychunk_range)(const unsigned int,
struct cifsFileInfo *src_file,
struct cifsFileInfo *target_file,
u64 src_off, u64 len, u64 dest_off);
int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src,
struct cifsFileInfo *target_file, u64 src_off, u64 len,
u64 dest_off);
int (*validate_negotiate)(const unsigned int, struct cifs_tcon *);
ssize_t (*query_all_EAs)(const unsigned int, struct cifs_tcon *,
const unsigned char *, const unsigned char *, char *,
size_t, struct cifs_sb_info *);
int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *,
const char *, const void *, const __u16,
const struct nls_table *, struct cifs_sb_info *);
struct smb_ntsd * (*get_acl)(struct cifs_sb_info *cifssb, struct inode *ino,
			const char *path, u32 *plen, u32 info);
struct smb_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *cifssmb,
const struct cifs_fid *pfid, u32 *plen, u32 info);
int (*set_acl)(struct smb_ntsd *pntsd, __u32 len, struct inode *ino, const char *path,
int flag);
/* writepages retry size */
unsigned int (*wp_retry_size)(struct inode *);
/* get mtu credits */
int (*wait_mtu_credits)(struct TCP_Server_Info *, size_t,
size_t *, struct cifs_credits *);
/* adjust previously taken mtu credits to request size */
int (*adjust_credits)(struct TCP_Server_Info *server,
struct cifs_io_subrequest *subreq,
unsigned int /*enum smb3_rw_credits_trace*/ trace);
/* check if we need to issue closedir */
bool (*dir_needs_close)(struct cifsFileInfo *);
long (*fallocate)(struct file *, struct cifs_tcon *, int, loff_t,
loff_t);
/* init transform (compress/encrypt) request */
int (*init_transform_rq)(struct TCP_Server_Info *, int num_rqst,
struct smb_rqst *, struct smb_rqst *);
int (*is_transform_hdr)(void *buf);
int (*receive_transform)(struct TCP_Server_Info *,
struct mid_q_entry **, char **, int *);
enum securityEnum (*select_sectype)(struct TCP_Server_Info *,
enum securityEnum);
int (*next_header)(struct TCP_Server_Info *server, char *buf,
unsigned int *noff);
/* ioctl passthrough for query_info */
int (*ioctl_query_info)(const unsigned int xid,
struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
__le16 *path, int is_dir,
unsigned long p);
/* make unix special files (block, char, fifo, socket) */
int (*make_node)(unsigned int xid,
struct inode *inode,
struct dentry *dentry,
struct cifs_tcon *tcon,
const char *full_path,
umode_t mode,
dev_t device_number);
/* version specific fiemap implementation */
int (*fiemap)(struct cifs_tcon *tcon, struct cifsFileInfo *,
struct fiemap_extent_info *, u64, u64);
/* version specific llseek implementation */
loff_t (*llseek)(struct file *, struct cifs_tcon *, loff_t, int);
/* Check for STATUS_IO_TIMEOUT */
bool (*is_status_io_timeout)(char *buf);
/* Check for STATUS_NETWORK_NAME_DELETED */
bool (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv);
int (*parse_reparse_point)(struct cifs_sb_info *cifs_sb,
const char *full_path,
struct kvec *rsp_iov,
struct cifs_open_info_data *data);
int (*create_reparse_symlink)(const unsigned int xid,
struct inode *inode,
struct dentry *dentry,
struct cifs_tcon *tcon,
const char *full_path,
const char *symname);
};
struct smb_version_values {
char *version_string;
__u16 protocol_id;
__u32 req_capabilities;
__u32 large_lock_type;
__u32 exclusive_lock_type;
__u32 shared_lock_type;
__u32 unlock_lock_type;
size_t header_preamble_size;
size_t header_size;
size_t max_header_size;
size_t read_rsp_size;
__le16 lock_cmd;
unsigned int cap_unix;
unsigned int cap_nt_find;
unsigned int cap_large_files;
__u16 signing_enabled;
__u16 signing_required;
size_t create_lease_size;
};
#define HEADER_SIZE(server) (server->vals->header_size)
#define MAX_HEADER_SIZE(server) (server->vals->max_header_size)
#define HEADER_PREAMBLE_SIZE(server) (server->vals->header_preamble_size)
#define MID_HEADER_SIZE(server) (HEADER_SIZE(server) - 1 - HEADER_PREAMBLE_SIZE(server))
/*
* CIFS superblock mount flags (mnt_cifs_flags) to consider when
* trying to reuse existing superblock for a new mount
*/
#define CIFS_MOUNT_MASK (CIFS_MOUNT_NO_PERM | CIFS_MOUNT_SET_UID | \
CIFS_MOUNT_SERVER_INUM | CIFS_MOUNT_DIRECT_IO | \
CIFS_MOUNT_NO_XATTR | CIFS_MOUNT_MAP_SPECIAL_CHR | \
CIFS_MOUNT_MAP_SFM_CHR | \
CIFS_MOUNT_UNX_EMUL | CIFS_MOUNT_NO_BRL | \
CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_OVERR_UID | \
CIFS_MOUNT_OVERR_GID | CIFS_MOUNT_DYNPERM | \
CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \
CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \
CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \
CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID | \
CIFS_MOUNT_UID_FROM_ACL | CIFS_MOUNT_NO_HANDLE_CACHE | \
CIFS_MOUNT_NO_DFS | CIFS_MOUNT_MODE_FROM_SID | \
CIFS_MOUNT_RO_CACHE | CIFS_MOUNT_RW_CACHE)
/*
* Generic VFS superblock mount flags (s_flags) to consider when
* trying to reuse existing superblock for a new mount
*/
#define CIFS_MS_MASK (SB_RDONLY | SB_MANDLOCK | SB_NOEXEC | SB_NOSUID | \
SB_NODEV | SB_SYNCHRONOUS)
struct cifs_mnt_data {
struct cifs_sb_info *cifs_sb;
struct smb3_fs_context *ctx;
int flags;
};
static inline unsigned int
get_rfc1002_length(void *buf)
{
return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
}
static inline void
inc_rfc1001_len(void *buf, int count)
{
be32_add_cpu((__be32 *)buf, count);
}
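/*
 * Worked example for the two helpers above: an RFC1001 session message
 * begins with a big-endian 4-byte field whose low bits carry the length
 * of the SMB payload. For a buffer starting with the bytes 00 00 01 00,
 * get_rfc1002_length() returns 0x100 (256), and inc_rfc1001_len(buf, 16)
 * would bump that on-the-wire length field to 272.
 */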
struct TCP_Server_Info {
struct list_head tcp_ses_list;
struct list_head smb_ses_list;
spinlock_t srv_lock; /* protect anything here that is not protected */
__u64 conn_id; /* connection identifier (useful for debugging) */
int srv_count; /* reference counter */
/* 15 character server name + 0x20 16th byte indicating type = srv */
char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
struct smb_version_operations *ops;
struct smb_version_values *vals;
/* updates to tcpStatus protected by cifs_tcp_ses_lock */
enum statusEnum tcpStatus; /* what we think the status is */
char *hostname; /* hostname portion of UNC string */
struct socket *ssocket;
struct sockaddr_storage dstaddr;
struct sockaddr_storage srcaddr; /* locally bind to this IP */
#ifdef CONFIG_NET_NS
struct net *net;
#endif
wait_queue_head_t response_q;
wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
	spinlock_t mid_lock; /* protect mid queue and its entries */
struct list_head pending_mid_q;
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
bool nosharesock;
bool tcp_nodelay;
bool terminate;
unsigned int credits; /* send no more requests at once */
unsigned int max_credits; /* can override large 32000 default at mnt */
unsigned int in_flight; /* number of requests on the wire to server */
unsigned int max_in_flight; /* max number of requests that were on wire */
spinlock_t req_lock; /* protect the two values above */
struct mutex _srv_mutex;
unsigned int nofs_flag;
struct task_struct *tsk;
char server_GUID[16];
__u16 sec_mode;
bool sign; /* is signing enabled on this connection? */
bool ignore_signature:1; /* skip validation of signatures in SMB2/3 rsp */
bool session_estab; /* mark when very first sess is established */
int echo_credits; /* echo reserved slots */
int oplock_credits; /* oplock break reserved slots */
bool echoes:1; /* enable echoes */
__u8 client_guid[SMB2_CLIENT_GUID_SIZE]; /* Client GUID */
u16 dialect; /* dialect index that server chose */
bool oplocks:1; /* enable oplocks */
unsigned int maxReq; /* Clients should submit no more */
/* than maxReq distinct unanswered SMBs to the server when using */
/* multiplexed reads or writes (for SMB1/CIFS only, not SMB2/SMB3) */
unsigned int maxBuf; /* maxBuf specifies the maximum */
/* message size the server can send or receive for non-raw SMBs */
/* maxBuf is returned by SMB NegotiateProtocol so maxBuf is only 0 */
	/* when the socket is set up (and during reconnect) before NegProt is sent */
unsigned int max_rw; /* maxRw specifies the maximum */
/* message size the server can send or receive for */
/* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
unsigned int capabilities; /* selective disabling of caps by smb sess */
int timeAdj; /* Adjust for difference in server time zone in sec */
__u64 CurrentMid; /* multiplex id - rotating counter, protected by GlobalMid_Lock */
char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */
/* 16th byte of RFC1001 workstation name is always null */
char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
__u32 sequence_number; /* for signing, protected by srv_mutex */
__u32 reconnect_instance; /* incremented on each reconnect */
struct session_key session_key;
unsigned long lstrp; /* when we got last response from this server */
struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
#define CIFS_NEGFLAVOR_UNENCAP 1 /* wct == 17, but no ext_sec */
#define CIFS_NEGFLAVOR_EXTENDED 2 /* wct == 17, ext_sec bit set */
char negflavor; /* NEGOTIATE response flavor */
/* extended security flavors that server supports */
bool sec_ntlmssp; /* supports NTLMSSP */
bool sec_kerberosu2u; /* supports U2U Kerberos */
bool sec_kerberos; /* supports plain Kerberos */
bool sec_mskerberos; /* supports legacy MS Kerberos */
bool large_buf; /* is current buffer large? */
/* use SMBD connection instead of socket */
bool rdma;
/* point to the SMBD connection if RDMA is used instead of socket */
struct smbd_connection *smbd_conn;
struct delayed_work echo; /* echo ping workqueue job */
char *smallbuf; /* pointer to current "small" buffer */
char *bigbuf; /* pointer to current "big" buffer */
/* Total size of this PDU. Only valid from cifs_demultiplex_thread */
unsigned int pdu_size;
unsigned int total_read; /* total amount of data read in this pass */
atomic_t in_send; /* requests trying to send */
atomic_t num_waiters; /* blocked waiting to get in sendrecv */
#ifdef CONFIG_CIFS_STATS2
atomic_t num_cmds[NUMBER_OF_SMB2_COMMANDS]; /* total requests by cmd */
atomic_t smb2slowcmd[NUMBER_OF_SMB2_COMMANDS]; /* count resps > 1 sec */
__u64 time_per_cmd[NUMBER_OF_SMB2_COMMANDS]; /* total time per cmd */
__u32 slowest_cmd[NUMBER_OF_SMB2_COMMANDS];
__u32 fastest_cmd[NUMBER_OF_SMB2_COMMANDS];
#endif /* STATS2 */
unsigned int max_read;
unsigned int max_write;
unsigned int min_offload;
unsigned int retrans;
struct {
		bool requested; /* "compress" mount option set */
bool enabled; /* actually negotiated with server */
__le16 alg; /* preferred alg negotiated with server */
} compression;
__u16 signing_algorithm;
__le16 cipher_type;
/* save initial negprot hash */
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
bool signing_negotiated; /* true if valid signing context rcvd from server */
bool posix_ext_supported;
struct delayed_work reconnect; /* reconnect workqueue job */
struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
unsigned long echo_interval;
/*
* Number of targets available for reconnect. The more targets
* the more tasks have to wait to let the demultiplex thread
* reconnect.
*/
int nr_targets;
bool noblockcnt; /* use non-blocking connect() */
/*
* If this is a session channel,
* primary_server holds the ref-counted
* pointer to primary channel connection for the session.
*/
#define SERVER_IS_CHAN(server) (!!(server)->primary_server)
struct TCP_Server_Info *primary_server;
__u16 channel_sequence_num; /* incremented on primary channel on each chan reconnect */
#ifdef CONFIG_CIFS_SWN_UPCALL
bool use_swn_dstaddr;
struct sockaddr_storage swn_dstaddr;
#endif
struct mutex refpath_lock; /* protects leaf_fullpath */
/*
* leaf_fullpath: Canonical DFS referral path related to this
* connection.
* It is used in DFS cache refresher, reconnect and may
* change due to nested DFS links.
*
* Protected by @refpath_lock and @srv_lock. The @refpath_lock is
* mostly used for not requiring a copy of @leaf_fullpath when getting
* cached or new DFS referrals (which might also sleep during I/O).
* While @srv_lock is held for making string and NULL comparisons against
* both fields as in mount(2) and cache refresh.
*
* format: \\HOST\SHARE[\OPTIONAL PATH]
*/
char *leaf_fullpath;
bool dfs_conn:1;
};
static inline bool is_smb1(struct TCP_Server_Info *server)
{
return HEADER_PREAMBLE_SIZE(server) != 0;
}
static inline void cifs_server_lock(struct TCP_Server_Info *server)
{
unsigned int nofs_flag = memalloc_nofs_save();
mutex_lock(&server->_srv_mutex);
server->nofs_flag = nofs_flag;
}
static inline void cifs_server_unlock(struct TCP_Server_Info *server)
{
unsigned int nofs_flag = server->nofs_flag;
mutex_unlock(&server->_srv_mutex);
memalloc_nofs_restore(nofs_flag);
}
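/*
 * Typical (illustrative) usage of the two helpers above: any allocation
 * made between lock and unlock is implicitly GFP_NOFS, which avoids
 * recursing into filesystem reclaim while the server mutex is held.
 *
 *	cifs_server_lock(server);
 *	... allocate / send on the socket ...
 *	cifs_server_unlock(server);
 */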
struct cifs_credits {
unsigned int value;
unsigned int instance;
unsigned int in_flight_check;
unsigned int rreq_debug_id;
unsigned int rreq_debug_index;
};
static inline unsigned int
in_flight(struct TCP_Server_Info *server)
{
unsigned int num;
spin_lock(&server->req_lock);
num = server->in_flight;
spin_unlock(&server->req_lock);
return num;
}
static inline bool
has_credits(struct TCP_Server_Info *server, int *credits, int num_credits)
{
int num;
spin_lock(&server->req_lock);
num = *credits;
spin_unlock(&server->req_lock);
return num >= num_credits;
}
static inline void
add_credits(struct TCP_Server_Info *server, struct cifs_credits *credits,
const int optype)
{
server->ops->add_credits(server, credits, optype);
}
static inline void
add_credits_and_wake_if(struct TCP_Server_Info *server,
struct cifs_credits *credits, const int optype)
{
if (credits->value) {
server->ops->add_credits(server, credits, optype);
wake_up(&server->request_q);
credits->value = 0;
}
}
static inline void
set_credits(struct TCP_Server_Info *server, const int val)
{
server->ops->set_credits(server, val);
}
static inline int
adjust_credits(struct TCP_Server_Info *server, struct cifs_io_subrequest *subreq,
unsigned int /* enum smb3_rw_credits_trace */ trace)
{
return server->ops->adjust_credits ?
server->ops->adjust_credits(server, subreq, trace) : 0;
}
static inline __le64
get_next_mid64(struct TCP_Server_Info *server)
{
return cpu_to_le64(server->ops->get_next_mid(server));
}
static inline __le16
get_next_mid(struct TCP_Server_Info *server)
{
__u16 mid = server->ops->get_next_mid(server);
/*
* The value in the SMB header should be little endian for easy
* on-the-wire decoding.
*/
return cpu_to_le16(mid);
}
static inline void
revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
{
if (server->ops->revert_current_mid)
server->ops->revert_current_mid(server, val);
}
static inline void
revert_current_mid_from_hdr(struct TCP_Server_Info *server,
const struct smb2_hdr *shdr)
{
unsigned int num = le16_to_cpu(shdr->CreditCharge);
	revert_current_mid(server, num > 0 ? num : 1);
}
static inline __u16
get_mid(const struct smb_hdr *smb)
{
return le16_to_cpu(smb->Mid);
}
static inline bool
compare_mid(__u16 mid, const struct smb_hdr *smb)
{
return mid == le16_to_cpu(smb->Mid);
}
/*
* When the server supports very large reads and writes via POSIX extensions,
* we can allow up to 2^24-1, minus the size of a READ/WRITE_AND_X header, not
* including the RFC1001 length.
*
* Note that this might make for "interesting" allocation problems during
* writeback however as we have to allocate an array of pointers for the
* pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096.
*
* For reads, there is a similar problem as we need to allocate an array
* of kvecs to handle the receive, though that should only need to be done
* once.
*/
#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
#define CIFS_MAX_RSIZE ((1<<24) - sizeof(READ_RSP) + 4)
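/*
 * Worked example of the allocation note above: a 16M (2^24 byte) write
 * spans 2^24 / 4096 = 4096 pages with PAGE_SIZE == 4096, and an array of
 * 4096 8-byte page pointers is 32KiB, the "~32kb page array" mentioned.
 */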
/*
* When the server doesn't allow large posix writes, only allow a rsize/wsize
* of 2^17-1 minus the size of the call header. That allows for a read or
* write up to the maximum size described by RFC1002.
*/
#define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
#define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4)
#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
/*
* Windows only supports a max of 60kb reads and 65535 byte writes. Default to
* those values when posix extensions aren't in force. In actuality here, we
* use 65536 to allow for a write that is a multiple of 4k. Most servers seem
* to be ok with the extra byte even though Windows doesn't send writes that
* are that large.
*
* Citation:
*
* https://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx
*/
#define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
#define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
/*
* Macros to allow the TCP_Server_Info->net field and related code to drop out
* when CONFIG_NET_NS isn't set.
*/
#ifdef CONFIG_NET_NS
static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv)
{
return srv->net;
}
static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
{
srv->net = net;
}
#else
static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv)
{
return &init_net;
}
static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
{
}
#endif
struct cifs_server_iface {
struct list_head iface_head;
struct kref refcount;
size_t speed;
size_t weight_fulfilled;
unsigned int num_channels;
unsigned int rdma_capable : 1;
unsigned int rss_capable : 1;
	unsigned int is_active : 1; /* unset if non-existent */
struct sockaddr_storage sockaddr;
};
/* release iface when last ref is dropped */
static inline void
release_iface(struct kref *ref)
{
struct cifs_server_iface *iface = container_of(ref,
struct cifs_server_iface,
refcount);
kfree(iface);
}
struct cifs_chan {
unsigned int in_reconnect : 1; /* if session setup in progress for this channel */
struct TCP_Server_Info *server;
struct cifs_server_iface *iface; /* interface in use */
__u8 signkey[SMB3_SIGN_KEY_SIZE];
};
#define CIFS_SES_FLAG_SCALE_CHANNELS (0x1)
/*
* Session structure. One of these for each uid session with a particular host
*/
struct cifs_ses {
struct list_head smb_ses_list;
struct list_head rlist; /* reconnect list */
struct list_head tcon_list;
struct list_head dlist; /* dfs list */
struct cifs_tcon *tcon_ipc;
spinlock_t ses_lock; /* protect anything here that is not protected */
struct mutex session_mutex;
struct TCP_Server_Info *server; /* pointer to server info */
int ses_count; /* reference counter */
enum ses_status_enum ses_status; /* updates protected by cifs_tcp_ses_lock */
unsigned int overrideSecFlg; /* if non-zero override global sec flags */
char *serverOS; /* name of operating system underlying server */
char *serverNOS; /* name of network operating system of server */
char *serverDomain; /* security realm of server */
__u64 Suid; /* remote smb uid */
kuid_t linux_uid; /* overriding owner of files on the mount */
kuid_t cred_uid; /* owner of credentials */
unsigned int capabilities;
char ip_addr[INET6_ADDRSTRLEN + 1]; /* Max ipv6 (or v4) addr string len */
	char *user_name; /* must not be null except during session init;
			    filled in after mount option parsing */
char *domainName;
char *password;
	char *password2; /* when key rotation is used, the new password may be set before the old one expires */
char workstation_name[CIFS_MAX_WORKSTATION_LEN];
struct session_key auth_key;
struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
enum securityEnum sectype; /* what security flavor was specified? */
enum upcall_target_enum upcall_target; /* what upcall target was specified? */
bool sign; /* is signing required? */
bool domainAuto:1;
bool expired_pwd; /* track if access denied or expired pwd so can know if need to update */
unsigned int flags;
__u16 session_flags;
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
__u8 smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
/*
* Network interfaces available on the server this session is
* connected to.
*
* Other channels can be opened by connecting and binding this
* session to interfaces from this list.
*
* iface_lock should be taken when accessing any of these fields
*/
spinlock_t iface_lock;
/* ========= begin: protected by iface_lock ======== */
struct list_head iface_list;
size_t iface_count;
unsigned long iface_last_update; /* jiffies */
/* ========= end: protected by iface_lock ======== */
spinlock_t chan_lock;
/* ========= begin: protected by chan_lock ======== */
#define CIFS_MAX_CHANNELS 16
#define CIFS_INVAL_CHAN_INDEX (-1)
#define CIFS_ALL_CHANNELS_SET(ses) \
((1UL << (ses)->chan_count) - 1)
#define CIFS_ALL_CHANS_GOOD(ses) \
(!(ses)->chans_need_reconnect)
#define CIFS_ALL_CHANS_NEED_RECONNECT(ses) \
((ses)->chans_need_reconnect == CIFS_ALL_CHANNELS_SET(ses))
#define CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses) \
((ses)->chans_need_reconnect = CIFS_ALL_CHANNELS_SET(ses))
#define CIFS_CHAN_NEEDS_RECONNECT(ses, index) \
test_bit((index), &(ses)->chans_need_reconnect)
#define CIFS_CHAN_IN_RECONNECT(ses, index) \
((ses)->chans[(index)].in_reconnect)
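/*
 * Worked example for the channel bitmap macros above: with chan_count == 3,
 * CIFS_ALL_CHANNELS_SET(ses) == (1UL << 3) - 1 == 0b111, so
 * CIFS_ALL_CHANS_NEED_RECONNECT(ses) is true only when chans_need_reconnect
 * has all three low bits set.
 */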
struct cifs_chan chans[CIFS_MAX_CHANNELS];
size_t chan_count;
size_t chan_max;
atomic_t chan_seq; /* round robin state */
/*
* chans_need_reconnect is a bitmap indicating which of the channels
* under this smb session needs to be reconnected.
* If not multichannel session, only one bit will be used.
*
* We will ask for sess and tcon reconnection only if all the
* channels are marked for needing reconnection. This will
* enable the sessions on top to continue to live till any
* of the channels below are active.
*/
unsigned long chans_need_reconnect;
/* ========= end: protected by chan_lock ======== */
struct cifs_ses *dfs_root_ses;
struct nls_table *local_nls;
};
static inline bool
cap_unix(struct cifs_ses *ses)
{
return ses->server->vals->cap_unix & ses->capabilities;
}
/*
* common struct for holding inode info when searching for or updating an
* inode with new info
*/
#define CIFS_FATTR_JUNCTION 0x1
#define CIFS_FATTR_DELETE_PENDING 0x2
#define CIFS_FATTR_NEED_REVAL 0x4
#define CIFS_FATTR_INO_COLLISION 0x8
#define CIFS_FATTR_UNKNOWN_NLINK 0x10
#define CIFS_FATTR_FAKE_ROOT_INO 0x20
struct cifs_fattr {
u32 cf_flags;
u32 cf_cifsattrs;
u64 cf_uniqueid;
u64 cf_eof;
u64 cf_bytes;
u64 cf_createtime;
kuid_t cf_uid;
kgid_t cf_gid;
umode_t cf_mode;
dev_t cf_rdev;
unsigned int cf_nlink;
unsigned int cf_dtype;
struct timespec64 cf_atime;
struct timespec64 cf_mtime;
struct timespec64 cf_ctime;
u32 cf_cifstag;
char *cf_symlink_target;
};
/*
* there is one of these for each connection to a resource on a particular
* session
*/
struct cifs_tcon {
struct list_head tcon_list;
int debug_id; /* Debugging for tracing */
int tc_count;
struct list_head rlist; /* reconnect list */
spinlock_t tc_lock; /* protect anything here that is not protected */
atomic_t num_local_opens; /* num of all opens including disconnected */
atomic_t num_remote_opens; /* num of all network opens on server */
struct list_head openFileList;
spinlock_t open_file_lock; /* protects list above */
struct cifs_ses *ses; /* pointer to session associated with */
char tree_name[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
char *nativeFileSystem;
char *password; /* for share-level security */
__u32 tid; /* The 4 byte tree id */
__u16 Flags; /* optional support bits */
enum tid_status_enum status;
atomic_t num_smbs_sent;
union {
struct {
atomic_t num_writes;
atomic_t num_reads;
atomic_t num_flushes;
atomic_t num_oplock_brks;
atomic_t num_opens;
atomic_t num_closes;
atomic_t num_deletes;
atomic_t num_mkdirs;
atomic_t num_posixopens;
atomic_t num_posixmkdirs;
atomic_t num_rmdirs;
atomic_t num_renames;
atomic_t num_t2renames;
atomic_t num_ffirst;
atomic_t num_fnext;
atomic_t num_fclose;
atomic_t num_hardlinks;
atomic_t num_symlinks;
atomic_t num_locks;
atomic_t num_acl_get;
atomic_t num_acl_set;
} cifs_stats;
struct {
atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
} smb2_stats;
} stats;
__u64 bytes_read;
__u64 bytes_written;
spinlock_t stat_lock; /* protects the two fields above */
time64_t stats_from_time;
FILE_SYSTEM_DEVICE_INFO fsDevInfo;
FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
FILE_SYSTEM_UNIX_INFO fsUnixInfo;
bool ipc:1; /* set if connection to IPC$ share (always also pipe) */
bool pipe:1; /* set if connection to pipe share */
bool print:1; /* set if connection to printer share */
bool retry:1;
bool nocase:1;
	bool nohandlecache:1; /* can be turned off if server has resource problems */
bool nodelete:1;
bool seal:1; /* transport encryption for this mounted share */
	bool unix_ext:1; /* if false, disable Linux extensions to the CIFS
			    protocol for this mount even if the server
			    supports them */
bool posix_extensions; /* if true SMB3.11 posix extensions enabled */
bool local_lease:1; /* check leases (only) on local system not remote */
bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */
bool broken_sparse_sup; /* if server or share does not support sparse */
bool need_reconnect:1; /* connection reset, tid now invalid */
bool need_reopen_files:1; /* need to reopen tcon file handles */
bool use_resilient:1; /* use resilient instead of durable handles */
bool use_persistent:1; /* use persistent instead of durable handles */
bool no_lease:1; /* Do not request leases on files or directories */
bool use_witness:1; /* use witness protocol */
__le32 capabilities;
__u32 share_flags;
__u32 maximal_access;
__u32 vol_serial_number;
__le64 vol_create_time;
__u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */
__u32 handle_timeout; /* persistent and durable handle timeout in ms */
__u32 ss_flags; /* sector size flags */
__u32 perf_sector_size; /* best sector size for perf */
__u32 max_chunks;
__u32 max_bytes_chunk;
__u32 max_bytes_copy;
__u32 max_cached_dirs;
#ifdef CONFIG_CIFS_FSCACHE
u64 resource_id; /* server resource id */
bool fscache_acquired; /* T if we've tried acquiring a cookie */
struct fscache_volume *fscache; /* cookie for share */
struct mutex fscache_lock; /* Prevent regetting a cookie */
#endif
struct list_head pending_opens; /* list of incomplete opens */
struct cached_fids *cfids;
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
struct delayed_work dfs_cache_work;
struct list_head dfs_ses_list;
#endif
struct delayed_work query_interfaces; /* query interfaces workqueue job */
char *origin_fullpath; /* canonical copy of smb3_fs_context::source */
};
/*
* This is a refcounted and timestamped container for a tcon pointer. The
* container holds a tcon reference. It is considered safe to free one of
* these when the tl_count goes to 0. The tl_time is the time of the last
* "get" on the container.
*/
struct tcon_link {
struct rb_node tl_rbnode;
kuid_t tl_uid;
unsigned long tl_flags;
#define TCON_LINK_MASTER 0
#define TCON_LINK_PENDING 1
#define TCON_LINK_IN_TREE 2
unsigned long tl_time;
atomic_t tl_count;
struct cifs_tcon *tl_tcon;
};
extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb);
extern void smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst);
static inline struct cifs_tcon *
tlink_tcon(struct tcon_link *tlink)
{
return tlink->tl_tcon;
}
static inline struct tcon_link *
cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
{
return cifs_sb->master_tlink;
}
extern void cifs_put_tlink(struct tcon_link *tlink);
static inline struct tcon_link *
cifs_get_tlink(struct tcon_link *tlink)
{
if (tlink && !IS_ERR(tlink))
atomic_inc(&tlink->tl_count);
return tlink;
}
/* This function is always expected to succeed */
extern struct cifs_tcon *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
#define CIFS_OPLOCK_NO_CHANGE 0xfe
struct cifs_pending_open {
struct list_head olist;
struct tcon_link *tlink;
__u8 lease_key[16];
__u32 oplock;
};
struct cifs_deferred_close {
struct list_head dlist;
struct tcon_link *tlink;
__u16 netfid;
__u64 persistent_fid;
__u64 volatile_fid;
};
/*
* This info hangs off the cifsFileInfo structure, pointed to by llist.
* This is used to track byte stream locks on the file
*/
struct cifsLockInfo {
struct list_head llist; /* pointer to next cifsLockInfo */
struct list_head blist; /* pointer to locks blocked on this */
wait_queue_head_t block_q;
__u64 offset;
__u64 length;
__u32 pid;
__u16 type;
__u16 flags;
};
/*
* One of these for each open instance of a file
*/
struct cifs_search_info {
loff_t index_of_last_entry;
__u16 entries_in_buffer;
__u16 info_level;
__u32 resume_key;
char *ntwrk_buf_start;
char *srch_entries_start;
char *last_entry;
const char *presume_name;
unsigned int resume_name_len;
bool endOfSearch:1;
bool emptyDir:1;
bool unicode:1;
bool smallBuf:1; /* so we know which buf_release function to call */
};
#define ACL_NO_MODE ((umode_t)(-1))
struct cifs_open_parms {
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb;
int disposition;
int desired_access;
int create_options;
const char *path;
struct cifs_fid *fid;
umode_t mode;
bool reconnect:1;
bool replay:1; /* indicates that this open is for a replay */
struct kvec *ea_cctx;
};
struct cifs_fid {
__u16 netfid;
__u64 persistent_fid; /* persist file id for smb2 */
__u64 volatile_fid; /* volatile file id for smb2 */
__u8 lease_key[SMB2_LEASE_KEY_SIZE]; /* lease key for smb2 */
__u8 create_guid[16];
__u32 access;
struct cifs_pending_open *pending_open;
unsigned int epoch;
#ifdef CONFIG_CIFS_DEBUG2
__u64 mid;
#endif /* CIFS_DEBUG2 */
bool purge_cache;
};
struct cifs_fid_locks {
struct list_head llist;
struct cifsFileInfo *cfile; /* fid that owns locks */
struct list_head locks; /* locks held by fid above */
};
struct cifsFileInfo {
/* following two lists are protected by tcon->open_file_lock */
struct list_head tlist; /* pointer to next fid owned by tcon */
struct list_head flist; /* next fid (file instance) for this inode */
/* lock list below protected by cifsi->lock_sem */
struct cifs_fid_locks *llist; /* brlocks held by this fid */
kuid_t uid; /* allows finding which FileInfo structure */
__u32 pid; /* process id who opened file */
struct cifs_fid fid; /* file id from remote */
struct list_head rlist; /* reconnect list */
/* BB add lock scope info here if needed */
/* lock scope id (0 if none) */
struct dentry *dentry;
struct tcon_link *tlink;
unsigned int f_flags;
bool invalidHandle:1; /* file closed via session abend */
bool swapfile:1;
bool oplock_break_cancelled:1;
bool status_file_deleted:1; /* file has been deleted */
bool offload:1; /* offload final part of _put to a wq */
unsigned int oplock_epoch; /* epoch from the lease break */
__u32 oplock_level; /* oplock/lease level from the lease break */
int count;
spinlock_t file_info_lock; /* protects four flag/count fields above */
struct mutex fh_mutex; /* prevents reopen race after dead ses*/
struct cifs_search_info srch_inf;
struct work_struct oplock_break; /* work for oplock breaks */
struct work_struct put; /* work for the final part of _put */
struct work_struct serverclose; /* work for serverclose */
struct delayed_work deferred;
bool deferred_close_scheduled; /* Flag to indicate close is scheduled */
char *symlink_target;
};
struct cifs_io_parms {
__u16 netfid;
__u64 persistent_fid; /* persist file id for smb2 */
__u64 volatile_fid; /* volatile file id for smb2 */
__u32 pid;
__u64 offset;
unsigned int length;
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
};
struct cifs_io_request {
struct netfs_io_request rreq;
struct cifsFileInfo *cfile;
struct TCP_Server_Info *server;
pid_t pid;
};
/* asynchronous read support */
struct cifs_io_subrequest {
union {
struct netfs_io_subrequest subreq;
struct netfs_io_request *rreq;
struct cifs_io_request *req;
};
ssize_t got_bytes;
unsigned int xid;
int result;
bool have_xid;
bool replay;
struct kvec iov[2];
struct TCP_Server_Info *server;
#ifdef CONFIG_CIFS_SMB_DIRECT
struct smbd_mr *mr;
#endif
struct cifs_credits credits;
};
/*
* Take a reference on the file private data. Must be called with
* cfile->file_info_lock held.
*/
static inline void
cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
{
++cifs_file->count;
}
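/*
 * Illustrative call pattern for the locked variant above, honouring the
 * locking rule documented in its comment:
 *
 *	spin_lock(&cifs_file->file_info_lock);
 *	cifsFileInfo_get_locked(cifs_file);
 *	spin_unlock(&cifs_file->file_info_lock);
 */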
struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr,
bool offload);
void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
#define CIFS_CACHE_READ_FLG 1
#define CIFS_CACHE_HANDLE_FLG 2
#define CIFS_CACHE_RH_FLG (CIFS_CACHE_READ_FLG | CIFS_CACHE_HANDLE_FLG)
#define CIFS_CACHE_WRITE_FLG 4
#define CIFS_CACHE_RW_FLG (CIFS_CACHE_READ_FLG | CIFS_CACHE_WRITE_FLG)
#define CIFS_CACHE_RHW_FLG (CIFS_CACHE_RW_FLG | CIFS_CACHE_HANDLE_FLG)
#define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE))
#define CIFS_CACHE_HANDLE(cinode) (cinode->oplock & CIFS_CACHE_HANDLE_FLG)
#define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE))
/*
* One of these for each file inode
*/
struct cifsInodeInfo {
struct netfs_inode netfs; /* Netfslib context and vfs inode */
bool can_cache_brlcks;
	struct list_head llist; /* locks held by this inode */
/*
* NOTE: Some code paths call down_read(lock_sem) twice, so
* we must always use cifs_down_write() instead of down_write()
* for this semaphore to avoid deadlocks.
*/
struct rw_semaphore lock_sem; /* protect the fields above */
/* BB add in lists for dirty pages i.e. write caching info for oplock */
struct list_head openFileList;
spinlock_t open_file_lock; /* protects openFileList */
__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
unsigned int oplock; /* oplock/lease level we have */
unsigned int epoch; /* used to track lease state changes */
#define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */
#define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */
#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */
#define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */
#define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
#define CIFS_INO_LOCK (5) /* lock bit for synchronization */
#define CIFS_INO_CLOSE_ON_LOCK (7) /* Not to defer the close when lock is set */
unsigned long flags;
spinlock_t writers_lock;
unsigned int writers; /* Number of writers on this inode */
unsigned long time; /* jiffies of last update of inode */
u64 uniqueid; /* server inode number */
u64 createtime; /* creation time on server */
__u8 lease_key[SMB2_LEASE_KEY_SIZE]; /* lease key for this inode */
struct list_head deferred_closes; /* list of deferred closes */
spinlock_t deferred_lock; /* protection on deferred list */
bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */
char *symlink_target;
__u32 reparse_tag;
};
static inline struct cifsInodeInfo *
CIFS_I(struct inode *inode)
{
return container_of(inode, struct cifsInodeInfo, netfs.inode);
}
static inline struct cifs_sb_info *
CIFS_SB(struct super_block *sb)
{
return sb->s_fs_info;
}
static inline struct cifs_sb_info *
CIFS_FILE_SB(struct file *file)
{
return CIFS_SB(file_inode(file)->i_sb);
}
static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
{
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
return '/';
else
return '\\';
}
static inline void
convert_delimiter(char *path, char delim)
{
char old_delim, *pos;
if (delim == '/')
old_delim = '\\';
else
old_delim = '/';
pos = path;
while ((pos = strchr(pos, old_delim)))
*pos = delim;
}
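/*
 * Worked example: given path = "\\dir\\sub\\file" (C escaping for single
 * backslashes), convert_delimiter(path, '/') rewrites it in place to
 * "/dir/sub/file"; calling it with '\\' performs the reverse conversion.
 */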
#define cifs_stats_inc atomic_inc
static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
unsigned int bytes)
{
if (bytes) {
spin_lock(&tcon->stat_lock);
tcon->bytes_written += bytes;
spin_unlock(&tcon->stat_lock);
}
}
static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
unsigned int bytes)
{
spin_lock(&tcon->stat_lock);
tcon->bytes_read += bytes;
spin_unlock(&tcon->stat_lock);
}
/*
* This is the prototype for the mid receive function. This function is for
* receiving the rest of the SMB frame, starting with the WordCount (which is
* just after the MID in struct smb_hdr). Note:
*
* - This will be called by cifsd, with no locks held.
* - The mid will still be on the pending_mid_q.
* - mid->resp_buf will point to the current buffer.
*
* Returns zero on a successful receive, or an error. The receive state in
* the TCP_Server_Info will also be updated.
*/
typedef int (mid_receive_t)(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
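/*
 * A minimal, hypothetical receive callback sketch matching the typedef
 * above (the name is illustrative, not from this file):
 *
 *	static int demo_receive(struct TCP_Server_Info *server,
 *				struct mid_q_entry *mid)
 *	{
 *		// read the rest of the frame into mid->resp_buf here
 *		return 0;	// or -errno on failure
 *	}
 */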
/*
* This is the prototype for the mid callback function. This is called once the
* mid has been received off of the socket. When creating one, take special
* care to avoid deadlocks. Things to bear in mind:
*
* - it will be called by cifsd, with no locks held
* - the mid will be removed from any lists
*/
typedef void (mid_callback_t)(struct mid_q_entry *mid);
/*
 * This is the prototype for the mid handle function. This is called once the
 * mid has been recognized after decryption of the message.
*/
typedef int (mid_handle_t)(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
/* one of these for every pending CIFS request to the server */
struct mid_q_entry {
struct list_head qhead; /* mids waiting on reply from this server */
struct kref refcount;
struct TCP_Server_Info *server; /* server corresponding to this mid */
__u64 mid; /* multiplex id */
__u16 credits; /* number of credits consumed by this mid */
__u16 credits_received; /* number of credits from the response */
__u32 pid; /* process id */
__u32 sequence_number; /* for CIFS signing */
unsigned long when_alloc; /* when mid was created */
#ifdef CONFIG_CIFS_STATS2
unsigned long when_sent; /* time when smb send finished */
unsigned long when_received; /* when demux complete (taken off wire) */
#endif
mid_receive_t *receive; /* call receive callback */
mid_callback_t *callback; /* call completion callback */
mid_handle_t *handle; /* call handle mid callback */
void *callback_data; /* general purpose pointer for callback */
struct task_struct *creator;
void *resp_buf; /* pointer to received SMB header */
unsigned int resp_buf_size;
	int mid_state; /* wish this were an enum but it cannot be passed to wait_event */
unsigned int mid_flags;
__le16 command; /* smb command code */
unsigned int optype; /* operation type */
bool large_buf:1; /* if valid response, is pointer to large buf */
bool multiRsp:1; /* multiple trans2 responses for one request */
bool multiEnd:1; /* both received */
bool decrypted:1; /* decrypted entry */
};
struct close_cancelled_open {
struct cifs_fid fid;
struct cifs_tcon *tcon;
struct work_struct work;
__u64 mid;
__u16 cmd;
};
/*
 * Make code in transport.c a little cleaner by moving the update of
 * optional stats into the functions below.
 */
static inline void cifs_in_send_inc(struct TCP_Server_Info *server)
{
atomic_inc(&server->in_send);
}
static inline void cifs_in_send_dec(struct TCP_Server_Info *server)
{
atomic_dec(&server->in_send);
}
static inline void cifs_num_waiters_inc(struct TCP_Server_Info *server)
{
atomic_inc(&server->num_waiters);
}
static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server)
{
atomic_dec(&server->num_waiters);
}
#ifdef CONFIG_CIFS_STATS2
static inline void cifs_save_when_sent(struct mid_q_entry *mid)
{
mid->when_sent = jiffies;
}
#else
static inline void cifs_save_when_sent(struct mid_q_entry *mid)
{
}
#endif
/* for pending dnotify requests */
struct dir_notify_req {
struct list_head lhead;
__le16 Pid;
__le16 PidHigh;
__u16 Mid;
__u16 Tid;
__u16 Uid;
__u16 netfid;
__u32 filter; /* CompletionFilter (for multishot) */
int multishot;
struct file *pfile;
};
struct dfs_info3_param {
int flags; /* DFSREF_REFERRAL_SERVER, DFSREF_STORAGE_SERVER*/
int path_consumed;
int server_type;
int ref_flag;
char *path_name;
char *node_name;
int ttl;
};
struct file_list {
struct list_head list;
struct cifsFileInfo *cfile;
};
struct cifs_mount_ctx {
struct cifs_sb_info *cifs_sb;
struct smb3_fs_context *fs_ctx;
unsigned int xid;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
};
static inline void __free_dfs_info_param(struct dfs_info3_param *param)
{
kfree(param->path_name);
kfree(param->node_name);
}
static inline void free_dfs_info_param(struct dfs_info3_param *param)
{
if (param)
__free_dfs_info_param(param);
}
static inline void zfree_dfs_info_param(struct dfs_info3_param *param)
{
if (param) {
__free_dfs_info_param(param);
memset(param, 0, sizeof(*param));
}
}
static inline void free_dfs_info_array(struct dfs_info3_param *param,
int number_of_items)
{
int i;
if ((number_of_items == 0) || (param == NULL))
return;
for (i = 0; i < number_of_items; i++) {
kfree(param[i].path_name);
kfree(param[i].node_name);
}
kfree(param);
}
static inline bool is_interrupt_error(int error)
{
switch (error) {
case -EINTR:
case -ERESTARTSYS:
case -ERESTARTNOHAND:
case -ERESTARTNOINTR:
return true;
}
return false;
}
static inline bool is_retryable_error(int error)
{
if (is_interrupt_error(error) || error == -EAGAIN)
return true;
return false;
}
static inline bool is_replayable_error(int error)
{
if (error == -EAGAIN || error == -ECONNABORTED)
return true;
return false;
}
/* cifs_get_writable_file() flags */
#define FIND_WR_ANY 0
#define FIND_WR_FSUID_ONLY 1
#define FIND_WR_WITH_DELETE 2
#define MID_FREE 0
#define MID_REQUEST_ALLOCATED 1
#define MID_REQUEST_SUBMITTED 2
#define MID_RESPONSE_RECEIVED 4
#define MID_RETRY_NEEDED 8 /* session closed while this request out */
#define MID_RESPONSE_MALFORMED 0x10
#define MID_SHUTDOWN 0x20
#define MID_RESPONSE_READY 0x40 /* ready for another process to handle the rsp */
/* Flags */
#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
#define MID_DELETED 2 /* Mid has been dequeued/deleted */
/* Types of response buffer returned from SendReceive2 */
#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
#define CIFS_SMALL_BUFFER 1
#define CIFS_LARGE_BUFFER 2
#define CIFS_IOVEC 4 /* array of response buffers */
/* Type of Request to SendReceive2 */
#define CIFS_BLOCKING_OP 1 /* operation can block */
#define CIFS_NON_BLOCKING 2 /* do not block waiting for credits */
#define CIFS_TIMEOUT_MASK 0x003 /* only one of above set in req */
#define CIFS_LOG_ERROR 0x010 /* log NT STATUS if non-zero */
#define CIFS_LARGE_BUF_OP 0x020 /* large request buffer */
#define CIFS_NO_RSP_BUF 0x040 /* no response buffer required */
/* Type of request operation */
#define CIFS_ECHO_OP 0x080 /* echo request */
#define CIFS_OBREAK_OP 0x0100 /* oplock break request */
#define CIFS_NEG_OP 0x0200 /* negotiate request */
#define CIFS_CP_CREATE_CLOSE_OP 0x0400 /* compound create+close request */
/* Lower bitmask values are reserved by others below. */
#define CIFS_SESS_OP 0x2000 /* session setup request */
#define CIFS_OP_MASK 0x2780 /* mask request type */
#define CIFS_HAS_CREDITS 0x0400 /* already has credits */
#define CIFS_TRANSFORM_REQ 0x0800 /* transform request before sending */
#define CIFS_NO_SRV_RSP 0x1000 /* there is no server response */
#define CIFS_COMPRESS_REQ 0x4000 /* compress request before sending */
/* Security Flags: indicate type of session setup needed */
#define CIFSSEC_MAY_SIGN 0x00001
#define CIFSSEC_MAY_NTLMV2 0x00004
#define CIFSSEC_MAY_KRB5 0x00008
#define CIFSSEC_MAY_SEAL 0x00040
#define CIFSSEC_MAY_NTLMSSP 0x00080 /* raw ntlmssp with ntlmv2 */
#define CIFSSEC_MUST_SIGN 0x01001
/* note that only one of the following can be set; setting more than one
   MUST flag will require use of the stronger protocol */
#define CIFSSEC_MUST_NTLMV2 0x04004
#define CIFSSEC_MUST_KRB5 0x08008
#ifdef CONFIG_CIFS_UPCALL
#define CIFSSEC_MASK 0xCF0CF /* flags supported if no weak allowed */
#else
#define CIFSSEC_MASK 0xC70C7 /* flags supported if no weak allowed */
#endif /* UPCALL */
#define CIFSSEC_MUST_SEAL 0x40040
#define CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */
#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL)
#define CIFSSEC_MAX (CIFSSEC_MAY_SIGN | CIFSSEC_MUST_KRB5 | CIFSSEC_MAY_SEAL)
#define CIFSSEC_AUTH_MASK (CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)
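/*
 * Note that each CIFSSEC_MUST_* value is the corresponding CIFSSEC_MAY_* bit
 * combined with an extra "must" bit in the upper half, so requesting MUST
 * also implies the matching MAY, e.g.:
 *
 *	CIFSSEC_MUST_SIGN   (0x01001) == 0x01000 | CIFSSEC_MAY_SIGN   (0x00001)
 *	CIFSSEC_MUST_NTLMV2 (0x04004) == 0x04000 | CIFSSEC_MAY_NTLMV2 (0x00004)
 */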
/*
*****************************************************************
* All constants go here
*****************************************************************
*/
#define UID_HASH (16)
/*
* Note that ONE module should define _DECLARE_GLOBALS_HERE to cause the
* following to be declared.
*/
/****************************************************************************
 * Here are all the locks (spinlock, mutex, semaphore) in cifs.ko, arranged
 * according to the locking order, i.e. if two locks are to be held together,
 * the lock that appears higher in this list needs to be taken before the other.
 *
 * If you hold a lock that is lower in this list and you need to take a higher
 * lock (or if you think that one of the functions you're calling may need to),
 * first drop the lock you hold, pick up the higher lock, then the lower one.
 * This ensures that locks are only ever taken in one direction in the table
 * below (top to bottom).
 *
 * If you expect a function to be called with a lock held, explicitly document
 * this in the comments on top of your function definition.
 *
 * Also, try to keep critical sections (lock hold times) as short as possible.
 * Blocking or calling other functions with a lock held always increases the
 * risk of a deadlock.
 *
 * Following these rules will avoid unnecessary deadlocks, which can get really
 * hard to debug. Whenever you introduce a new lock, or notice an existing one
 * missing, please add it to this list in the position expected by the locking
 * order.
*
* =====================================================================================
* Lock Protects Initialization fn
* =====================================================================================
* vol_list_lock
* vol_info->ctx_lock vol_info->ctx
* cifs_sb_info->tlink_tree_lock cifs_sb_info->tlink_tree cifs_setup_cifs_sb
* TCP_Server_Info-> TCP_Server_Info cifs_get_tcp_session
* reconnect_mutex
* TCP_Server_Info->srv_mutex TCP_Server_Info cifs_get_tcp_session
* cifs_ses->session_mutex cifs_ses sesInfoAlloc
* cifs_tcon
* cifs_tcon->open_file_lock cifs_tcon->openFileList tconInfoAlloc
* cifs_tcon->pending_opens
* cifs_tcon->stat_lock cifs_tcon->bytes_read tconInfoAlloc
* cifs_tcon->bytes_written
* cifs_tcp_ses_lock cifs_tcp_ses_list sesInfoAlloc
* GlobalMid_Lock GlobalMaxActiveXid init_cifs
* GlobalCurrentXid
* GlobalTotalActiveXid
* TCP_Server_Info->srv_lock (anything in struct not protected by another lock and can change)
* TCP_Server_Info->mid_lock TCP_Server_Info->pending_mid_q cifs_get_tcp_session
* ->CurrentMid
* (any changes in mid_q_entry fields)
* TCP_Server_Info->req_lock TCP_Server_Info->in_flight cifs_get_tcp_session
* ->credits
* ->echo_credits
* ->oplock_credits
* ->reconnect_instance
* cifs_ses->ses_lock (anything that is not protected by another lock and can change)
* cifs_ses->iface_lock cifs_ses->iface_list sesInfoAlloc
* ->iface_count
* ->iface_last_update
* cifs_ses->chan_lock cifs_ses->chans
* ->chans_need_reconnect
* ->chans_in_reconnect
* cifs_tcon->tc_lock (anything that is not protected by another lock and can change)
 * inode->i_rwsem (taken e.g. by fs/netfs/locking.c) should be taken before cifsInodeInfo locks
* cifsInodeInfo->open_file_lock cifsInodeInfo->openFileList cifs_alloc_inode
* cifsInodeInfo->writers_lock cifsInodeInfo->writers cifsInodeInfo_alloc
* cifsInodeInfo->lock_sem cifsInodeInfo->llist cifs_init_once
* ->can_cache_brlcks
* cifsInodeInfo->deferred_lock cifsInodeInfo->deferred_closes cifsInodeInfo_alloc
* cached_fids->cfid_list_lock cifs_tcon->cfids->entries init_cached_dirs
* cifsFileInfo->fh_mutex cifsFileInfo cifs_new_fileinfo
* cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo
* ->invalidHandle initiate_cifs_search
* ->oplock_break_cancelled
****************************************************************************/
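/*
 * A minimal sketch of the re-ordering rule above, with hypothetical lock
 * names (lower_lock/higher_lock are placeholders, not real cifs.ko locks):
 *
 *	spin_unlock(&lower_lock);	// drop the lower lock first
 *	spin_lock(&higher_lock);	// then take the higher lock
 *	spin_lock(&lower_lock);		// and re-take the lower one
 */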
#ifdef DECLARE_GLOBALS_HERE
#define GLOBAL_EXTERN
#else
#define GLOBAL_EXTERN extern
#endif
/*
 * the list of TCP_Server_Info structures, i.e. each of the sockets
 * connecting our client to a distinct server (IP address), is
 * chained together by cifs_tcp_ses_list. The list of all our SMB
 * sessions (and from that the tree connections) can be found
 * by iterating over cifs_tcp_ses_list
*/
extern struct list_head cifs_tcp_ses_list;
/*
* This lock protects the cifs_tcp_ses_list, the list of smb sessions per
* tcp session, and the list of tcon's per smb session. It also protects
* the reference counters for the server, smb session, and tcon.
 * Generally the locks should be taken in the order tcp_ses_lock before
 * tcon->open_file_lock and that before file->file_info_lock, since the
 * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file
*/
extern spinlock_t cifs_tcp_ses_lock;
/*
* Global transaction id (XID) information
*/
extern unsigned int GlobalCurrentXid; /* protected by GlobalMid_Lock */
extern unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
extern unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Lock */
extern spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
/*
* Global counters, updated atomically
*/
extern atomic_t sesInfoAllocCount;
extern atomic_t tconInfoAllocCount;
extern atomic_t tcpSesNextId;
extern atomic_t tcpSesAllocCount;
extern atomic_t tcpSesReconnectCount;
extern atomic_t tconInfoReconnectCount;
/* Various Debug counters */
extern atomic_t buf_alloc_count; /* current number allocated */
extern atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
extern atomic_t total_buf_alloc_count; /* total allocated over all time */
extern atomic_t total_small_buf_alloc_count;
extern unsigned int slow_rsp_threshold; /* number of secs before logging */
#endif
/* Misc globals */
extern bool enable_oplocks; /* enable or disable oplocks */
extern bool lookupCacheEnabled;
extern unsigned int global_secflags; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */
extern unsigned int sign_CIFS_PDUs; /* enable smb packet signing */
extern bool enable_gcm_256; /* allow optional negotiate of strongest signing (aes-gcm-256) */
extern bool require_gcm_256; /* require use of strongest signing (aes-gcm-256) */
extern bool enable_negotiate_signing; /* request use of faster (GMAC) signing if available */
extern bool linuxExtEnabled; /* enable Linux/Unix CIFS extensions */
extern unsigned int CIFSMaxBufSize; /* max size not including hdr */
extern unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
extern unsigned int cifs_min_small; /* min size of small buf pool */
extern unsigned int cifs_max_pending; /* MAX requests at once to server*/
extern unsigned int dir_cache_timeout; /* max time for directory lease caching of dir */
extern bool disable_legacy_dialects; /* forbid vers=1.0 and vers=2.0 mounts */
extern atomic_t mid_count;
void cifs_oplock_break(struct work_struct *work);
void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
void smb2_deferred_work_close(struct work_struct *work);
extern const struct slow_work_ops cifs_oplock_break_ops;
extern struct workqueue_struct *cifsiod_wq;
extern struct workqueue_struct *decrypt_wq;
extern struct workqueue_struct *fileinfo_put_wq;
extern struct workqueue_struct *cifsoplockd_wq;
extern struct workqueue_struct *deferredclose_wq;
extern struct workqueue_struct *serverclose_wq;
extern struct workqueue_struct *cfid_put_wq;
extern __u32 cifs_lock_secret;
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
extern mempool_t cifs_io_request_pool;
extern mempool_t cifs_io_subrequest_pool;
/* Operations for different SMB versions */
#define SMB1_VERSION_STRING "1.0"
#define SMB20_VERSION_STRING "2.0"
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
extern struct smb_version_operations smb1_operations;
extern struct smb_version_values smb1_values;
extern struct smb_version_operations smb20_operations;
extern struct smb_version_values smb20_values;
#endif /* CIFS_ALLOW_INSECURE_LEGACY */
#define SMB21_VERSION_STRING "2.1"
extern struct smb_version_operations smb21_operations;
extern struct smb_version_values smb21_values;
#define SMBDEFAULT_VERSION_STRING "default"
extern struct smb_version_values smbdefault_values;
#define SMB3ANY_VERSION_STRING "3"
extern struct smb_version_values smb3any_values;
#define SMB30_VERSION_STRING "3.0"
extern struct smb_version_operations smb30_operations;
extern struct smb_version_values smb30_values;
#define SMB302_VERSION_STRING "3.02"
#define ALT_SMB302_VERSION_STRING "3.0.2"
/*extern struct smb_version_operations smb302_operations;*/ /* not needed yet */
extern struct smb_version_values smb302_values;
#define SMB311_VERSION_STRING "3.1.1"
#define ALT_SMB311_VERSION_STRING "3.11"
extern struct smb_version_operations smb311_operations;
extern struct smb_version_values smb311_values;
static inline char *get_security_type_str(enum securityEnum sectype)
{
switch (sectype) {
case RawNTLMSSP:
return "RawNTLMSSP";
case Kerberos:
return "Kerberos";
case NTLMv2:
return "NTLMv2";
default:
return "Unknown";
}
}
static inline bool is_smb1_server(struct TCP_Server_Info *server)
{
return strcmp(server->vals->version_string, SMB1_VERSION_STRING) == 0;
}
static inline bool is_tcon_dfs(struct cifs_tcon *tcon)
{
/*
* For SMB1, see MS-CIFS 2.4.55 SMB_COM_TREE_CONNECT_ANDX (0x75) and MS-CIFS 3.3.4.4 DFS
* Subsystem Notifies That a Share Is a DFS Share.
*
* For SMB2+, see MS-SMB2 2.2.10 SMB2 TREE_CONNECT Response and MS-SMB2 3.3.4.14 Server
* Application Updates a Share.
*/
if (!tcon || !tcon->ses || !tcon->ses->server)
return false;
return is_smb1_server(tcon->ses->server) ? tcon->Flags & SMB_SHARE_IS_IN_DFS :
tcon->share_flags & (SHI1005_FLAGS_DFS | SHI1005_FLAGS_DFS_ROOT);
}
static inline bool cifs_is_referral_server(struct cifs_tcon *tcon,
const struct dfs_info3_param *ref)
{
/*
* Check if all targets are capable of handling DFS referrals as per
* MS-DFSC 2.2.4 RESP_GET_DFS_REFERRAL.
*/
return is_tcon_dfs(tcon) || (ref && (ref->flags & DFSREF_REFERRAL_SERVER));
}
static inline u64 cifs_flock_len(const struct file_lock *fl)
{
return (u64)fl->fl_end - fl->fl_start + 1;
}
static inline size_t ntlmssp_workstation_name_size(const struct cifs_ses *ses)
{
if (WARN_ON_ONCE(!ses || !ses->server))
return 0;
/*
* Make workstation name no more than 15 chars when using insecure dialects as some legacy
* servers do require it during NTLMSSP.
*/
if (ses->server->dialect <= SMB20_PROT_ID)
return min_t(size_t, sizeof(ses->workstation_name), RFC1001_NAME_LEN_WITH_NULL);
return sizeof(ses->workstation_name);
}
static inline void move_cifs_info_to_smb2(struct smb2_file_all_info *dst, const FILE_ALL_INFO *src)
{
memcpy(dst, src, (size_t)((u8 *)&src->AccessFlags - (u8 *)src));
dst->AccessFlags = src->AccessFlags;
dst->CurrentByteOffset = src->CurrentByteOffset;
dst->Mode = src->Mode;
dst->AlignmentRequirement = src->AlignmentRequirement;
dst->FileNameLength = src->FileNameLength;
}
static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
int num_rqst,
const u8 *sig)
{
unsigned int len, skip;
unsigned int nents = 0;
unsigned long addr;
size_t data_size;
int i, j;
/*
* The first rqst has a transform header where the first 20 bytes are
* not part of the encrypted blob.
*/
skip = 20;
/* Assumes the first rqst has a transform header as the first iov.
* I.e.
* rqst[0].rq_iov[0] is transform header
* rqst[0].rq_iov[1+] data to be encrypted/decrypted
* rqst[1+].rq_iov[0+] data to be encrypted/decrypted
*/
for (i = 0; i < num_rqst; i++) {
data_size = iov_iter_count(&rqst[i].rq_iter);
/* We really don't want a mixture of pinned and unpinned pages
* in the sglist. It's hard to keep track of which is what.
* Instead, we convert to a BVEC-type iterator higher up.
*/
if (data_size &&
WARN_ON_ONCE(user_backed_iter(&rqst[i].rq_iter)))
return -EIO;
/* We also don't want to have any extra refs or pins to clean
* up in the sglist.
*/
if (data_size &&
WARN_ON_ONCE(iov_iter_extract_will_pin(&rqst[i].rq_iter)))
return -EIO;
for (j = 0; j < rqst[i].rq_nvec; j++) {
struct kvec *iov = &rqst[i].rq_iov[j];
addr = (unsigned long)iov->iov_base + skip;
if (is_vmalloc_or_module_addr((void *)addr)) {
len = iov->iov_len - skip;
nents += DIV_ROUND_UP(offset_in_page(addr) + len,
PAGE_SIZE);
} else {
nents++;
}
skip = 0;
}
if (data_size)
nents += iov_iter_npages(&rqst[i].rq_iter, INT_MAX);
}
nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE);
return nents;
}
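/*
 * Worked example for the vmalloc branch above: a kvec starting at offset
 * 0xff0 within a page with length 0x30 crosses a page boundary, and with
 * PAGE_SIZE == 0x1000, DIV_ROUND_UP(0xff0 + 0x30, 0x1000) == 2 entries.
 */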
/* We cannot use the normal sg_set_buf() as we will sometimes pass a
 * stack object as buf.
 */
static inline void cifs_sg_set_buf(struct sg_table *sgtable,
const void *buf,
unsigned int buflen)
{
unsigned long addr = (unsigned long)buf;
unsigned int off = offset_in_page(addr);
addr &= PAGE_MASK;
if (is_vmalloc_or_module_addr((void *)addr)) {
do {
unsigned int len = min_t(unsigned int, buflen, PAGE_SIZE - off);
sg_set_page(&sgtable->sgl[sgtable->nents++],
vmalloc_to_page((void *)addr), len, off);
off = 0;
addr += PAGE_SIZE;
buflen -= len;
} while (buflen);
} else {
sg_set_page(&sgtable->sgl[sgtable->nents++],
virt_to_page((void *)addr), buflen, off);
}
}
#define CIFS_OPARMS(_cifs_sb, _tcon, _path, _da, _cd, _co, _mode) \
((struct cifs_open_parms) { \
.tcon = _tcon, \
.path = _path, \
.desired_access = (_da), \
.disposition = (_cd), \
.create_options = cifs_create_options(_cifs_sb, (_co)), \
.mode = (_mode), \
.cifs_sb = _cifs_sb, \
})
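/*
 * Example use of CIFS_OPARMS() (a sketch only; the variable names are
 * hypothetical, and GENERIC_READ/FILE_OPEN/ACL_NO_MODE are the usual SMB
 * constants assumed to be in scope here):
 *
 *	struct cifs_open_parms oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
 *						    GENERIC_READ, FILE_OPEN,
 *						    0, ACL_NO_MODE);
 */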
struct smb2_compound_vars {
struct cifs_open_parms oparms;
struct kvec rsp_iov[MAX_COMPOUND];
struct smb_rqst rqst[MAX_COMPOUND];
struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
struct kvec qi_iov;
struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
struct kvec close_iov;
struct smb2_file_rename_info rename_info;
struct smb2_file_link_info link_info;
struct kvec ea_iov;
};
static inline bool cifs_ses_exiting(struct cifs_ses *ses)
{
bool ret;
spin_lock(&ses->ses_lock);
ret = ses->ses_status == SES_EXITING;
spin_unlock(&ses->ses_lock);
return ret;
}
#endif /* _CIFS_GLOB_H */
/* 3c509.c: A 3c509 EtherLink3 ethernet driver for linux. */
/*
Written 1993-2000 by Donald Becker.
Copyright 1994-2000 by Donald Becker.
Copyright 1993 United States Government as represented by the
Director, National Security Agency. This software may be used and
distributed according to the terms of the GNU General Public License,
incorporated herein by reference.
This driver is for the 3Com EtherLinkIII series.
The author may be reached as [email protected], or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
Annapolis MD 21403
Known limitations:
Because of the way 3c509 ISA detection works it's difficult to predict
a priori which of several ISA-mode cards will be detected first.
This driver does not use predictive interrupt mode, resulting in higher
packet latency but lower overhead. If interrupts are disabled for an
unusually long time it could also result in missed packets, but in
practice this rarely happens.
FIXES:
Alan Cox: Removed the 'Unexpected interrupt' bug.
Michael Meskes: Upgraded to Donald Becker's version 1.07.
Alan Cox: Increased the eeprom delay. Regardless of
what the docs say some people definitely
get problems with lower (but in card spec)
delays
v1.10 4/21/97 Fixed module code so that multiple cards may be detected,
other cleanups. -djb
Andrea Arcangeli: Upgraded to Donald Becker's version 1.12.
Rick Payne: Fixed SMP race condition
v1.13 9/8/97 Made 'max_interrupt_work' an insmod-settable variable -djb
v1.14 10/15/97 Avoided waiting..discard message for fast machines -djb
v1.15 1/31/98 Faster recovery for Tx errors. -djb
v1.16 2/3/98 Different ID port handling to avoid sound cards. -djb
v1.18 12Mar2001 Andrew Morton
- Avoid bogus detect of 3c590's (Andrzej Krzysztofowicz)
- Reviewed against 1.18 from scyld.com
v1.18a 17Nov2001 Jeff Garzik <[email protected]>
- ethtool support
v1.18b 1Mar2002 Zwane Mwaikambo <[email protected]>
- Power Management support
v1.18c 1Mar2002 David Ruggiero <[email protected]>
- Full duplex support
v1.19 16Oct2002 Zwane Mwaikambo <[email protected]>
- Additional ethtool features
v1.19a 28Oct2002 David Ruggiero <[email protected]>
- Increase *read_eeprom udelay to work around oops with 2 cards.
v1.19b 08Nov2002 Marc Zyngier <[email protected]>
- Introduce driver model for EISA cards.
v1.20 04Feb2008 Ondrej Zary <[email protected]>
- convert to isa_driver and pnp_driver and some cleanups
*/
#define DRV_NAME "3c509"
/* A few values that may be tweaked. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (400*HZ/1000)
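/* For example, with HZ == 250 this evaluates to 400*250/1000 == 100 jiffies,
   i.e. 400 ms. */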
#include <linux/module.h>
#include <linux/isa.h>
#include <linux/pnp.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/pm.h>
#include <linux/skbuff.h>
#include <linux/delay.h> /* for udelay() */
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/device.h>
#include <linux/eisa.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#ifdef EL3_DEBUG
static int el3_debug = EL3_DEBUG;
#else
static int el3_debug = 2;
#endif
/* Used to do a global count of all the cards in the system. Must be
* a global variable so that the eisa probe routines can increment
* it */
static int el3_cards = 0;
#define EL3_MAX_CARDS 8
/* To minimize the size of the driver source I only define operating
constants if they are used several times. You'll need the manual
anyway if you want to understand driver details. */
/* Offsets from base I/O address. */
#define EL3_DATA 0x00
#define EL3_CMD 0x0e
#define EL3_STATUS 0x0e
#define EEPROM_READ 0x80
#define EL3_IO_EXTENT 16
#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
/* The top five bits written to EL3_CMD are a command, the lower
11 bits are the parameter, if applicable. */
enum c509cmd {
TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
StatsDisable = 22<<11, StopCoax = 23<<11, PowerUp = 27<<11,
PowerDown = 28<<11, PowerAuto = 29<<11};
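/* Worked example of the encoding above: SelectWindow is command 1, so
   EL3WINDOW(4) expands to outw((1<<11) + 4, ioaddr + EL3_CMD), i.e. command 1
   in the top five bits and window number 4 in the low 11 parameter bits. */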
enum c509status {
IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000, };
/* The SetRxFilter command accepts the following classes: */
enum RxFilter {
RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
/* Register window 1 offsets, the window used in normal operation. */
#define TX_FIFO 0x00
#define RX_FIFO 0x00
#define RX_STATUS 0x08
#define TX_STATUS 0x0B
#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
#define WN0_CONF_CTRL 0x04 /* Window 0: Configuration control register */
#define WN0_ADDR_CONF 0x06 /* Window 0: Address configuration register */
#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
#define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */
#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
#define WN4_NETDIAG 0x06 /* Window 4: Net diagnostic */
#define FD_ENABLE 0x8000 /* Enable full-duplex ("external loopback") */
/*
* Must be a power of two (we use a binary and in the
* circular queue)
*/
#define SKB_QUEUE_SIZE 64
enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_EISA };
struct el3_private {
spinlock_t lock;
/* skb send-queue */
int head, size;
struct sk_buff *queue[SKB_QUEUE_SIZE];
enum el3_cardtype type;
};
static int id_port;
static int current_tag;
static struct net_device *el3_devs[EL3_MAX_CARDS];
/* Parameters that may be passed into the module. */
static int debug = -1;
static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 10;
#ifdef CONFIG_PNP
static int nopnp;
#endif
static int el3_common_init(struct net_device *dev);
static void el3_common_remove(struct net_device *dev);
static ushort id_read_eeprom(int index);
static ushort read_eeprom(int ioaddr, int index);
static int el3_open(struct net_device *dev);
static netdev_tx_t el3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t el3_interrupt(int irq, void *dev_id);
static void update_stats(struct net_device *dev);
static struct net_device_stats *el3_get_stats(struct net_device *dev);
static int el3_rx(struct net_device *dev);
static int el3_close(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void el3_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void el3_down(struct net_device *dev);
static void el3_up(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;
#ifdef CONFIG_PM
static int el3_suspend(struct device *, pm_message_t);
static int el3_resume(struct device *);
#else
#define el3_suspend NULL
#define el3_resume NULL
#endif
/* generic device remove for all device types */
static int el3_device_remove (struct device *device);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void el3_poll_controller(struct net_device *dev);
#endif
/* Return 0 on success, 1 on error, 2 when an already-detected PnP card is found */
static int el3_isa_id_sequence(__be16 *phys_addr)
{
short lrs_state = 0xff;
int i;
/* ISA boards are detected by sending the ID sequence to the
ID_PORT. We find cards past the first by setting the 'current_tag'
on cards as they are found. Cards with their tag set will not
respond to subsequent ID sequences. */
outb(0x00, id_port);
outb(0x00, id_port);
for (i = 0; i < 255; i++) {
outb(lrs_state, id_port);
lrs_state <<= 1;
lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state;
}
/* For the first probe, clear all board's tag registers. */
if (current_tag == 0)
outb(0xd0, id_port);
else /* Otherwise kill off already-found boards. */
outb(0xd8, id_port);
if (id_read_eeprom(7) != 0x6d50)
return 1;
/* Read in EEPROM data, which does contention-select.
Only the lowest address board will stay "on-line".
3Com got the byte order backwards. */
for (i = 0; i < 3; i++)
phys_addr[i] = htons(id_read_eeprom(i));
#ifdef CONFIG_PNP
if (!nopnp) {
/* The ISA PnP 3c509 cards respond to the ID sequence too.
This check is needed in order not to register them twice. */
for (i = 0; i < el3_cards; i++) {
struct el3_private *lp = netdev_priv(el3_devs[i]);
if (lp->type == EL3_PNP &&
ether_addr_equal((u8 *)phys_addr, el3_devs[i]->dev_addr)) {
if (el3_debug > 3)
pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
phys_addr[0] & 0xff, phys_addr[0] >> 8,
phys_addr[1] & 0xff, phys_addr[1] >> 8,
phys_addr[2] & 0xff, phys_addr[2] >> 8);
/* Set the adaptor tag so that the next card can be found. */
outb(0xd0 + ++current_tag, id_port);
return 2;
}
}
}
#endif /* CONFIG_PNP */
return 0;
}
static void el3_dev_fill(struct net_device *dev, __be16 *phys_addr, int ioaddr,
int irq, int if_port, enum el3_cardtype type)
{
struct el3_private *lp = netdev_priv(dev);
eth_hw_addr_set(dev, (u8 *)phys_addr);
dev->base_addr = ioaddr;
dev->irq = irq;
dev->if_port = if_port;
lp->type = type;
}
static int el3_isa_match(struct device *pdev, unsigned int ndev)
{
struct net_device *dev;
int ioaddr, isa_irq, if_port, err;
unsigned int iobase;
__be16 phys_addr[3];
while ((err = el3_isa_id_sequence(phys_addr)) == 2)
; /* Skip to next card when PnP card found */
if (err == 1)
return 0;
iobase = id_read_eeprom(8);
if_port = iobase >> 14;
ioaddr = 0x200 + ((iobase & 0x1f) << 4);
if (irq[el3_cards] > 1 && irq[el3_cards] < 16)
isa_irq = irq[el3_cards];
else
isa_irq = id_read_eeprom(9) >> 12;
dev = alloc_etherdev(sizeof(struct el3_private));
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, pdev);
if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) {
free_netdev(dev);
return 0;
}
/* Set the adaptor tag so that the next card can be found. */
outb(0xd0 + ++current_tag, id_port);
/* Activate the adaptor at the EEPROM location. */
outb((ioaddr >> 4) | 0xe0, id_port);
EL3WINDOW(0);
if (inw(ioaddr) != 0x6d50) {
free_netdev(dev);
return 0;
}
/* Free the interrupt so that some other card can use it. */
outw(0x0f00, ioaddr + WN0_IRQ);
el3_dev_fill(dev, phys_addr, ioaddr, isa_irq, if_port, EL3_ISA);
dev_set_drvdata(pdev, dev);
if (el3_common_init(dev)) {
free_netdev(dev);
return 0;
}
el3_devs[el3_cards++] = dev;
return 1;
}
static void el3_isa_remove(struct device *pdev,
unsigned int ndev)
{
el3_device_remove(pdev);
dev_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
static int el3_isa_suspend(struct device *dev, unsigned int n,
pm_message_t state)
{
current_tag = 0;
return el3_suspend(dev, state);
}
static int el3_isa_resume(struct device *dev, unsigned int n)
{
struct net_device *ndev = dev_get_drvdata(dev);
int ioaddr = ndev->base_addr, err;
__be16 phys_addr[3];
while ((err = el3_isa_id_sequence(phys_addr)) == 2)
; /* Skip to next card when PnP card found */
if (err == 1)
return 0;
/* Set the adaptor tag so that the next card can be found. */
outb(0xd0 + ++current_tag, id_port);
/* Enable the card */
outb((ioaddr >> 4) | 0xe0, id_port);
EL3WINDOW(0);
if (inw(ioaddr) != 0x6d50)
return 1;
/* Free the interrupt so that some other card can use it. */
outw(0x0f00, ioaddr + WN0_IRQ);
return el3_resume(dev);
}
#endif
static struct isa_driver el3_isa_driver = {
.match = el3_isa_match,
.remove = el3_isa_remove,
#ifdef CONFIG_PM
.suspend = el3_isa_suspend,
.resume = el3_isa_resume,
#endif
.driver = {
.name = "3c509"
},
};
static int isa_registered;
#ifdef CONFIG_PNP
static const struct pnp_device_id el3_pnp_ids[] = {
{ .id = "TCM5090" }, /* 3Com Etherlink III (TP) */
{ .id = "TCM5091" }, /* 3Com Etherlink III */
{ .id = "TCM5094" }, /* 3Com Etherlink III (combo) */
{ .id = "TCM5095" }, /* 3Com Etherlink III (TPO) */
{ .id = "TCM5098" }, /* 3Com Etherlink III (TPC) */
{ .id = "PNP80f7" }, /* 3Com Etherlink III compatible */
{ .id = "PNP80f8" }, /* 3Com Etherlink III compatible */
{ .id = "" }
};
MODULE_DEVICE_TABLE(pnp, el3_pnp_ids);
static int el3_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id)
{
short i;
int ioaddr, irq, if_port;
__be16 phys_addr[3];
struct net_device *dev = NULL;
int err;
ioaddr = pnp_port_start(pdev, 0);
if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-pnp"))
return -EBUSY;
irq = pnp_irq(pdev, 0);
EL3WINDOW(0);
for (i = 0; i < 3; i++)
phys_addr[i] = htons(read_eeprom(ioaddr, i));
if_port = read_eeprom(ioaddr, 8) >> 14;
dev = alloc_etherdev(sizeof(struct el3_private));
if (!dev) {
release_region(ioaddr, EL3_IO_EXTENT);
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_PNP);
pnp_set_drvdata(pdev, dev);
err = el3_common_init(dev);
if (err) {
pnp_set_drvdata(pdev, NULL);
free_netdev(dev);
return err;
}
el3_devs[el3_cards++] = dev;
return 0;
}
static void el3_pnp_remove(struct pnp_dev *pdev)
{
el3_common_remove(pnp_get_drvdata(pdev));
pnp_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
static int el3_pnp_suspend(struct pnp_dev *pdev, pm_message_t state)
{
return el3_suspend(&pdev->dev, state);
}
static int el3_pnp_resume(struct pnp_dev *pdev)
{
return el3_resume(&pdev->dev);
}
#endif
static struct pnp_driver el3_pnp_driver = {
.name = "3c509",
.id_table = el3_pnp_ids,
.probe = el3_pnp_probe,
.remove = el3_pnp_remove,
#ifdef CONFIG_PM
.suspend = el3_pnp_suspend,
.resume = el3_pnp_resume,
#endif
};
static int pnp_registered;
#endif /* CONFIG_PNP */
#ifdef CONFIG_EISA
static const struct eisa_device_id el3_eisa_ids[] = {
{ "TCM5090" },
{ "TCM5091" },
{ "TCM5092" },
{ "TCM5093" },
{ "TCM5094" },
{ "TCM5095" },
{ "TCM5098" },
{ "" }
};
MODULE_DEVICE_TABLE(eisa, el3_eisa_ids);
static int el3_eisa_probe (struct device *device);
static struct eisa_driver el3_eisa_driver = {
.id_table = el3_eisa_ids,
.driver = {
.name = "3c579",
.probe = el3_eisa_probe,
.remove = el3_device_remove,
.suspend = el3_suspend,
.resume = el3_resume,
}
};
static int eisa_registered;
#endif
static const struct net_device_ops netdev_ops = {
.ndo_open = el3_open,
.ndo_stop = el3_close,
.ndo_start_xmit = el3_start_xmit,
.ndo_get_stats = el3_get_stats,
.ndo_set_rx_mode = set_multicast_list,
.ndo_tx_timeout = el3_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = el3_poll_controller,
#endif
};
static int el3_common_init(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
int err;
static const char * const if_names[] = {
"10baseT", "AUI", "undefined", "BNC"
};
spin_lock_init(&lp->lock);
if (dev->mem_start & 0x05) { /* xcvr codes 1/3/4/12 */
dev->if_port = (dev->mem_start & 0x0f);
} else { /* xcvr codes 0/8 */
/* use eeprom value, but save user's full-duplex selection */
dev->if_port |= (dev->mem_start & 0x08);
}
/* The EL3-specific entries in the device structure. */
dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->ethtool_ops = ðtool_ops;
err = register_netdev(dev);
if (err) {
pr_err("Failed to register 3c5x9 at %#3.3lx, IRQ %d.\n",
dev->base_addr, dev->irq);
release_region(dev->base_addr, EL3_IO_EXTENT);
return err;
}
pr_info("%s: 3c5x9 found at %#3.3lx, %s port, address %pM, IRQ %d.\n",
dev->name, dev->base_addr, if_names[(dev->if_port & 0x03)],
dev->dev_addr, dev->irq);
return 0;
}
static void el3_common_remove (struct net_device *dev)
{
unregister_netdev (dev);
release_region(dev->base_addr, EL3_IO_EXTENT);
free_netdev (dev);
}
#ifdef CONFIG_EISA
static int el3_eisa_probe(struct device *device)
{
short i;
int ioaddr, irq, if_port;
__be16 phys_addr[3];
struct net_device *dev = NULL;
struct eisa_device *edev;
int err;
	/* Yeepee, the driver framework is calling us! */
edev = to_eisa_device (device);
ioaddr = edev->base_addr;
if (!request_region(ioaddr, EL3_IO_EXTENT, "3c579-eisa"))
return -EBUSY;
/* Change the register set to the configuration window 0. */
outw(SelectWindow | 0, ioaddr + 0xC80 + EL3_CMD);
irq = inw(ioaddr + WN0_IRQ) >> 12;
if_port = inw(ioaddr + 6)>>14;
for (i = 0; i < 3; i++)
phys_addr[i] = htons(read_eeprom(ioaddr, i));
/* Restore the "Product ID" to the EEPROM read register. */
read_eeprom(ioaddr, 3);
dev = alloc_etherdev(sizeof (struct el3_private));
if (dev == NULL) {
release_region(ioaddr, EL3_IO_EXTENT);
return -ENOMEM;
}
SET_NETDEV_DEV(dev, device);
el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
eisa_set_drvdata (edev, dev);
err = el3_common_init(dev);
if (err) {
eisa_set_drvdata (edev, NULL);
free_netdev(dev);
return err;
}
el3_devs[el3_cards++] = dev;
return 0;
}
#endif
/* This remove works for all device types.
*
* The net dev must be stored in the driver data field */
static int el3_device_remove(struct device *device)
{
struct net_device *dev;
dev = dev_get_drvdata(device);
el3_common_remove (dev);
return 0;
}
/* Read a word from the EEPROM using the regular EEPROM access register.
Assume that we are in register window zero.
*/
static ushort read_eeprom(int ioaddr, int index)
{
outw(EEPROM_READ + index, ioaddr + 10);
	/* Pause for at least 162 us for the read to take place.
	   Some chips seem to require much longer */
mdelay(2);
return inw(ioaddr + 12);
}
/* Read a word from the EEPROM when in the ISA ID probe state. */
static ushort id_read_eeprom(int index)
{
int bit, word = 0;
	/* Issue read command, and pause for at least 162 us for it to complete.
	   Assume an extra-fast 16 MHz bus. */
outb(EEPROM_READ + index, id_port);
	/* Pause for at least 162 us for the read to take place. */
	/* Some chips seem to require much longer */
mdelay(4);
for (bit = 15; bit >= 0; bit--)
word = (word << 1) + (inb(id_port) & 0x01);
if (el3_debug > 3)
pr_debug(" 3c509 EEPROM word %d %#4.4x.\n", index, word);
return word;
}
static int
el3_open(struct net_device *dev)
{
int ioaddr = dev->base_addr;
int i;
outw(TxReset, ioaddr + EL3_CMD);
outw(RxReset, ioaddr + EL3_CMD);
outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
i = request_irq(dev->irq, el3_interrupt, 0, dev->name, dev);
if (i)
return i;
EL3WINDOW(0);
if (el3_debug > 3)
pr_debug("%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name,
dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS));
el3_up(dev);
if (el3_debug > 3)
pr_debug("%s: Opened 3c509 IRQ %d status %4.4x.\n",
dev->name, dev->irq, inw(ioaddr + EL3_STATUS));
return 0;
}
static void
el3_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
int ioaddr = dev->base_addr;
/* Transmitter timeout, serious problems. */
pr_warn("%s: transmit timed out, Tx_status %2.2x status %4.4x Tx FIFO room %d\n",
dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
inw(ioaddr + TX_FREE));
dev->stats.tx_errors++;
netif_trans_update(dev); /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
outw(TxReset, ioaddr + EL3_CMD);
outw(TxEnable, ioaddr + EL3_CMD);
netif_wake_queue(dev);
}
static netdev_tx_t
el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
unsigned long flags;
netif_stop_queue (dev);
dev->stats.tx_bytes += skb->len;
if (el3_debug > 4) {
pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
dev->name, skb->len, inw(ioaddr + EL3_STATUS));
}
/*
* We lock the driver against other processors. Note
* we don't need to lock versus the IRQ as we suspended
* that. This means that we lose the ability to take
* an RX during a TX upload. That sucks a bit with SMP
* on an original 3c509 (2K buffer)
*
* Using disable_irq stops us crapping on other
* time sensitive devices.
*/
spin_lock_irqsave(&lp->lock, flags);
/* Put out the doubleword header... */
outw(skb->len, ioaddr + TX_FIFO);
outw(0x00, ioaddr + TX_FIFO);
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
if (inw(ioaddr + TX_FREE) > 1536)
netif_start_queue(dev);
else
/* Interrupt us when the FIFO has room for max-sized packet. */
outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&lp->lock, flags);
dev_consume_skb_any (skb);
/* Clear the Tx status stack. */
{
short tx_status;
int i = 4;
while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
}
}
return NETDEV_TX_OK;
}
/* The EL3 interrupt handler. */
static irqreturn_t
el3_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct el3_private *lp;
int ioaddr, status;
int i = max_interrupt_work;
lp = netdev_priv(dev);
spin_lock(&lp->lock);
ioaddr = dev->base_addr;
if (el3_debug > 4) {
status = inw(ioaddr + EL3_STATUS);
pr_debug("%s: interrupt, status %4.4x.\n", dev->name, status);
}
while ((status = inw(ioaddr + EL3_STATUS)) &
(IntLatch | RxComplete | StatsFull)) {
if (status & RxComplete)
el3_rx(dev);
if (status & TxAvailable) {
if (el3_debug > 5)
pr_debug(" TX room bit was handled.\n");
/* There's room in the FIFO for a full-sized packet. */
outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
netif_wake_queue (dev);
}
if (status & (AdapterFailure | RxEarly | StatsFull | TxComplete)) {
/* Handle all uncommon interrupts. */
if (status & StatsFull) /* Empty statistics. */
update_stats(dev);
if (status & RxEarly) { /* Rx early is unused. */
el3_rx(dev);
outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
}
if (status & TxComplete) { /* Really Tx error. */
short tx_status;
int i = 4;
while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
}
}
if (status & AdapterFailure) {
/* Adapter failure requires Rx reset and reinit. */
outw(RxReset, ioaddr + EL3_CMD);
/* Set the Rx filter to the current state. */
outw(SetRxFilter | RxStation | RxBroadcast
| (dev->flags & IFF_ALLMULTI ? RxMulticast : 0)
| (dev->flags & IFF_PROMISC ? RxProm : 0),
ioaddr + EL3_CMD);
outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
}
}
if (--i < 0) {
pr_err("%s: Infinite loop in interrupt, status %4.4x.\n",
dev->name, status);
/* Clear all interrupts. */
outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
break;
}
/* Acknowledge the IRQ. */
outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); /* Ack IRQ */
}
if (el3_debug > 4) {
pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name,
inw(ioaddr + EL3_STATUS));
}
spin_unlock(&lp->lock);
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling receive - used by netconsole and other diagnostic tools
* to allow network i/o with interrupts disabled.
*/
static void el3_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
el3_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
static struct net_device_stats *
el3_get_stats(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
unsigned long flags;
/*
* This is fast enough not to bother with disable IRQ
* stuff.
*/
spin_lock_irqsave(&lp->lock, flags);
update_stats(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return &dev->stats;
}
/* Update statistics. We change to register window 6, so this should be run
single-threaded if the device is active. This is expected to be a rare
operation, and it's simpler for the rest of the driver to assume that
window 1 is always valid rather than use a special window-state variable.
*/
static void update_stats(struct net_device *dev)
{
int ioaddr = dev->base_addr;
if (el3_debug > 5)
pr_debug(" Updating the statistics.\n");
/* Turn off statistics updates while reading. */
outw(StatsDisable, ioaddr + EL3_CMD);
/* Switch to the stats window, and read everything. */
EL3WINDOW(6);
dev->stats.tx_carrier_errors += inb(ioaddr + 0);
dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
/* Multiple collisions. */ inb(ioaddr + 2);
dev->stats.collisions += inb(ioaddr + 3);
dev->stats.tx_window_errors += inb(ioaddr + 4);
dev->stats.rx_fifo_errors += inb(ioaddr + 5);
dev->stats.tx_packets += inb(ioaddr + 6);
/* Rx packets */ inb(ioaddr + 7);
/* Tx deferrals */ inb(ioaddr + 8);
inw(ioaddr + 10); /* Total Rx and Tx octets. */
inw(ioaddr + 12);
/* Back to window 1, and turn statistics back on. */
EL3WINDOW(1);
outw(StatsEnable, ioaddr + EL3_CMD);
}
static int
el3_rx(struct net_device *dev)
{
int ioaddr = dev->base_addr;
short rx_status;
if (el3_debug > 5)
pr_debug(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
while ((rx_status = inw(ioaddr + RX_STATUS)) > 0) {
if (rx_status & 0x4000) { /* Error, update stats. */
short error = rx_status & 0x3800;
outw(RxDiscard, ioaddr + EL3_CMD);
dev->stats.rx_errors++;
switch (error) {
case 0x0000: dev->stats.rx_over_errors++; break;
case 0x0800: dev->stats.rx_length_errors++; break;
case 0x1000: dev->stats.rx_frame_errors++; break;
case 0x1800: dev->stats.rx_length_errors++; break;
case 0x2000: dev->stats.rx_frame_errors++; break;
case 0x2800: dev->stats.rx_crc_errors++; break;
}
} else {
short pkt_len = rx_status & 0x7ff;
struct sk_buff *skb;
skb = netdev_alloc_skb(dev, pkt_len + 5);
if (el3_debug > 4)
pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
if (skb != NULL) {
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundary */
/* 'skb->data' points to the start of sk_buff data area. */
insl(ioaddr + RX_FIFO, skb_put(skb,pkt_len),
(pkt_len + 3) >> 2);
outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
skb->protocol = eth_type_trans(skb,dev);
netif_rx(skb);
dev->stats.rx_bytes += pkt_len;
dev->stats.rx_packets++;
continue;
}
outw(RxDiscard, ioaddr + EL3_CMD);
dev->stats.rx_dropped++;
if (el3_debug)
pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n",
dev->name, pkt_len);
}
inw(ioaddr + EL3_STATUS); /* Delay. */
while (inw(ioaddr + EL3_STATUS) & 0x1000)
pr_debug(" Waiting for 3c509 to discard packet, status %x.\n",
inw(ioaddr + EL3_STATUS) );
}
return 0;
}
/*
* Set or clear the multicast filter for this adaptor.
*/
static void
set_multicast_list(struct net_device *dev)
{
unsigned long flags;
struct el3_private *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
int mc_count = netdev_mc_count(dev);
if (el3_debug > 1) {
static int old;
if (old != mc_count) {
old = mc_count;
pr_debug("%s: Setting Rx mode to %d addresses.\n",
dev->name, mc_count);
}
}
spin_lock_irqsave(&lp->lock, flags);
if (dev->flags&IFF_PROMISC) {
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
ioaddr + EL3_CMD);
}
else if (mc_count || (dev->flags&IFF_ALLMULTI)) {
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD);
}
else
outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&lp->lock, flags);
}
static int
el3_close(struct net_device *dev)
{
int ioaddr = dev->base_addr;
struct el3_private *lp = netdev_priv(dev);
if (el3_debug > 2)
pr_debug("%s: Shutting down ethercard.\n", dev->name);
el3_down(dev);
free_irq(dev->irq, dev);
/* Switching back to window 0 disables the IRQ. */
EL3WINDOW(0);
if (lp->type != EL3_EISA) {
/* But we explicitly zero the IRQ line select anyway. Don't do
* it on EISA cards, it prevents the module from getting an
* IRQ after unload+reload... */
outw(0x0f00, ioaddr + WN0_IRQ);
}
return 0;
}
static int
el3_link_ok(struct net_device *dev)
{
int ioaddr = dev->base_addr;
u16 tmp;
EL3WINDOW(4);
tmp = inw(ioaddr + WN4_MEDIA);
EL3WINDOW(1);
return tmp & (1<<11);
}
static void
el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_link_ksettings *cmd)
{
u16 tmp;
int ioaddr = dev->base_addr;
u32 supported;
EL3WINDOW(0);
/* obtain current transceiver via WN4_MEDIA? */
tmp = inw(ioaddr + WN0_ADDR_CONF);
switch (tmp >> 14) {
case 0:
cmd->base.port = PORT_TP;
break;
case 1:
cmd->base.port = PORT_AUI;
break;
case 3:
cmd->base.port = PORT_BNC;
break;
default:
break;
}
cmd->base.duplex = DUPLEX_HALF;
supported = 0;
tmp = inw(ioaddr + WN0_CONF_CTRL);
if (tmp & (1<<13))
supported |= SUPPORTED_AUI;
if (tmp & (1<<12))
supported |= SUPPORTED_BNC;
if (tmp & (1<<9)) {
supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full; /* hmm... */
EL3WINDOW(4);
tmp = inw(ioaddr + WN4_NETDIAG);
if (tmp & FD_ENABLE)
cmd->base.duplex = DUPLEX_FULL;
}
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
supported);
cmd->base.speed = SPEED_10;
EL3WINDOW(1);
}
static int
el3_netdev_set_ecmd(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
u16 tmp;
int ioaddr = dev->base_addr;
if (cmd->base.speed != SPEED_10)
return -EINVAL;
if ((cmd->base.duplex != DUPLEX_HALF) &&
(cmd->base.duplex != DUPLEX_FULL))
return -EINVAL;
/* change XCVR type */
EL3WINDOW(0);
tmp = inw(ioaddr + WN0_ADDR_CONF);
switch (cmd->base.port) {
case PORT_TP:
tmp &= ~(3<<14);
dev->if_port = 0;
break;
case PORT_AUI:
tmp |= (1<<14);
dev->if_port = 1;
break;
case PORT_BNC:
tmp |= (3<<14);
dev->if_port = 3;
break;
default:
return -EINVAL;
}
outw(tmp, ioaddr + WN0_ADDR_CONF);
if (dev->if_port == 3) {
		/* fire up the DC-DC converter if BNC gets enabled */
tmp = inw(ioaddr + WN0_ADDR_CONF);
if (tmp & (3 << 14)) {
outw(StartCoax, ioaddr + EL3_CMD);
udelay(800);
} else
return -EIO;
}
EL3WINDOW(4);
tmp = inw(ioaddr + WN4_NETDIAG);
if (cmd->base.duplex == DUPLEX_FULL)
tmp |= FD_ENABLE;
else
tmp &= ~FD_ENABLE;
outw(tmp, ioaddr + WN4_NETDIAG);
EL3WINDOW(1);
return 0;
}
static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int el3_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct el3_private *lp = netdev_priv(dev);
spin_lock_irq(&lp->lock);
el3_netdev_get_ecmd(dev, cmd);
spin_unlock_irq(&lp->lock);
return 0;
}
static int el3_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct el3_private *lp = netdev_priv(dev);
int ret;
spin_lock_irq(&lp->lock);
ret = el3_netdev_set_ecmd(dev, cmd);
spin_unlock_irq(&lp->lock);
return ret;
}
static u32 el3_get_link(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
u32 ret;
spin_lock_irq(&lp->lock);
ret = el3_link_ok(dev);
spin_unlock_irq(&lp->lock);
return ret;
}
static u32 el3_get_msglevel(struct net_device *dev)
{
return el3_debug;
}
static void el3_set_msglevel(struct net_device *dev, u32 v)
{
el3_debug = v;
}
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = el3_get_drvinfo,
.get_link = el3_get_link,
.get_msglevel = el3_get_msglevel,
.set_msglevel = el3_set_msglevel,
.get_link_ksettings = el3_get_link_ksettings,
.set_link_ksettings = el3_set_link_ksettings,
};
static void
el3_down(struct net_device *dev)
{
int ioaddr = dev->base_addr;
netif_stop_queue(dev);
/* Turn off statistics ASAP. We update lp->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
/* Disable the receiver and transmitter. */
outw(RxDisable, ioaddr + EL3_CMD);
outw(TxDisable, ioaddr + EL3_CMD);
if (dev->if_port == 3)
/* Turn off thinnet power. Green! */
outw(StopCoax, ioaddr + EL3_CMD);
else if (dev->if_port == 0) {
		/* Disable link beat and jabber; if_port may change before the next open(). */
EL3WINDOW(4);
outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA);
}
outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
update_stats(dev);
}
static void
el3_up(struct net_device *dev)
{
int i, sw_info, net_diag;
int ioaddr = dev->base_addr;
	/* Activating the board is required and does no harm otherwise */
outw(0x0001, ioaddr + 4);
/* Set the IRQ line. */
outw((dev->irq << 12) | 0x0f00, ioaddr + WN0_IRQ);
/* Set the station address in window 2 each time opened. */
EL3WINDOW(2);
for (i = 0; i < 6; i++)
outb(dev->dev_addr[i], ioaddr + i);
if ((dev->if_port & 0x03) == 3) /* BNC interface */
/* Start the thinnet transceiver. We should really wait 50ms...*/
outw(StartCoax, ioaddr + EL3_CMD);
else if ((dev->if_port & 0x03) == 0) { /* 10baseT interface */
/* Combine secondary sw_info word (the adapter level) and primary
sw_info word (duplex setting plus other useless bits) */
EL3WINDOW(0);
sw_info = (read_eeprom(ioaddr, 0x14) & 0x400f) |
(read_eeprom(ioaddr, 0x0d) & 0xBff0);
EL3WINDOW(4);
net_diag = inw(ioaddr + WN4_NETDIAG);
net_diag = (net_diag | FD_ENABLE); /* temporarily assume full-duplex will be set */
pr_info("%s: ", dev->name);
switch (dev->if_port & 0x0c) {
case 12:
/* force full-duplex mode if 3c5x9b */
if (sw_info & 0x000f) {
pr_cont("Forcing 3c5x9b full-duplex mode");
break;
}
fallthrough;
case 8:
/* set full-duplex mode based on eeprom config setting */
if ((sw_info & 0x000f) && (sw_info & 0x8000)) {
pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)");
break;
}
fallthrough;
default:
/* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */
pr_cont("Setting 3c5x9/3c5x9B half-duplex mode");
net_diag = (net_diag & ~FD_ENABLE); /* disable full duplex */
}
outw(net_diag, ioaddr + WN4_NETDIAG);
pr_cont(" if_port: %d, sw_info: %4.4x\n", dev->if_port, sw_info);
if (el3_debug > 3)
pr_debug("%s: 3c5x9 net diag word is now: %4.4x.\n", dev->name, net_diag);
/* Enable link beat and jabber check. */
outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA);
}
/* Switch to the stats window, and clear all stats by reading. */
outw(StatsDisable, ioaddr + EL3_CMD);
EL3WINDOW(6);
for (i = 0; i < 9; i++)
inb(ioaddr + i);
inw(ioaddr + 10);
inw(ioaddr + 12);
/* Switch to register set 1 for normal use. */
EL3WINDOW(1);
	/* Accept broadcast and phys addr only. */
outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
/* Allow status bits to be seen. */
outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
/* Ack all pending events, and set active indicator mask. */
outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
ioaddr + EL3_CMD);
outw(SetIntrEnb | IntLatch|TxAvailable|TxComplete|RxComplete|StatsFull,
ioaddr + EL3_CMD);
netif_start_queue(dev);
}
/* Power Management support functions */
#ifdef CONFIG_PM
static int
el3_suspend(struct device *pdev, pm_message_t state)
{
unsigned long flags;
struct net_device *dev;
struct el3_private *lp;
int ioaddr;
dev = dev_get_drvdata(pdev);
lp = netdev_priv(dev);
ioaddr = dev->base_addr;
spin_lock_irqsave(&lp->lock, flags);
if (netif_running(dev))
netif_device_detach(dev);
el3_down(dev);
outw(PowerDown, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
}
static int
el3_resume(struct device *pdev)
{
unsigned long flags;
struct net_device *dev;
struct el3_private *lp;
int ioaddr;
dev = dev_get_drvdata(pdev);
lp = netdev_priv(dev);
ioaddr = dev->base_addr;
spin_lock_irqsave(&lp->lock, flags);
outw(PowerUp, ioaddr + EL3_CMD);
EL3WINDOW(0);
el3_up(dev);
if (netif_running(dev))
netif_device_attach(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
}
#endif /* CONFIG_PM */
module_param(debug, int, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(debug, "debug level (0-6)");
MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
#ifdef CONFIG_PNP
module_param(nopnp, int, 0);
MODULE_PARM_DESC(nopnp, "disable ISA PnP support (0-1)");
#endif /* CONFIG_PNP */
MODULE_DESCRIPTION("3Com Etherlink III (3c509, 3c509B, 3c529, 3c579) ethernet driver");
MODULE_LICENSE("GPL");
static int __init el3_init_module(void)
{
int ret = 0;
if (debug >= 0)
el3_debug = debug;
#ifdef CONFIG_PNP
if (!nopnp) {
ret = pnp_register_driver(&el3_pnp_driver);
if (!ret)
pnp_registered = 1;
}
#endif
/* Select an open I/O location at 0x1*0 to do ISA contention select. */
/* Start with 0x110 to avoid some sound cards.*/
for (id_port = 0x110 ; id_port < 0x200; id_port += 0x10) {
if (!request_region(id_port, 1, "3c509-control"))
continue;
outb(0x00, id_port);
outb(0xff, id_port);
if (inb(id_port) & 0x01)
break;
else
release_region(id_port, 1);
}
if (id_port >= 0x200) {
id_port = 0;
pr_err("No I/O port available for 3c509 activation.\n");
} else {
ret = isa_register_driver(&el3_isa_driver, EL3_MAX_CARDS);
if (!ret)
isa_registered = 1;
}
#ifdef CONFIG_EISA
ret = eisa_driver_register(&el3_eisa_driver);
if (!ret)
eisa_registered = 1;
#endif
#ifdef CONFIG_PNP
if (pnp_registered)
ret = 0;
#endif
if (isa_registered)
ret = 0;
#ifdef CONFIG_EISA
if (eisa_registered)
ret = 0;
#endif
return ret;
}
static void __exit el3_cleanup_module(void)
{
#ifdef CONFIG_PNP
if (pnp_registered)
pnp_unregister_driver(&el3_pnp_driver);
#endif
if (isa_registered)
isa_unregister_driver(&el3_isa_driver);
if (id_port)
release_region(id_port, 1);
#ifdef CONFIG_EISA
if (eisa_registered)
eisa_driver_unregister(&el3_eisa_driver);
#endif
}
module_init (el3_init_module);
module_exit (el3_cleanup_module);
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright Altera Corporation (C) 2013-2014. All rights reserved
*/
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#define DRIVER_NAME "altera-mailbox"
#define MAILBOX_CMD_REG 0x00
#define MAILBOX_PTR_REG 0x04
#define MAILBOX_STS_REG 0x08
#define MAILBOX_INTMASK_REG 0x0C
#define INT_PENDING_MSK 0x1
#define INT_SPACE_MSK 0x2
#define STS_PENDING_MSK 0x1
#define STS_FULL_MSK 0x2
#define STS_FULL_OFT 0x1
#define MBOX_PENDING(status) (((status) & STS_PENDING_MSK))
#define MBOX_FULL(status) (((status) & STS_FULL_MSK) >> STS_FULL_OFT)
enum altera_mbox_msg {
MBOX_CMD = 0,
MBOX_PTR,
};
#define MBOX_POLLING_MS 5 /* polling interval 5ms */
struct altera_mbox {
bool is_sender; /* 1-sender, 0-receiver */
bool intr_mode;
int irq;
void __iomem *mbox_base;
struct device *dev;
struct mbox_controller controller;
/* If the controller supports only RX polling mode */
struct timer_list rxpoll_timer;
struct mbox_chan *chan;
};
static struct altera_mbox *mbox_chan_to_altera_mbox(struct mbox_chan *chan)
{
if (!chan || !chan->con_priv)
return NULL;
return (struct altera_mbox *)chan->con_priv;
}
static inline int altera_mbox_full(struct altera_mbox *mbox)
{
u32 status;
status = readl_relaxed(mbox->mbox_base + MAILBOX_STS_REG);
return MBOX_FULL(status);
}
static inline int altera_mbox_pending(struct altera_mbox *mbox)
{
u32 status;
status = readl_relaxed(mbox->mbox_base + MAILBOX_STS_REG);
return MBOX_PENDING(status);
}
static void altera_mbox_rx_intmask(struct altera_mbox *mbox, bool enable)
{
u32 mask;
mask = readl_relaxed(mbox->mbox_base + MAILBOX_INTMASK_REG);
if (enable)
mask |= INT_PENDING_MSK;
else
mask &= ~INT_PENDING_MSK;
writel_relaxed(mask, mbox->mbox_base + MAILBOX_INTMASK_REG);
}
static void altera_mbox_tx_intmask(struct altera_mbox *mbox, bool enable)
{
u32 mask;
mask = readl_relaxed(mbox->mbox_base + MAILBOX_INTMASK_REG);
if (enable)
mask |= INT_SPACE_MSK;
else
mask &= ~INT_SPACE_MSK;
writel_relaxed(mask, mbox->mbox_base + MAILBOX_INTMASK_REG);
}
static bool altera_mbox_is_sender(struct altera_mbox *mbox)
{
u32 reg;
/* Write a magic number to PTR register and read back this register.
* This register is read-write if it is a sender.
*/
#define MBOX_MAGIC 0xA5A5AA55
writel_relaxed(MBOX_MAGIC, mbox->mbox_base + MAILBOX_PTR_REG);
reg = readl_relaxed(mbox->mbox_base + MAILBOX_PTR_REG);
if (reg == MBOX_MAGIC) {
/* Clear to 0 */
writel_relaxed(0, mbox->mbox_base + MAILBOX_PTR_REG);
return true;
}
return false;
}
static void altera_mbox_rx_data(struct mbox_chan *chan)
{
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
u32 data[2];
if (altera_mbox_pending(mbox)) {
data[MBOX_PTR] =
readl_relaxed(mbox->mbox_base + MAILBOX_PTR_REG);
data[MBOX_CMD] =
readl_relaxed(mbox->mbox_base + MAILBOX_CMD_REG);
mbox_chan_received_data(chan, (void *)data);
}
}
static void altera_mbox_poll_rx(struct timer_list *t)
{
struct altera_mbox *mbox = from_timer(mbox, t, rxpoll_timer);
altera_mbox_rx_data(mbox->chan);
mod_timer(&mbox->rxpoll_timer,
jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
}
static irqreturn_t altera_mbox_tx_interrupt(int irq, void *p)
{
struct mbox_chan *chan = (struct mbox_chan *)p;
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
altera_mbox_tx_intmask(mbox, false);
mbox_chan_txdone(chan, 0);
return IRQ_HANDLED;
}
static irqreturn_t altera_mbox_rx_interrupt(int irq, void *p)
{
struct mbox_chan *chan = (struct mbox_chan *)p;
altera_mbox_rx_data(chan);
return IRQ_HANDLED;
}
static int altera_mbox_startup_sender(struct mbox_chan *chan)
{
int ret;
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
if (mbox->intr_mode) {
ret = request_irq(mbox->irq, altera_mbox_tx_interrupt, 0,
DRIVER_NAME, chan);
if (unlikely(ret)) {
dev_err(mbox->dev,
"failed to register mailbox interrupt:%d\n",
ret);
return ret;
}
}
return 0;
}
static int altera_mbox_startup_receiver(struct mbox_chan *chan)
{
int ret;
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
if (mbox->intr_mode) {
ret = request_irq(mbox->irq, altera_mbox_rx_interrupt, 0,
DRIVER_NAME, chan);
if (unlikely(ret)) {
mbox->intr_mode = false;
goto polling; /* use polling if failed */
}
altera_mbox_rx_intmask(mbox, true);
return 0;
}
polling:
/* Setup polling timer */
mbox->chan = chan;
timer_setup(&mbox->rxpoll_timer, altera_mbox_poll_rx, 0);
mod_timer(&mbox->rxpoll_timer,
jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
return 0;
}
static int altera_mbox_send_data(struct mbox_chan *chan, void *data)
{
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
u32 *udata = (u32 *)data;
if (!mbox || !data)
return -EINVAL;
if (!mbox->is_sender) {
dev_warn(mbox->dev,
"failed to send. This is receiver mailbox.\n");
return -EINVAL;
}
if (altera_mbox_full(mbox))
return -EBUSY;
/* Enable the TX interrupt before sending */
if (mbox->intr_mode)
altera_mbox_tx_intmask(mbox, true);
/* The pointer register must be written before the command register */
writel_relaxed(udata[MBOX_PTR], mbox->mbox_base + MAILBOX_PTR_REG);
writel_relaxed(udata[MBOX_CMD], mbox->mbox_base + MAILBOX_CMD_REG);
return 0;
}
static bool altera_mbox_last_tx_done(struct mbox_chan *chan)
{
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
/* Return false if mailbox is full */
return !altera_mbox_full(mbox);
}
static bool altera_mbox_peek_data(struct mbox_chan *chan)
{
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
return altera_mbox_pending(mbox);
}
static int altera_mbox_startup(struct mbox_chan *chan)
{
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
int ret = 0;
if (!mbox)
return -EINVAL;
if (mbox->is_sender)
ret = altera_mbox_startup_sender(chan);
else
ret = altera_mbox_startup_receiver(chan);
return ret;
}
static void altera_mbox_shutdown(struct mbox_chan *chan)
{
struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
if (mbox->intr_mode) {
/* Unmask all interrupt masks */
writel_relaxed(~0, mbox->mbox_base + MAILBOX_INTMASK_REG);
free_irq(mbox->irq, chan);
} else if (!mbox->is_sender) {
del_timer_sync(&mbox->rxpoll_timer);
}
}
static const struct mbox_chan_ops altera_mbox_ops = {
.send_data = altera_mbox_send_data,
.startup = altera_mbox_startup,
.shutdown = altera_mbox_shutdown,
.last_tx_done = altera_mbox_last_tx_done,
.peek_data = altera_mbox_peek_data,
};
static int altera_mbox_probe(struct platform_device *pdev)
{
struct altera_mbox *mbox;
struct mbox_chan *chans;
int ret;
mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox),
GFP_KERNEL);
if (!mbox)
return -ENOMEM;
/* Allocate a single channel */
chans = devm_kzalloc(&pdev->dev, sizeof(*chans), GFP_KERNEL);
if (!chans)
return -ENOMEM;
mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->mbox_base))
return PTR_ERR(mbox->mbox_base);
/* Determine whether this mailbox is a sender or a receiver */
mbox->is_sender = altera_mbox_is_sender(mbox);
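	/* The interrupt line is optional; without one the driver falls back to polling */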
mbox->irq = platform_get_irq(pdev, 0);
if (mbox->irq >= 0)
mbox->intr_mode = true;
mbox->dev = &pdev->dev;
/* Hardware supports only one channel. */
chans[0].con_priv = mbox;
mbox->controller.dev = mbox->dev;
mbox->controller.num_chans = 1;
mbox->controller.chans = chans;
mbox->controller.ops = &altera_mbox_ops;
if (mbox->is_sender) {
if (mbox->intr_mode) {
mbox->controller.txdone_irq = true;
} else {
mbox->controller.txdone_poll = true;
mbox->controller.txpoll_period = MBOX_POLLING_MS;
}
}
ret = devm_mbox_controller_register(&pdev->dev, &mbox->controller);
	if (ret) {
		dev_err(&pdev->dev, "failed to register mailbox controller\n");
		return ret;
	}
	platform_set_drvdata(pdev, mbox);
	return 0;
}
static const struct of_device_id altera_mbox_match[] = {
{ .compatible = "altr,mailbox-1.0" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, altera_mbox_match);
static struct platform_driver altera_mbox_driver = {
.probe = altera_mbox_probe,
.driver = {
.name = DRIVER_NAME,
.of_match_table = altera_mbox_match,
},
};
module_platform_driver(altera_mbox_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Altera mailbox specific functions");
MODULE_AUTHOR("Ley Foon Tan <[email protected]>");
MODULE_ALIAS("platform:altera-mailbox");
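/*
 * Illustrative only, not part of this driver: a minimal sketch of a
 * hypothetical client sending one message through the channel registered
 * above, via the generic mailbox client API. The MBOX_CMD/MBOX_PTR
 * indices are assumed to match the two-word layout used by
 * altera_mbox_send_data()/altera_mbox_rx_data().
 */
#include <linux/mailbox_client.h>

static int __maybe_unused example_mbox_send(struct device *dev, u32 cmd, u32 ptr)
{
	struct mbox_client cl = {
		.dev = dev,
		.tx_block = true,
		.tx_tout = 100, /* ms */
	};
	struct mbox_chan *chan;
	u32 data[2];
	int ret;

	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	data[MBOX_CMD] = cmd;
	data[MBOX_PTR] = ptr;
	ret = mbox_send_message(chan, data);
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}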
|
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
* Copyright 2021-2022 TQ-Systems GmbH
* Author: Alexander Stein <[email protected]>
*/
/dts-v1/;
#include <dt-bindings/leds/common.h>
#include <dt-bindings/net/ti-dp83867.h>
#include <dt-bindings/phy/phy-imx8-pcie.h>
#include <dt-bindings/pwm/pwm.h>
#include "imx8mp-tqma8mpql.dtsi"
/ {
model = "TQ-Systems i.MX8MPlus TQMa8MPxL on MBa8MPxL";
compatible = "tq,imx8mp-tqma8mpql-mba8mpxl", "tq,imx8mp-tqma8mpql", "fsl,imx8mp";
chassis-type = "embedded";
chosen {
stdout-path = &uart4;
};
iio-hwmon {
compatible = "iio-hwmon";
io-channels = <&adc 0>, <&adc 1>;
};
aliases {
mmc0 = &usdhc3;
mmc1 = &usdhc2;
mmc2 = &usdhc1;
rtc0 = &pcf85063;
rtc1 = &snvs_rtc;
spi0 = &flexspi;
spi1 = &ecspi1;
spi2 = &ecspi2;
spi3 = &ecspi3;
};
backlight_lvds: backlight {
compatible = "pwm-backlight";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_backlight>;
pwms = <&pwm2 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <®_vcc_12v0>;
enable-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
status = "disabled";
};
clk_xtal25: clk-xtal25 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <25000000>;
};
connector {
compatible = "gpio-usb-b-connector", "usb-b-connector";
type = "micro";
label = "X29";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbcon0>;
id-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
port {
usb_dr_connector: endpoint {
remote-endpoint = <&usb3_dwc>;
};
};
};
fan0: pwm-fan {
compatible = "pwm-fan";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwmfan>;
fan-supply = <®_pwm_fan>;
#cooling-cells = <2>;
/* typical 25 kHz -> 40000 ns period */
pwms = <&pwm3 0 40000 PWM_POLARITY_INVERTED>;
cooling-levels = <0 32 64 128 196 240>;
pulses-per-revolution = <2>;
interrupt-parent = <&gpio5>;
interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
status = "disabled";
};
gpio-keys {
compatible = "gpio-keys";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpiobutton>;
autorepeat;
switch-1 {
label = "S12";
linux,code = <BTN_0>;
gpios = <&gpio5 27 GPIO_ACTIVE_LOW>;
wakeup-source;
};
switch-2 {
label = "S13";
linux,code = <BTN_1>;
gpios = <&gpio5 26 GPIO_ACTIVE_LOW>;
wakeup-source;
};
};
gpio-leds {
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpioled>;
led-0 {
color = <LED_COLOR_ID_GREEN>;
function = LED_FUNCTION_STATUS;
function-enumerator = <0>;
gpios = <&gpio5 5 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "default-on";
};
led-1 {
color = <LED_COLOR_ID_GREEN>;
function = LED_FUNCTION_HEARTBEAT;
gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
led-2 {
color = <LED_COLOR_ID_YELLOW>;
function = LED_FUNCTION_STATUS;
function-enumerator = <1>;
gpios = <&gpio5 3 GPIO_ACTIVE_HIGH>;
};
};
hdmi-connector {
compatible = "hdmi-connector";
label = "X44";
type = "a";
port {
hdmi_connector_in: endpoint {
remote-endpoint = <&hdmi_tx_out>;
};
};
};
display: display {
/*
* Display is not fixed, so the compatible string has to be
* added via a DT overlay
*/
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lvdsdisplay>;
power-supply = <®_vcc_3v3>;
enable-gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
backlight = <&backlight_lvds>;
status = "disabled";
};
reg_pwm_fan: regulator-pwm-fan {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_regpwmfan>;
regulator-name = "FAN_PWR";
regulator-min-microvolt = <12000000>;
regulator-max-microvolt = <12000000>;
gpio = <&gpio4 27 GPIO_ACTIVE_HIGH>;
enable-active-high;
vin-supply = <®_vcc_12v0>;
};
reg_usdhc2_vmmc: regulator-usdhc2 {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
regulator-name = "VSD_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
enable-active-high;
startup-delay-us = <100>;
off-on-delay-us = <12000>;
};
reg_vcc_12v0: regulator-12v0 {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg12v0>;
regulator-name = "VCC_12V0";
regulator-min-microvolt = <12000000>;
regulator-max-microvolt = <12000000>;
gpio = <&gpio2 6 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
reg_vcc_1v8: regulator-1v8 {
compatible = "regulator-fixed";
regulator-name = "VCC_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
reg_vcc_3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "VCC_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
reg_vcc_5v0: regulator-5v0 {
compatible = "regulator-fixed";
regulator-name = "VCC_5V0";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
};
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
ranges;
/* global autoconfigured region for contiguous allocations */
linux,cma {
compatible = "shared-dma-pool";
reusable;
size = <0 0x38000000>;
alloc-ranges = <0 0x40000000 0 0xB0000000>;
linux,cma-default;
};
};
sound {
compatible = "fsl,imx-audio-tlv320aic32x4";
model = "tq-tlv320aic32x";
audio-cpu = <&sai3>;
audio-codec = <&tlv320aic3x04>;
};
thermal-zones {
soc-thermal {
trips {
soc_active0: trip-active0 {
temperature = <40000>;
hysteresis = <5000>;
type = "active";
};
soc_active1: trip-active1 {
temperature = <48000>;
hysteresis = <3000>;
type = "active";
};
soc_active2: trip-active2 {
temperature = <60000>;
hysteresis = <10000>;
type = "active";
};
};
cooling-maps {
map1 {
trip = <&soc_active0>;
cooling-device = <&fan0 1 1>;
};
map2 {
trip = <&soc_active1>;
cooling-device = <&fan0 2 2>;
};
map3 {
trip = <&soc_active2>;
cooling-device = <&fan0 3 3>;
};
};
};
};
};
&ecspi1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ecspi1>;
cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
status = "okay";
};
&ecspi2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ecspi2>;
cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
status = "okay";
};
&ecspi3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ecspi3>;
cs-gpios = <&gpio5 25 GPIO_ACTIVE_LOW>;
status = "okay";
adc: adc@0 {
reg = <0>;
compatible = "microchip,mcp3202";
/* 100 ksps * 18 clock cycles per sample */
spi-max-frequency = <1800000>;
vref-supply = <®_vcc_3v3>;
#io-channel-cells = <1>;
};
};
&eqos {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_eqos>, <&pinctrl_eqos_phy>;
phy-mode = "rgmii-id";
phy-handle = <ðphy3>;
status = "okay";
mdio {
compatible = "snps,dwmac-mdio";
#address-cells = <1>;
#size-cells = <0>;
ethphy3: ethernet-phy@3 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <3>;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
ti,dp83867-rxctrl-strap-quirk;
ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
reset-gpios = <&gpio4 2 GPIO_ACTIVE_LOW>;
reset-assert-us = <500000>;
reset-deassert-us = <50000>;
enet-phy-lane-no-swap;
interrupt-parent = <&gpio4>;
interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
};
};
};
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec>, <&pinctrl_fec_phy>;
phy-mode = "rgmii-id";
phy-handle = <ðphy0>;
fsl,magic-packet;
status = "okay";
mdio {
#address-cells = <1>;
#size-cells = <0>;
ethphy0: ethernet-phy@0 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0>;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
ti,dp83867-rxctrl-strap-quirk;
ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
reset-gpios = <&gpio4 0 GPIO_ACTIVE_LOW>;
reset-assert-us = <500000>;
reset-deassert-us = <50000>;
enet-phy-lane-no-swap;
interrupt-parent = <&gpio4>;
interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
};
};
};
&flexcan1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flexcan1>;
xceiver-supply = <®_vcc_3v3>;
status = "okay";
};
&flexcan2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flexcan2>;
xceiver-supply = <®_vcc_3v3>;
status = "okay";
};
&gpio1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpio1>;
gpio-line-names = "GPO1", "GPO0", "", "GPO3",
"", "", "GPO2", "GPI0",
"PMIC_IRQ", "GPI1", "OTG_ID", "USB_HUB_RST#",
"OTG_PWR", "", "GPI2", "GPI3",
"", "", "", "",
"", "", "", "",
"", "", "", "",
"", "", "", "";
};
&gpio2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hoggpio2>;
gpio-line-names = "", "", "", "",
"", "", "VCC12V_EN", "PERST#",
"", "", "CLKREQ#", "PEWAKE#",
"USDHC2_CD", "", "", "",
"", "", "", "V_SD3V3_EN",
"", "", "", "",
"", "", "", "",
"", "", "", "";
perst-hog {
gpio-hog;
gpios = <7 0>;
output-high;
line-name = "PERST#";
};
clkreq-hog {
gpio-hog;
gpios = <10 0>;
input;
line-name = "CLKREQ#";
};
pewake-hog {
gpio-hog;
gpios = <11 0>;
input;
line-name = "PEWAKE#";
};
};
&gpio3 {
gpio-line-names = "", "", "", "",
"", "", "", "",
"", "", "", "",
"", "", "LVDS0_RESET#", "",
"", "", "", "LVDS0_BLT_EN",
"LVDS0_PWR_EN", "", "", "",
"", "", "", "",
"", "", "", "";
};
&gpio4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpio4>;
gpio-line-names = "ENET0_RST#", "ENET0_INT#", "ENET1_RST#", "ENET1_INT#",
"", "", "", "",
"", "", "", "",
"", "", "", "",
"", "", "DP_IRQ", "DSI_EN",
"HDMI_OC#", "TEMP_EVENT#", "PCIE_REFCLK_OE#", "",
"", "", "", "FAN_PWR",
"RTC_EVENT#", "CODEC_RST#", "", "";
pcie-refclkreq-hog {
gpio-hog;
gpios = <22 0>;
output-high;
line-name = "PCIE_REFCLK_OE#";
};
};
&gpio5 {
gpio-line-names = "", "", "", "LED2",
"LED1", "LED0", "CSI0_RESET#", "CSI0_SYNC",
"CSI0_TRIGGER", "CSI0_ENABLE", "", "",
"", "ECSPI2_SS0", "", "",
"", "", "", "",
"", "", "", "",
"", "ECSPI3_SS0", "SWITCH_A", "SWITCH_B",
"", "", "", "";
};
&hdmi_pvi {
status = "okay";
};
&hdmi_tx {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hdmi>;
status = "okay";
ports {
port@1 {
hdmi_tx_out: endpoint {
remote-endpoint = <&hdmi_connector_in>;
};
};
};
};
&hdmi_tx_phy {
status = "okay";
};
&i2c2 {
clock-frequency = <384000>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
pinctrl-1 = <&pinctrl_i2c2_gpio>;
scl-gpios = <&gpio5 16 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
tlv320aic3x04: audio-codec@18 {
compatible = "ti,tlv320aic32x4";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_tlv320aic3x04>;
reg = <0x18>;
clock-names = "mclk";
clocks = <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1>;
reset-gpios = <&gpio4 29 GPIO_ACTIVE_LOW>;
iov-supply = <®_vcc_1v8>;
ldoin-supply = <®_vcc_3v3>;
};
se97_1c: temperature-sensor@1c {
compatible = "nxp,se97b", "jedec,jc-42.4-temp";
reg = <0x1c>;
};
at24c02_54: eeprom@54 {
compatible = "nxp,se97b", "atmel,24c02";
reg = <0x54>;
pagesize = <16>;
vcc-supply = <®_vcc_3v3>;
};
pcieclk: clock-generator@6a {
compatible = "renesas,9fgv0241";
reg = <0x6a>;
clocks = <&clk_xtal25>;
#clock-cells = <1>;
};
};
&i2c4 {
clock-frequency = <384000>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c4>;
pinctrl-1 = <&pinctrl_i2c4_gpio>;
scl-gpios = <&gpio5 20 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio5 21 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
};
&i2c6 {
clock-frequency = <384000>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c6>;
pinctrl-1 = <&pinctrl_i2c6_gpio>;
scl-gpios = <&gpio2 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio2 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
};
&lcdif3 {
status = "okay";
};
&pcf85063 {
/* RTC_EVENT# is connected on MBa8MPxL */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pcf85063>;
interrupt-parent = <&gpio4>;
interrupts = <28 IRQ_TYPE_EDGE_FALLING>;
};
&pcie_phy {
fsl,clkreq-unsupported;
fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_INPUT>;
clocks = <&pcieclk 0>;
clock-names = "ref";
status = "okay";
};
&pcie {
clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
<&clk IMX8MP_CLK_HSIO_AXI>,
<&clk IMX8MP_CLK_PCIE_ROOT>;
clock-names = "pcie", "pcie_bus", "pcie_aux";
assigned-clocks = <&clk IMX8MP_CLK_PCIE_AUX>;
assigned-clock-rates = <10000000>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_50M>;
status = "okay";
};
&pwm2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm2>;
status = "disabled";
};
&pwm3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm3>;
status = "okay";
};
&sai3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai3>;
assigned-clocks = <&clk IMX8MP_CLK_SAI3>;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>;
assigned-clock-rates = <12288000>;
fsl,sai-mclk-direction-output;
status = "okay";
};
&snvs_pwrkey {
status = "okay";
};
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
assigned-clocks = <&clk IMX8MP_CLK_UART1>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_80M>;
status = "okay";
};
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
assigned-clocks = <&clk IMX8MP_CLK_UART2>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_80M>;
status = "okay";
};
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
assigned-clocks = <&clk IMX8MP_CLK_UART3>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_80M>;
status = "okay";
};
&uart4 {
/* console */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart4>;
status = "okay";
};
&usb3_0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>;
fsl,over-current-active-low;
status = "okay";
};
&usb3_1 {
fsl,disable-port-power-control;
fsl,permanently-attached;
status = "okay";
};
&usb3_phy0 {
vbus-supply = <®_vcc_5v0>;
status = "okay";
};
&usb3_phy1 {
vbus-supply = <®_vcc_5v0>;
status = "okay";
};
&usb_dwc3_0 {
/* dual role is implemented, but not a full featured OTG */
hnp-disable;
srp-disable;
adp-disable;
dr_mode = "otg";
usb-role-switch;
role-switch-default-mode = "peripheral";
status = "okay";
port {
usb3_dwc: endpoint {
remote-endpoint = <&usb_dr_connector>;
};
};
};
&usb_dwc3_1 {
dr_mode = "host";
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbhub>;
status = "okay";
hub_2_0: hub@1 {
compatible = "usb451,8142";
reg = <1>;
peer-hub = <&hub_3_0>;
reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
vdd-supply = <®_vcc_3v3>;
};
hub_3_0: hub@2 {
compatible = "usb451,8140";
reg = <2>;
peer-hub = <&hub_2_0>;
reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
vdd-supply = <®_vcc_3v3>;
};
};
&usdhc2 {
pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
vmmc-supply = <®_usdhc2_vmmc>;
no-mmc;
no-sdio;
disable-wp;
bus-width = <4>;
status = "okay";
};
&iomuxc {
pinctrl_backlight: backlightgrp {
fsl,pins = <MX8MP_IOMUXC_SAI5_RXFS__GPIO3_IO19 0x14>;
};
pinctrl_flexcan1: flexcan1grp {
fsl,pins = <MX8MP_IOMUXC_SAI5_RXD1__CAN1_TX 0x150>,
<MX8MP_IOMUXC_SAI5_RXD2__CAN1_RX 0x150>;
};
pinctrl_flexcan2: flexcan2grp {
fsl,pins = <MX8MP_IOMUXC_SAI5_RXD3__CAN2_TX 0x150>,
<MX8MP_IOMUXC_SAI5_MCLK__CAN2_RX 0x150>;
};
/* only on X57, primarily used as CSI0 control signals */
pinctrl_ecspi1: ecspi1grp {
fsl,pins = <MX8MP_IOMUXC_ECSPI1_MISO__ECSPI1_MISO 0x1c0>,
<MX8MP_IOMUXC_ECSPI1_MOSI__ECSPI1_MOSI 0x1c0>,
<MX8MP_IOMUXC_ECSPI1_SCLK__ECSPI1_SCLK 0x1c0>,
<MX8MP_IOMUXC_ECSPI1_SS0__GPIO5_IO09 0x1c0>;
};
/* on X63 and optionally on X57, can also be used as CSI1 control signals */
pinctrl_ecspi2: ecspi2grp {
fsl,pins = <MX8MP_IOMUXC_ECSPI2_MISO__ECSPI2_MISO 0x1c0>,
<MX8MP_IOMUXC_ECSPI2_MOSI__ECSPI2_MOSI 0x1c0>,
<MX8MP_IOMUXC_ECSPI2_SCLK__ECSPI2_SCLK 0x1c0>,
<MX8MP_IOMUXC_ECSPI2_SS0__GPIO5_IO13 0x1c0>;
};
pinctrl_ecspi3: ecspi3grp {
fsl,pins = <MX8MP_IOMUXC_UART1_TXD__ECSPI3_MOSI 0x1c0>,
<MX8MP_IOMUXC_UART1_RXD__ECSPI3_SCLK 0x1c0>,
<MX8MP_IOMUXC_UART2_RXD__ECSPI3_MISO 0x1c0>,
<MX8MP_IOMUXC_UART2_TXD__GPIO5_IO25 0x1c0>;
};
pinctrl_eqos: eqosgrp {
fsl,pins = <MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x40000044>,
<MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x40000044>,
<MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x90>,
<MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x90>,
<MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x90>,
<MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x90>,
<MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x90>,
<MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x90>,
<MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x12>,
<MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x12>,
<MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x12>,
<MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x12>,
<MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x12>,
<MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x14>;
};
pinctrl_eqos_event: eqosevtgrp {
fsl,pins = <MX8MP_IOMUXC_SAI2_RXD0__ENET_QOS_1588_EVENT2_OUT 0x100>,
<MX8MP_IOMUXC_SAI2_TXD0__ENET_QOS_1588_EVENT2_IN 0x1c0>;
};
pinctrl_eqos_phy: eqosphygrp {
fsl,pins = <MX8MP_IOMUXC_SAI1_RXD0__GPIO4_IO02 0x100>,
<MX8MP_IOMUXC_SAI1_RXD1__GPIO4_IO03 0x1c0>;
};
pinctrl_fec: fecgrp {
fsl,pins = <MX8MP_IOMUXC_SAI1_RXD2__ENET1_MDC 0x40000044>,
<MX8MP_IOMUXC_SAI1_RXD3__ENET1_MDIO 0x40000044>,
<MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0 0x90>,
<MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1 0x90>,
<MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2 0x90>,
<MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3 0x90>,
<MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC 0x90>,
<MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL 0x90>,
<MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0 0x12>,
<MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1 0x12>,
<MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2 0x12>,
<MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3 0x12>,
<MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL 0x12>,
<MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC 0x14>;
};
pinctrl_fec_event: fecevtgrp {
fsl,pins = <MX8MP_IOMUXC_SAI1_RXFS__ENET1_1588_EVENT0_IN 0x100>,
<MX8MP_IOMUXC_SAI1_RXC__ENET1_1588_EVENT0_OUT 0x1c0>;
};
pinctrl_fec_phy: fecphygrp {
fsl,pins = <MX8MP_IOMUXC_SAI1_RXFS__GPIO4_IO00 0x100>,
<MX8MP_IOMUXC_SAI1_RXC__GPIO4_IO01 0x1c0>;
};
pinctrl_fec_phyalt: fecphyaltgrp {
fsl,pins = <MX8MP_IOMUXC_SAI2_TXFS__GPIO4_IO24 0x180>,
<MX8MP_IOMUXC_SAI2_TXC__GPIO4_IO25 0x180>;
};
pinctrl_gpiobutton: gpiobuttongrp {
fsl,pins = <MX8MP_IOMUXC_UART3_RXD__GPIO5_IO26 0x10>,
<MX8MP_IOMUXC_UART3_TXD__GPIO5_IO27 0x10>;
};
pinctrl_gpioled: gpioledgrp {
fsl,pins = <MX8MP_IOMUXC_SPDIF_EXT_CLK__GPIO5_IO05 0x14>,
<MX8MP_IOMUXC_SPDIF_RX__GPIO5_IO04 0x14>,
<MX8MP_IOMUXC_SPDIF_TX__GPIO5_IO03 0x14>;
};
pinctrl_gpio1: gpio1grp {
fsl,pins = <MX8MP_IOMUXC_GPIO1_IO00__GPIO1_IO00 0x10>,
<MX8MP_IOMUXC_GPIO1_IO01__GPIO1_IO01 0x10>,
<MX8MP_IOMUXC_GPIO1_IO03__GPIO1_IO03 0x10>,
<MX8MP_IOMUXC_GPIO1_IO06__GPIO1_IO06 0x10>,
<MX8MP_IOMUXC_GPIO1_IO07__GPIO1_IO07 0x80>,
<MX8MP_IOMUXC_GPIO1_IO09__GPIO1_IO09 0x80>,
<MX8MP_IOMUXC_GPIO1_IO14__GPIO1_IO14 0x80>,
<MX8MP_IOMUXC_GPIO1_IO15__GPIO1_IO15 0x80>;
};
pinctrl_gpio4: gpio4grp {
fsl,pins = <MX8MP_IOMUXC_SAI1_MCLK__GPIO4_IO20 0x180>,
<MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22 0x180>;
};
pinctrl_hdmi: hdmigrp {
fsl,pins = <MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x400001c2>,
<MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x400001c2>,
<MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x40000010>,
<MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x40000010>;
};
pinctrl_hoggpio2: hoggpio2grp {
fsl,pins = <MX8MP_IOMUXC_SD1_DATA5__GPIO2_IO07 0x140>,
<MX8MP_IOMUXC_SD1_RESET_B__GPIO2_IO10 0x140>,
<MX8MP_IOMUXC_SD1_STROBE__GPIO2_IO11 0x140>;
};
pinctrl_i2c2: i2c2grp {
fsl,pins = <MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001e2>,
<MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001e2>;
};
pinctrl_i2c2_gpio: i2c2-gpiogrp {
fsl,pins = <MX8MP_IOMUXC_I2C2_SCL__GPIO5_IO16 0x400001e2>,
<MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17 0x400001e2>;
};
pinctrl_i2c4: i2c4grp {
fsl,pins = <MX8MP_IOMUXC_I2C4_SCL__I2C4_SCL 0x400001e2>,
<MX8MP_IOMUXC_I2C4_SDA__I2C4_SDA 0x400001e2>;
};
pinctrl_i2c4_gpio: i2c4-gpiogrp {
fsl,pins = <MX8MP_IOMUXC_I2C4_SCL__GPIO5_IO20 0x400001e2>,
<MX8MP_IOMUXC_I2C4_SDA__GPIO5_IO21 0x400001e2>;
};
pinctrl_i2c6: i2c6grp {
fsl,pins = <MX8MP_IOMUXC_SD1_DATA0__I2C6_SCL 0x400001e2>,
<MX8MP_IOMUXC_SD1_DATA1__I2C6_SDA 0x400001e2>;
};
pinctrl_i2c6_gpio: i2c6-gpiogrp {
fsl,pins = <MX8MP_IOMUXC_SD1_DATA0__GPIO2_IO02 0x400001e2>,
<MX8MP_IOMUXC_SD1_DATA1__GPIO2_IO03 0x400001e2>;
};
pinctrl_lvdsdisplay: lvdsdisplaygrp {
fsl,pins = <MX8MP_IOMUXC_SAI5_RXC__GPIO3_IO20 0x10>; /* Power enable */
};
pinctrl_pcf85063: pcf85063grp {
fsl,pins = <MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28 0x80>;
};
/* LVDS Backlight */
pinctrl_pwm2: pwm2grp {
fsl,pins = <MX8MP_IOMUXC_SAI5_RXD0__PWM2_OUT 0x14>;
};
/* FAN */
pinctrl_pwm3: pwm3grp {
fsl,pins = <MX8MP_IOMUXC_I2C3_SDA__PWM3_OUT 0x14>;
};
pinctrl_pwmfan: pwmfangrp {
fsl,pins = <MX8MP_IOMUXC_I2C3_SCL__GPIO5_IO18 0x80>; /* FAN RPM */
};
pinctrl_reg12v0: reg12v0grp {
fsl,pins = <MX8MP_IOMUXC_SD1_DATA4__GPIO2_IO06 0x140>; /* VCC12V enable */
};
pinctrl_regpwmfan: regpwmfangrp {
fsl,pins = <MX8MP_IOMUXC_SAI2_MCLK__GPIO4_IO27 0x80>;
};
pinctrl_sai3: sai3grp {
fsl,pins = <
MX8MP_IOMUXC_SAI3_TXFS__AUDIOMIX_SAI3_TX_SYNC 0x94
MX8MP_IOMUXC_SAI3_TXC__AUDIOMIX_SAI3_TX_BCLK 0x94
MX8MP_IOMUXC_SAI3_RXD__AUDIOMIX_SAI3_RX_DATA00 0x94
MX8MP_IOMUXC_SAI3_TXD__AUDIOMIX_SAI3_TX_DATA00 0x94
MX8MP_IOMUXC_SAI3_MCLK__AUDIOMIX_SAI3_MCLK 0x94
>;
};
pinctrl_tlv320aic3x04: tlv320aic3x04grp {
fsl,pins = <
/* CODEC RST# */
MX8MP_IOMUXC_SAI3_RXC__GPIO4_IO29 0x180
>;
};
/* X61 */
pinctrl_uart1: uart1grp {
fsl,pins = <MX8MP_IOMUXC_SD1_CLK__UART1_DCE_TX 0x140>,
<MX8MP_IOMUXC_SD1_CMD__UART1_DCE_RX 0x140>;
};
/* X61 */
pinctrl_uart2: uart2grp {
fsl,pins = <MX8MP_IOMUXC_SD1_DATA2__UART2_DCE_TX 0x140>,
<MX8MP_IOMUXC_SD1_DATA3__UART2_DCE_RX 0x140>;
};
pinctrl_uart3: uart3grp {
fsl,pins = <MX8MP_IOMUXC_SD1_DATA6__UART3_DCE_TX 0x140>,
<MX8MP_IOMUXC_SD1_DATA7__UART3_DCE_RX 0x140>;
};
pinctrl_uart4: uart4grp {
fsl,pins = <MX8MP_IOMUXC_UART4_RXD__UART4_DCE_RX 0x140>,
<MX8MP_IOMUXC_UART4_TXD__UART4_DCE_TX 0x140>;
};
pinctrl_usb0: usb0grp {
fsl,pins = <MX8MP_IOMUXC_GPIO1_IO13__USB1_OTG_OC 0x1c0>,
<MX8MP_IOMUXC_GPIO1_IO12__USB1_OTG_PWR 0x1c0>;
};
pinctrl_usbcon0: usb0congrp {
fsl,pins = <MX8MP_IOMUXC_GPIO1_IO10__GPIO1_IO10 0x1c0>;
};
pinctrl_usbhub: usbhubgrp {
fsl,pins = <MX8MP_IOMUXC_GPIO1_IO11__GPIO1_IO11 0x10>;
};
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x192>,
<MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d2>,
<MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d2>,
<MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d2>,
<MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d2>,
<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d2>,
<MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0>;
};
pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
fsl,pins = <MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194>,
<MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4>,
<MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4>,
<MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4>,
<MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4>,
<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4>,
<MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0>;
};
pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
fsl,pins = <MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194>,
<MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4>,
<MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4>,
<MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4>,
<MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4>,
<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4>,
<MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0>;
};
pinctrl_usdhc2_gpio: usdhc2-gpiogrp {
fsl,pins = <MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0x1c0>;
};
};
|
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
#include "aspeed-g4.dtsi"
#include <dt-bindings/gpio/aspeed-gpio.h>
#define EFUSE_OUTPUT(n) \
efuse##n { \
compatible = "regulator-output"; \
vout-supply = <&efuse##n>; \
}
#define __stringify(x) #x
#define EFUSE(hexaddr, num) \
efuse@##hexaddr { \
compatible = "ti,lm25066"; \
reg = <0x##hexaddr>; \
shunt-resistor-micro-ohms = <675>; \
regulators { \
efuse##num: vout { \
regulator-name = __stringify(efuse##num##-reg); \
}; \
}; \
}
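/*
 * For illustration (not in the original source), EFUSE(10, 03) expands to:
 *
 *	efuse@10 {
 *		compatible = "ti,lm25066";
 *		reg = <0x10>;
 *		shunt-resistor-micro-ohms = <675>;
 *		regulators {
 *			efuse03: vout {
 *				regulator-name = "efuse03-reg";
 *			};
 *		};
 *	}
 */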
/{
model = "Delta Power AHE-50DC";
compatible = "delta,ahe50dc-bmc", "aspeed,ast2400";
aliases {
serial4 = &uart5;
/*
* pca9541-arbitrated logical i2c buses are numbered as the
* corresponding physical bus plus 20
*/
i2c20 = &i2carb0;
i2c21 = &i2carb1;
i2c22 = &i2carb2;
i2c23 = &i2carb3;
i2c24 = &i2carb4;
i2c26 = &i2carb6;
i2c27 = &i2carb7;
i2c28 = &i2carb8;
i2c32 = &i2carb12;
};
chosen {
stdout-path = &uart3;
bootargs = "console=ttyS2,115200n8 earlycon";
};
memory@40000000 {
reg = <0x40000000 0x10000000>;
};
leds {
compatible = "gpio-leds";
heartbeat {
gpios = <&gpio ASPEED_GPIO(P, 0) GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
panic {
gpios = <&gpio ASPEED_GPIO(P, 2) GPIO_ACTIVE_HIGH>;
linux,default-trigger = "panic";
};
};
iio-hwmon {
compatible = "iio-hwmon";
io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>, <&adc 4>,
<&adc 5>, <&adc 6>, <&adc 7>, <&adc 8>, <&adc 9>;
};
EFUSE_OUTPUT(01);
EFUSE_OUTPUT(02);
EFUSE_OUTPUT(03);
EFUSE_OUTPUT(04);
EFUSE_OUTPUT(05);
EFUSE_OUTPUT(06);
EFUSE_OUTPUT(07);
EFUSE_OUTPUT(08);
EFUSE_OUTPUT(09);
EFUSE_OUTPUT(10);
EFUSE_OUTPUT(11);
EFUSE_OUTPUT(12);
EFUSE_OUTPUT(13);
EFUSE_OUTPUT(14);
EFUSE_OUTPUT(15);
EFUSE_OUTPUT(16);
EFUSE_OUTPUT(17);
EFUSE_OUTPUT(18);
EFUSE_OUTPUT(19);
EFUSE_OUTPUT(20);
EFUSE_OUTPUT(21);
EFUSE_OUTPUT(22);
EFUSE_OUTPUT(23);
EFUSE_OUTPUT(24);
EFUSE_OUTPUT(25);
EFUSE_OUTPUT(26);
EFUSE_OUTPUT(27);
EFUSE_OUTPUT(28);
EFUSE_OUTPUT(29);
EFUSE_OUTPUT(30);
EFUSE_OUTPUT(31);
EFUSE_OUTPUT(32);
EFUSE_OUTPUT(33);
EFUSE_OUTPUT(34);
EFUSE_OUTPUT(35);
EFUSE_OUTPUT(36);
EFUSE_OUTPUT(37);
EFUSE_OUTPUT(38);
EFUSE_OUTPUT(39);
EFUSE_OUTPUT(40);
EFUSE_OUTPUT(41);
EFUSE_OUTPUT(42);
EFUSE_OUTPUT(43);
EFUSE_OUTPUT(44);
EFUSE_OUTPUT(45);
EFUSE_OUTPUT(46);
EFUSE_OUTPUT(47);
EFUSE_OUTPUT(48);
EFUSE_OUTPUT(49);
EFUSE_OUTPUT(50);
};
&fmc {
status = "okay";
flash@0 {
status = "okay";
m25p,fast-read;
label = "flash0";
spi-max-frequency = <50000000>; // 50 MHz
#include "openbmc-flash-layout.dtsi"
};
};
&uart3 {
status = "okay";
};
&mac1 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_rgmii2_default &pinctrl_mdio2_default>;
};
&i2c0 {
status = "okay";
bus-frequency = <200000>;
pca9541@79 {
compatible = "nxp,pca9541";
reg = <0x79>;
i2carb0: i2c-arb {
#address-cells = <1>;
#size-cells = <0>;
/* lm25066 efuses @ 10-17, 40-47, 50-57 */
EFUSE(10, 03);
EFUSE(11, 04);
EFUSE(12, 01);
EFUSE(13, 02);
EFUSE(14, 13);
EFUSE(15, 14);
EFUSE(16, 15);
EFUSE(17, 16);
EFUSE(40, 12);
EFUSE(41, 11);
EFUSE(42, 10);
EFUSE(43, 09);
EFUSE(44, 08);
EFUSE(45, 07);
EFUSE(46, 05);
EFUSE(47, 06);
EFUSE(50, 17);
EFUSE(51, 18);
EFUSE(52, 20);
EFUSE(53, 19);
EFUSE(54, 22);
EFUSE(55, 21);
EFUSE(56, 24);
EFUSE(57, 23);
};
};
};
&i2c1 {
status = "okay";
bus-frequency = <200000>;
pca9541@72 {
compatible = "nxp,pca9541";
reg = <0x72>;
i2carb1: i2c-arb {
#address-cells = <1>;
#size-cells = <0>;
};
};
};
&i2c2 {
status = "okay";
bus-frequency = <200000>;
pca9541@73 {
compatible = "nxp,pca9541";
reg = <0x73>;
i2carb2: i2c-arb {
#address-cells = <1>;
#size-cells = <0>;
};
};
};
&i2c3 {
status = "okay";
bus-frequency = <200000>;
pca9541@74 {
compatible = "nxp,pca9541";
reg = <0x74>;
i2carb3: i2c-arb {
#address-cells = <1>;
#size-cells = <0>;
};
};
};
&i2c4 {
status = "okay";
bus-frequency = <200000>;
pca9541@7a {
compatible = "nxp,pca9541";
reg = <0x7a>;
i2carb4: i2c-arb {
#address-cells = <1>;
#size-cells = <0>;
gpio@20 {
compatible = "nxp,pca9534";
reg = <0x20>;
gpio-controller;
#gpio-cells = <2>;
};
/* lm25066 efuses @ 10-17, 40-47, 50-57, 59, 5a */
EFUSE(10, 27);
EFUSE(11, 28);
EFUSE(12, 25);
EFUSE(13, 26);
EFUSE(14, 37);
EFUSE(15, 38);
EFUSE(16, 39);
EFUSE(17, 40);
EFUSE(40, 36);
EFUSE(41, 35);
EFUSE(42, 34);
EFUSE(43, 33);
EFUSE(44, 32);
EFUSE(45, 31);
EFUSE(46, 29);
EFUSE(47, 30);
EFUSE(50, 41);
EFUSE(51, 42);
EFUSE(52, 44);
EFUSE(53, 43);
EFUSE(54, 46);
EFUSE(55, 45);
EFUSE(56, 48);
EFUSE(57, 47);
EFUSE(59, 49);
EFUSE(5a, 50);
};
};
};
&i2c6 {
status = "okay";
bus-frequency = <200000>;
pca9541@75 {
compatible = "nxp,pca9541";
reg = <0x75>;
i2carb6: i2c-arb {
#address-cells = <1>;
#size-cells = <0>;
};
};
};
&i2c7 {
status = "okay";
bus-frequency = <200000>;
pca9541@76 {
compatible = "nxp,pca9541";
reg = <0x76>;
i2carb7: i2c-arb {
#address-cells = <1>;
#size-cells = <0>;
};
};
};
&i2c8 {
status = "okay";
bus-frequency = <200000>;
pca9541@7c {
compatible = "nxp,pca9541";
reg = <0x7c>;
i2carb8: i2c-arb {
#address-cells = <1>;
#size-cells = <0>;
fancontrol@30 {
compatible = "delta,ahe50dc-fan";
reg = <0x30>;
};
/* Baseboard FRU eeprom */
eeprom@50 {
compatible = "atmel,24c02";
reg = <0x50>;
};
};
};
};
&i2c12 {
status = "okay";
bus-frequency = <200000>;
pca9541@71 {
compatible = "nxp,pca9541";
reg = <0x71>;
i2carb12: i2c-arb {
#address-cells = <1>;
#size-cells = <0>;
};
};
};
&gpio {
status = "okay";
gpio-line-names =
/* A */ "", "", "", "", "", "", "", "",
/* B */ "", "", "", "", "", "", "", "",
/* C */ "RESET_PEER_N", "HEARTBEAT_OUT", "", "", "", "", "", "",
/* D */ "", "", "", "", "", "", "", "",
/* E */ "DOOM_N", "", "", "", "", "LED_PWR_BLUE", "", "",
/* F */ "", "", "", "", "", "", "", "",
/* G */ "", "", "", "", "", "", "", "",
/* H */ "", "", "", "", "", "", "", "",
/* I */ "", "", "", "", "", "", "", "",
/* J */ "", "", "BMC_ID", "", "", "", "", "",
/* K */ "", "", "", "", "", "", "", "",
/* L */ "", "", "", "", "", "", "", "",
/* M */ "", "", "", "", "", "", "", "",
/* N */ "", "", "", "", "", "", "", "",
/* O */ "", "", "", "", "", "", "", "",
/* P */ "LED_GREEN", "", "LED_RED", "", "", "", "", "",
/* Q */ "", "", "", "", "", "", "", "",
/* R */ "", "", "", "", "", "", "", "",
/* S */ "", "", "", "", "", "", "", "",
/* T */ "", "", "", "", "", "", "", "",
/* U */ "", "", "", "", "", "", "", "",
/* V */ "", "", "", "", "", "", "", "",
/* W */ "", "", "", "", "", "", "", "",
/* X */ "", "", "", "", "", "", "", "",
/* Y */ "HEARTBEAT_IN", "BOARDREV0", "BOARDREV1", "",
/* Z */ "", "", "", "", "", "", "", "",
/* AA */ "", "", "", "", "", "", "", "",
/* AB */ "", "", "", "";
/*
* I don't rightly know what this GPIO really *is*, but setting it to
* zero causes the fans to run at full speed, after which setting it
* back to one causes a power output glitch, so install a hog to keep
* it at one as a failsafe to ensure nothing accidentally touches it.
*/
doom-guardrail {
gpio-hog;
gpios = <ASPEED_GPIO(E, 0) GPIO_ACTIVE_LOW>;
output-low;
};
};
&adc {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_adc0_default
&pinctrl_adc1_default
&pinctrl_adc2_default
&pinctrl_adc3_default
&pinctrl_adc4_default
&pinctrl_adc5_default
&pinctrl_adc6_default
&pinctrl_adc7_default
&pinctrl_adc8_default
&pinctrl_adc9_default>;
};
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
*/
#include "phy_qmath.h"
/*
* Description: This function performs a 16-bit unsigned multiplication.
* To fit the output into 16 bits, the 32-bit multiplication result is
* right shifted by 16 bits.
*/
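/* Example (q.16 inputs): 0x4000 * 0x4000 = 0x10000000 >> 16 = 0x1000, i.e. 0.25 * 0.25 = 0.0625. */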
u16 qm_mulu16(u16 op1, u16 op2)
{
return (u16) (((u32) op1 * (u32) op2) >> 16);
}
/*
* Description: This function performs a 16-bit signed multiplication and
* returns the result in 16 bits. To fit the multiplication result into 16
* bits, the result is right shifted by 15 bits rather than 16; shifting by
* 15 removes the extra sign bit formed by the multiplication.
* When both 16-bit inputs are 0x8000, the intermediate result is saturated
* to 0x7fffffff.
*/
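/* Example (q.15 inputs): 0x6000 * 0x4000 (0.75 * 0.5) = 0x18000000 >> 15 = 0x3000, i.e. 0.375. */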
s16 qm_muls16(s16 op1, s16 op2)
{
s32 result;
if (op1 == (s16) 0x8000 && op2 == (s16) 0x8000)
result = 0x7fffffff;
else
result = ((s32) (op1) * (s32) (op2));
return (s16) (result >> 15);
}
/*
* Description: This function adds two 32-bit numbers and returns the 32-bit
* result. If the result overflows 32 bits, the output is saturated to the
* 32-bit limits.
*/
s32 qm_add32(s32 op1, s32 op2)
{
s32 result;
result = op1 + op2;
if (op1 < 0 && op2 < 0 && result > 0)
result = 0x80000000;
else if (op1 > 0 && op2 > 0 && result < 0)
result = 0x7fffffff;
return result;
}
/*
* Description: This function adds two 16-bit numbers and returns the 16-bit
* result. If the result overflows 16 bits, the output is saturated to the
* 16-bit limits.
*/
s16 qm_add16(s16 op1, s16 op2)
{
s16 result;
s32 temp = (s32) op1 + (s32) op2;
if (temp > (s32) 0x7fff)
result = (s16) 0x7fff;
else if (temp < (s32) 0xffff8000)
result = (s16) 0xffff8000;
else
result = (s16) temp;
return result;
}
/*
* Description: This function performs a 16-bit subtraction and returns the
* 16-bit result. If the result overflows 16 bits, the output is saturated
* to the 16-bit limits.
*/
s16 qm_sub16(s16 op1, s16 op2)
{
s16 result;
s32 temp = (s32) op1 - (s32) op2;
if (temp > (s32) 0x7fff)
result = (s16) 0x7fff;
else if (temp < (s32) 0xffff8000)
result = (s16) 0xffff8000;
else
result = (s16) temp;
return result;
}
/*
* Description: This function performs a saturating 32-bit left shift when
* the specified shift is positive, and a 32-bit arithmetic right shift when
* the specified shift is negative. It returns the result of the shift
* operation.
*/
s32 qm_shl32(s32 op, int shift)
{
int i;
s32 result;
result = op;
if (shift > 31)
shift = 31;
else if (shift < -31)
shift = -31;
if (shift >= 0) {
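		/*
		 * Each saturating self-add doubles the value, so an
		 * overflowing left shift clamps at the 32-bit limits
		 * instead of wrapping.
		 */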
for (i = 0; i < shift; i++)
result = qm_add32(result, result);
} else {
result = result >> (-shift);
}
return result;
}
/*
* Description: This function performs a saturating 16-bit left shift when
* the specified shift is positive, and a 16-bit arithmetic right shift when
* the specified shift is negative. It returns the result of the shift
* operation.
*/
s16 qm_shl16(s16 op, int shift)
{
int i;
s16 result;
result = op;
if (shift > 15)
shift = 15;
else if (shift < -15)
shift = -15;
if (shift > 0) {
for (i = 0; i < shift; i++)
result = qm_add16(result, result);
} else {
result = result >> (-shift);
}
return result;
}
/*
* Description: This function performs a 16-bit right shift when the shift is
* positive, and a saturating 16-bit left shift when the shift is negative.
* It returns the result of the shift operation.
*/
s16 qm_shr16(s16 op, int shift)
{
return qm_shl16(op, -shift);
}
/*
* Description: This function returns the number of redundant sign bits in a
* 32-bit number. Example: qm_norm32(0x00000080) = 23.
*/
s16 qm_norm32(s32 op)
{
u16 u16extraSignBits;
if (op == 0) {
return 31;
} else {
u16extraSignBits = 0;
while ((op >> 31) == (op >> 30)) {
u16extraSignBits++;
op = op << 1;
}
}
return u16extraSignBits;
}
/* This table is log2(1+(i/32)) where i=[0:1:32], in q.15 format */
static const s16 log_table[] = {
0,
1455,
2866,
4236,
5568,
6863,
8124,
9352,
10549,
11716,
12855,
13968,
15055,
16117,
17156,
18173,
19168,
20143,
21098,
22034,
22952,
23852,
24736,
25604,
26455,
27292,
28114,
28922,
29717,
30498,
31267,
32024,
32767
};
#define LOG_TABLE_SIZE 32 /* log_table size */
#define LOG2_LOG_TABLE_SIZE 5 /* log2(log_table size) */
#define Q_LOG_TABLE 15 /* qformat of log_table */
#define LOG10_2 19728 /* log10(2) in q.16 */
/*
* Description:
* This routine takes the input number N and its q format qN and computes
* log10(N). It first normalizes the input N, so that N is in the form
* mag * 2^x, where mag is in the range [2^30, 2^31 - 1].
* Then log2(mag * 2^x) = log2(mag) + x is computed, and from that
* log10(mag * 2^x) = log2(mag * 2^x) * log10(2) is computed.
* The routine looks up the log2 value in the table using the
* LOG2_LOG_TABLE_SIZE+1 MSBs. As the top MSB is always 1 after
* normalization, only the next LOG2_LOG_TABLE_SIZE MSBs are used for the
* table lookup. The following 16 MSBs are used for interpolation.
* Inputs:
* N - number whose log10 has to be found.
* qN - q format of N
* log10N - address where log10(N) will be written.
* qLog10N - address where the q format of log10N will be written.
* Note/Problem:
* For accurate results the input should be in normalized or near-normalized
* form.
*/
void qm_log10(s32 N, s16 qN, s16 *log10N, s16 *qLog10N)
{
s16 s16norm, s16tableIndex, s16errorApproximation;
u16 u16offset;
s32 s32log;
/* normalize the N. */
s16norm = qm_norm32(N);
N = N << s16norm;
/* The q format of N after normalization.
* -30 is added to treat the number as lying between 1.0 and 2.0;
* i.e. after adding -30 to the q format, the decimal point sits
* just right of the MSB (i.e. after the sign bit and the first MSB),
* at the right side of bit 30.
*/
qN = qN + s16norm - 30;
/* take the table index as the LOG2_LOG_TABLE_SIZE bits right of the
* MSB */
s16tableIndex = (s16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE)));
/* remove the MSB; the MSB is always 1 after normalization. */
s16tableIndex =
s16tableIndex & (s16) ((1 << LOG2_LOG_TABLE_SIZE) - 1);
/* remove the (1 + LOG2_LOG_TABLE_SIZE) MSBs of N. */
N = N & ((1 << (32 - (2 + LOG2_LOG_TABLE_SIZE))) - 1);
/* take the offset as the 16 MSBs after the table index. */
u16offset = (u16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE + 16)));
/* look up the log value in the table. */
s32log = log_table[s16tableIndex]; /* q.15 format */
/* interpolate using the offset. q.15 format. */
s16errorApproximation = (s16) qm_mulu16(u16offset,
(u16) (log_table[s16tableIndex + 1] -
log_table[s16tableIndex]));
/* q.15 format */
s32log = qm_add16((s16) s32log, s16errorApproximation);
/* adjust for the qformat of the N as
* log2(mag * 2^x) = log2(mag) + x
*/
s32log = qm_add32(s32log, ((s32) -qN) << 15); /* q.15 format */
/* normalize the result. */
s16norm = qm_norm32(s32log);
/* bring all the important bits into lower 16 bits */
/* q.15+s16norm-16 format */
s32log = qm_shl32(s32log, s16norm - 16);
/* compute the log10(N) by multiplying log2(N) with log10(2).
* as log10(mag * 2^x) = log2(mag * 2^x) * log10(2)
* log10N in q.15+s16norm-16+1 (LOG10_2 is in q.16)
*/
*log10N = qm_muls16((s16) s32log, (s16) LOG10_2);
/* write the q format of the result. */
*qLog10N = 15 + s16norm - 16 + 1;
}
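/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * calling qm_log10(). For n = 1000 with q format 0, log10N / 2^qLog10N is
 * about 3.0, so the hypothetical helper below returns roughly 300
 * (hundredths of log10(n)).
 */
static s32 __maybe_unused example_log10_x100(s32 n)
{
	s16 log10n, qlog10n;

	/* log10(n) is returned as log10n * 2^-qlog10n */
	qm_log10(n, 0, &log10n, &qlog10n);

	return ((s32)log10n * 100) >> qlog10n;
}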
|
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
#ifndef _LINUX_PACKING_H
#define _LINUX_PACKING_H
#include <linux/types.h>
#include <linux/bitops.h>
#define QUIRK_MSB_ON_THE_RIGHT BIT(0)
#define QUIRK_LITTLE_ENDIAN BIT(1)
#define QUIRK_LSW32_IS_FIRST BIT(2)
enum packing_op {
PACK,
UNPACK,
};
int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
enum packing_op op, u8 quirks);
int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen,
u8 quirks);
int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit,
size_t pbuflen, u8 quirks);
#endif
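/*
 * Illustrative only -- a minimal sketch of the pack()/unpack() API above,
 * under assumed semantics: bit positions index the packed buffer with
 * startbit >= endbit, big-endian layout unless quirks say otherwise.
 */
#include <linux/packing.h>

static int __maybe_unused example_pack_field(void)
{
	u8 pbuf[4] = {};
	u64 uval = 0xABCD;
	int err;

	/* place the 16-bit value in bits 31..16 of the 4-byte buffer */
	err = pack(pbuf, uval, 31, 16, sizeof(pbuf), 0);
	if (err)
		return err;

	/* read it back */
	return unpack(pbuf, &uval, 31, 16, sizeof(pbuf), 0);
}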
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*******************************************************************************
*
* CTU CAN FD IP Core
*
* Copyright (C) 2015-2018 Ondrej Ille <[email protected]> FEE CTU
* Copyright (C) 2018-2021 Ondrej Ille <[email protected]> self-funded
* Copyright (C) 2018-2019 Martin Jerabek <[email protected]> FEE CTU
* Copyright (C) 2018-2021 Pavel Pisa <[email protected]> FEE CTU/self-funded
*
* Project advisors:
* Jiri Novak <[email protected]>
* Pavel Pisa <[email protected]>
*
* Department of Measurement (http://meas.fel.cvut.cz/)
* Faculty of Electrical Engineering (http://www.fel.cvut.cz)
* Czech Technical University (http://www.cvut.cz/)
******************************************************************************/
/* This file is autogenerated, DO NOT EDIT! */
#ifndef __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
#define __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
#include <linux/bits.h>
/* CAN_Frame_format memory map */
enum ctu_can_fd_can_frame_format {
CTUCANFD_FRAME_FORMAT_W = 0x0,
CTUCANFD_IDENTIFIER_W = 0x4,
CTUCANFD_TIMESTAMP_L_W = 0x8,
CTUCANFD_TIMESTAMP_U_W = 0xc,
CTUCANFD_DATA_1_4_W = 0x10,
CTUCANFD_DATA_5_8_W = 0x14,
CTUCANFD_DATA_61_64_W = 0x4c,
};
/* CAN_FD_Frame_format memory region */
/* FRAME_FORMAT_W registers */
#define REG_FRAME_FORMAT_W_DLC GENMASK(3, 0)
#define REG_FRAME_FORMAT_W_RTR BIT(5)
#define REG_FRAME_FORMAT_W_IDE BIT(6)
#define REG_FRAME_FORMAT_W_FDF BIT(7)
#define REG_FRAME_FORMAT_W_BRS BIT(9)
#define REG_FRAME_FORMAT_W_ESI_RSV BIT(10)
#define REG_FRAME_FORMAT_W_RWCNT GENMASK(15, 11)
/* IDENTIFIER_W registers */
#define REG_IDENTIFIER_W_IDENTIFIER_EXT GENMASK(17, 0)
#define REG_IDENTIFIER_W_IDENTIFIER_BASE GENMASK(28, 18)
/* TIMESTAMP_L_W registers */
#define REG_TIMESTAMP_L_W_TIME_STAMP_L_W GENMASK(31, 0)
/* TIMESTAMP_U_W registers */
#define REG_TIMESTAMP_U_W_TIMESTAMP_U_W GENMASK(31, 0)
/* DATA_1_4_W registers */
#define REG_DATA_1_4_W_DATA_1 GENMASK(7, 0)
#define REG_DATA_1_4_W_DATA_2 GENMASK(15, 8)
#define REG_DATA_1_4_W_DATA_3 GENMASK(23, 16)
#define REG_DATA_1_4_W_DATA_4 GENMASK(31, 24)
/* DATA_5_8_W registers */
#define REG_DATA_5_8_W_DATA_5 GENMASK(7, 0)
#define REG_DATA_5_8_W_DATA_6 GENMASK(15, 8)
#define REG_DATA_5_8_W_DATA_7 GENMASK(23, 16)
#define REG_DATA_5_8_W_DATA_8 GENMASK(31, 24)
/* DATA_61_64_W registers */
#define REG_DATA_61_64_W_DATA_61 GENMASK(7, 0)
#define REG_DATA_61_64_W_DATA_62 GENMASK(15, 8)
#define REG_DATA_61_64_W_DATA_63 GENMASK(23, 16)
#define REG_DATA_61_64_W_DATA_64 GENMASK(31, 24)
#endif
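/*
 * Illustrative only -- a sketch of decoding FRAME_FORMAT_W with the
 * generic bitfield helpers; assumes the register word has already been
 * read from the core (e.g. via ioread32()).
 */
#include <linux/bitfield.h>

static inline u8 example_frame_dlc(u32 frame_format_w)
{
	/* extract the 4-bit DLC field defined above */
	return FIELD_GET(REG_FRAME_FORMAT_W_DLC, frame_format_w);
}

static inline bool example_frame_is_fd(u32 frame_format_w)
{
	return frame_format_w & REG_FRAME_FORMAT_W_FDF;
}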
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2022 Theobroma Systems Design und Consulting GmbH
*/
/dts-v1/;
#include "px30.dtsi"
#include <dt-bindings/leds/common.h>
/ {
aliases {
i2c10 = &i2c10;
mmc0 = &emmc;
mmc1 = &sdio;
rtc0 = &rtc_twi;
rtc1 = &rk809;
};
/* allows userspace to control the gate of the ATtiny UPDI pass FET via sysfs */
attiny-updi-gate-regulator {
compatible = "regulator-output";
vout-supply = <&vg_attiny_updi>;
};
emmc_pwrseq: emmc-pwrseq {
compatible = "mmc-pwrseq-emmc";
pinctrl-0 = <&emmc_reset>;
pinctrl-names = "default";
reset-gpios = <&gpio1 RK_PB3 GPIO_ACTIVE_HIGH>;
};
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&module_led_pin>;
status = "okay";
module_led: led-0 {
gpios = <&gpio1 RK_PB0 GPIO_ACTIVE_HIGH>;
function = LED_FUNCTION_HEARTBEAT;
linux,default-trigger = "heartbeat";
color = <LED_COLOR_ID_AMBER>;
};
};
vcc5v0_sys: regulator-vccsys {
compatible = "regulator-fixed";
regulator-name = "vcc5v0_sys";
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
};
};
&cpu0 {
cpu-supply = <&vdd_arm>;
};
&cpu1 {
cpu-supply = <&vdd_arm>;
};
&cpu2 {
cpu-supply = <&vdd_arm>;
};
&cpu3 {
cpu-supply = <&vdd_arm>;
};
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
mmc-hs200-1_8v;
mmc-pwrseq = <&emmc_pwrseq>;
non-removable;
vmmc-supply = <&vcc_3v3>;
vqmmc-supply = <&vcc_emmc>;
status = "okay";
};
/* On-module TI DP83825I PHY but no connector; enable in the carrier board */
&gmac {
snps,reset-gpio = <&gpio3 RK_PB0 GPIO_ACTIVE_LOW>;
snps,reset-active-low;
snps,reset-delays-us = <0 50000 50000>;
phy-supply = <&vcc_3v3>;
clock_in_out = "output";
};
&gpio2 {
/*
* The Qseven BIOS_DISABLE signal on the PX30-µQ7 keeps the on-module
* eMMC powered-down initially (in fact it keeps the reset signal
* asserted). The BIOS_DISABLE_OVERRIDE pin allows re-enabling the eMMC
* after the SPL has been booted from SD card.
*/
bios-disable-override-hog {
gpios = <RK_PB5 GPIO_ACTIVE_LOW>;
output-high;
line-name = "bios_disable_override";
gpio-hog;
};
/*
* The BIOS_DISABLE hog is a feedback pin for the actual status of the
* signal, ignoring the BIOS_DISABLE_OVERRIDE logic. This usually
* represents the state of a switch on the baseboard.
*/
bios-disable-n-hog {
gpios = <RK_PC2 GPIO_ACTIVE_LOW>;
line-name = "bios_disable";
input;
gpio-hog;
};
};
&gpu {
status = "okay";
};
&i2c0 {
status = "okay";
rk809: pmic@20 {
compatible = "rockchip,rk809";
reg = <0x20>;
interrupt-parent = <&gpio0>;
interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
pinctrl-0 = <&pmic_int>;
pinctrl-names = "default";
#clock-cells = <0>;
clock-output-names = "xin32k";
system-power-controller;
wakeup-source;
vcc1-supply = <&vcc5v0_sys>;
vcc2-supply = <&vcc5v0_sys>;
vcc3-supply = <&vcc5v0_sys>;
vcc4-supply = <&vcc5v0_sys>;
vcc5-supply = <&vcc_3v3>;
vcc6-supply = <&vcc_3v3>;
vcc7-supply = <&vcc_3v3>;
vcc9-supply = <&vcc5v0_sys>;
regulators {
vdd_log: DCDC_REG1 {
regulator-name = "vdd_log";
regulator-min-microvolt = <950000>;
regulator-max-microvolt = <1350000>;
regulator-ramp-delay = <6001>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
regulator-suspend-microvolt = <950000>;
};
};
vdd_arm: DCDC_REG2 {
regulator-name = "vdd_arm";
regulator-min-microvolt = <950000>;
regulator-max-microvolt = <1350000>;
regulator-ramp-delay = <6001>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-off-in-suspend;
regulator-suspend-microvolt = <950000>;
};
};
vcc_ddr: DCDC_REG3 {
regulator-name = "vcc_ddr";
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
};
};
vcc_3v0_1v8: vcc_emmc: DCDC_REG4 {
regulator-name = "vcc_3v0_1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3000000>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
regulator-suspend-microvolt = <3000000>;
};
};
vcc_3v3: DCDC_REG5 {
regulator-name = "vcc_3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
regulator-suspend-microvolt = <3300000>;
};
};
vcc_1v8: LDO_REG2 {
regulator-name = "vcc_1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
regulator-suspend-microvolt = <1800000>;
};
};
vcc_1v0: LDO_REG3 {
regulator-name = "vcc_1v0";
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
regulator-suspend-microvolt = <1000000>;
};
};
vccio_sd: LDO_REG5 {
regulator-name = "vccio_sd";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
regulator-suspend-microvolt = <3300000>;
};
};
vcc_lcd: LDO_REG7 {
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
regulator-name = "vcc_lcd";
regulator-state-mem {
regulator-off-in-suspend;
regulator-suspend-microvolt = <1000000>;
};
};
vcc_1v8_lcd: LDO_REG8 {
regulator-name = "vcc_1v8_lcd";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
regulator-suspend-microvolt = <1800000>;
};
};
vcca_1v8: LDO_REG9 {
regulator-name = "vcca_1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-off-in-suspend;
regulator-suspend-microvolt = <1800000>;
};
};
/* supplies the gate of the ATtiny UPDI pass FET */
vg_attiny_updi: SWITCH_REG1 {
regulator-name = "vg_attiny_updi";
};
};
};
};
&i2c1 {
status = "okay";
/* SE05x is limited to Fast Mode */
clock-frequency = <400000>;
fan: fan@18 {
compatible = "tsd,mule", "ti,amc6821";
reg = <0x18>;
i2c-mux {
compatible = "tsd,mule-i2c-mux";
#address-cells = <1>;
#size-cells = <0>;
i2c10: i2c@0 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
rtc_twi: rtc@6f {
compatible = "isil,isl1208";
reg = <0x6f>;
};
};
};
};
};
&i2c3 {
status = "okay";
};
&i2s0_8ch {
rockchip,trcm-sync-tx-only;
pinctrl-0 = <&i2s0_8ch_sclktx &i2s0_8ch_lrcktx
&i2s0_8ch_sdo0 &i2s0_8ch_sdi0>;
};
&io_domains {
vccio1-supply = <&vcc_3v3>;
vccio2-supply = <&vccio_sd>;
vccio3-supply = <&vcc_3v3>;
vccio4-supply = <&vcc_3v3>;
vccio5-supply = <&vcc_3v3>;
vccio6-supply = <&vcc_emmc>;
vccio-oscgpi-supply = <&vcc_3v3>;
status = "okay";
};
&pinctrl {
emmc {
emmc_reset: emmc-reset {
rockchip,pins = <1 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
leds {
module_led_pin: module-led-pin {
rockchip,pins = <1 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
pmic_int: pmic-int {
rockchip,pins =
<0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
&pmu_io_domains {
pmuio1-supply = <&vcc_3v3>;
pmuio2-supply = <&vcc_3v3>;
status = "okay";
};
&saradc {
vref-supply = <&vcc_1v8>;
status = "okay";
};
&sdmmc {
vqmmc-supply = <&vccio_sd>;
};
&tsadc {
status = "okay";
};
&u2phy {
status = "okay";
};
&u2phy_host {
status = "okay";
};
/* Mule UCAN */
&usb_host0_ehci {
status = "okay";
};
&usb_host0_ohci {
status = "okay";
};
&wdt {
status = "okay";
};
|
// SPDX-License-Identifier: GPL-2.0
/*
* NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
* (SSE2 accelerated version)
*
* Copyright 2018 Google LLC
*/
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/simd.h>
asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len,
__le64 hash[NH_NUM_PASSES]);
static int nhpoly1305_sse2_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
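	/*
	 * Process at most 4K at a time: kernel_fpu_begin() disables
	 * preemption, so bounding each FPU section keeps scheduling
	 * latency in check.
	 */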
do {
unsigned int n = min_t(unsigned int, srclen, SZ_4K);
kernel_fpu_begin();
crypto_nhpoly1305_update_helper(desc, src, n, nh_sse2);
kernel_fpu_end();
src += n;
srclen -= n;
} while (srclen);
return 0;
}
static int nhpoly1305_sse2_digest(struct shash_desc *desc,
const u8 *src, unsigned int srclen, u8 *out)
{
return crypto_nhpoly1305_init(desc) ?:
nhpoly1305_sse2_update(desc, src, srclen) ?:
crypto_nhpoly1305_final(desc, out);
}
static struct shash_alg nhpoly1305_alg = {
.base.cra_name = "nhpoly1305",
.base.cra_driver_name = "nhpoly1305-sse2",
.base.cra_priority = 200,
.base.cra_ctxsize = sizeof(struct nhpoly1305_key),
.base.cra_module = THIS_MODULE,
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_nhpoly1305_init,
.update = nhpoly1305_sse2_update,
.final = crypto_nhpoly1305_final,
.digest = nhpoly1305_sse2_digest,
.setkey = crypto_nhpoly1305_setkey,
.descsize = sizeof(struct nhpoly1305_state),
};
static int __init nhpoly1305_mod_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2))
return -ENODEV;
return crypto_register_shash(&nhpoly1305_alg);
}
static void __exit nhpoly1305_mod_exit(void)
{
crypto_unregister_shash(&nhpoly1305_alg);
}
module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (SSE2-accelerated)");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <[email protected]>");
MODULE_ALIAS_CRYPTO("nhpoly1305");
MODULE_ALIAS_CRYPTO("nhpoly1305-sse2");
|
/*
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __DRM_EDID_H__
#define __DRM_EDID_H__
#include <linux/types.h>
enum hdmi_quantization_range;
struct drm_connector;
struct drm_device;
struct drm_display_mode;
struct drm_edid;
struct drm_printer;
struct hdmi_avi_infoframe;
struct hdmi_vendor_infoframe;
struct i2c_adapter;
#define EDID_LENGTH 128
#define DDC_ADDR 0x50
#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
#define CEA_EXT 0x02
#define VTB_EXT 0x10
#define DI_EXT 0x40
#define LS_EXT 0x50
#define MI_EXT 0x60
#define DISPLAYID_EXT 0x70
struct est_timings {
u8 t1;
u8 t2;
u8 mfg_rsvd;
} __packed;
/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
#define EDID_TIMING_ASPECT_SHIFT 6
#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT)
/* need to add 60 */
#define EDID_TIMING_VFREQ_SHIFT 0
#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT)
struct std_timing {
u8 hsize; /* need to multiply by 8 then add 248 */
u8 vfreq_aspect;
} __packed;
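/* e.g. hsize = 0x31 -> 0x31 * 8 + 248 = 640 pixels of horizontal active */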
#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
#define DRM_EDID_PT_STEREO (1 << 5)
#define DRM_EDID_PT_INTERLACED (1 << 7)
/* If detailed data is pixel timing */
struct detailed_pixel_timing {
u8 hactive_lo;
u8 hblank_lo;
u8 hactive_hblank_hi;
u8 vactive_lo;
u8 vblank_lo;
u8 vactive_vblank_hi;
u8 hsync_offset_lo;
u8 hsync_pulse_width_lo;
u8 vsync_offset_pulse_width_lo;
u8 hsync_vsync_offset_pulse_width_hi;
u8 width_mm_lo;
u8 height_mm_lo;
u8 width_height_mm_hi;
u8 hborder;
u8 vborder;
u8 misc;
} __packed;
/* If it's not pixel timing, it'll be one of the below */
struct detailed_data_string {
u8 str[13];
} __packed;
#define DRM_EDID_RANGE_OFFSET_MIN_VFREQ (1 << 0) /* 1.4 */
#define DRM_EDID_RANGE_OFFSET_MAX_VFREQ (1 << 1) /* 1.4 */
#define DRM_EDID_RANGE_OFFSET_MIN_HFREQ (1 << 2) /* 1.4 */
#define DRM_EDID_RANGE_OFFSET_MAX_HFREQ (1 << 3) /* 1.4 */
#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG 0x00 /* 1.3 */
#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG 0x01 /* 1.4 */
#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02 /* 1.3 */
#define DRM_EDID_CVT_SUPPORT_FLAG 0x04 /* 1.4 */
#define DRM_EDID_CVT_FLAGS_STANDARD_BLANKING (1 << 3)
#define DRM_EDID_CVT_FLAGS_REDUCED_BLANKING (1 << 4)
struct detailed_data_monitor_range {
u8 min_vfreq;
u8 max_vfreq;
u8 min_hfreq_khz;
u8 max_hfreq_khz;
u8 pixel_clock_mhz; /* need to multiply by 10 */
u8 flags;
union {
struct {
u8 reserved;
u8 hfreq_start_khz; /* need to multiply by 2 */
u8 c; /* need to divide by 2 */
__le16 m;
u8 k;
u8 j; /* need to divide by 2 */
} __packed gtf2;
struct {
u8 version;
u8 data1; /* high 6 bits: extra clock resolution */
u8 data2; /* plus low 2 of above: max hactive */
u8 supported_aspects;
u8 flags; /* preferred aspect and blanking support */
u8 supported_scalings;
u8 preferred_refresh;
} __packed cvt;
} __packed formula;
} __packed;
struct detailed_data_wpindex {
u8 white_yx_lo; /* Lower 2 bits each */
u8 white_x_hi;
u8 white_y_hi;
u8 gamma; /* need to divide by 100 then add 1 */
} __packed;
struct detailed_data_color_point {
u8 windex1;
u8 wpindex1[3];
u8 windex2;
u8 wpindex2[3];
} __packed;
struct cvt_timing {
u8 code[3];
} __packed;
struct detailed_non_pixel {
u8 pad1;
u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
fb=color point data, fa=standard timing data,
f9=undefined, f8=mfg. reserved */
u8 pad2;
union {
struct detailed_data_string str;
struct detailed_data_monitor_range range;
struct detailed_data_wpindex color;
struct std_timing timings[6];
struct cvt_timing cvt[4];
} __packed data;
} __packed;
#define EDID_DETAIL_EST_TIMINGS 0xf7
#define EDID_DETAIL_CVT_3BYTE 0xf8
#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
#define EDID_DETAIL_STD_MODES 0xfa
#define EDID_DETAIL_MONITOR_CPDATA 0xfb
#define EDID_DETAIL_MONITOR_NAME 0xfc
#define EDID_DETAIL_MONITOR_RANGE 0xfd
#define EDID_DETAIL_MONITOR_STRING 0xfe
#define EDID_DETAIL_MONITOR_SERIAL 0xff
struct detailed_timing {
__le16 pixel_clock; /* stored in units of 10 kHz */
union {
struct detailed_pixel_timing pixel_data;
struct detailed_non_pixel other_data;
} __packed data;
} __packed;
#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2)
#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
#define DRM_EDID_INPUT_DIGITAL (1 << 7)
#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_MASK (7 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_UNDEF (0 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_DVI (1 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_MDDI (4 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_DP (5 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_DFP_1_X (1 << 0) /* 1.3 */
#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0) /* 1.2 */
#define DRM_EDID_FEATURE_CONTINUOUS_FREQ (1 << 0) /* 1.4 */
#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
/* If analog */
#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
/* If digital */
#define DRM_EDID_FEATURE_COLOR_MASK (3 << 3)
#define DRM_EDID_FEATURE_RGB (0 << 3)
#define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3)
#define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3)
#define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */
#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
#define DRM_EDID_HDMI_DC_48 (1 << 6)
#define DRM_EDID_HDMI_DC_36 (1 << 5)
#define DRM_EDID_HDMI_DC_30 (1 << 4)
#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
/* YCBCR 420 deep color modes */
#define DRM_EDID_YCBCR420_DC_48 (1 << 2)
#define DRM_EDID_YCBCR420_DC_36 (1 << 1)
#define DRM_EDID_YCBCR420_DC_30 (1 << 0)
#define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \
DRM_EDID_YCBCR420_DC_36 | \
DRM_EDID_YCBCR420_DC_30)
/* HDMI 2.1 additional fields */
#define DRM_EDID_MAX_FRL_RATE_MASK 0xf0
#define DRM_EDID_FAPA_START_LOCATION (1 << 0)
#define DRM_EDID_ALLM (1 << 1)
#define DRM_EDID_FVA (1 << 2)
/* Deep Color specific */
#define DRM_EDID_DC_30BIT_420 (1 << 0)
#define DRM_EDID_DC_36BIT_420 (1 << 1)
#define DRM_EDID_DC_48BIT_420 (1 << 2)
/* VRR specific */
#define DRM_EDID_CNMVRR (1 << 3)
#define DRM_EDID_CINEMA_VRR (1 << 4)
#define DRM_EDID_MDELTA (1 << 5)
#define DRM_EDID_VRR_MAX_UPPER_MASK 0xc0
#define DRM_EDID_VRR_MAX_LOWER_MASK 0xff
#define DRM_EDID_VRR_MIN_MASK 0x3f
/* DSC specific */
#define DRM_EDID_DSC_10BPC (1 << 0)
#define DRM_EDID_DSC_12BPC (1 << 1)
#define DRM_EDID_DSC_16BPC (1 << 2)
#define DRM_EDID_DSC_ALL_BPP (1 << 3)
#define DRM_EDID_DSC_NATIVE_420 (1 << 6)
#define DRM_EDID_DSC_1P2 (1 << 7)
#define DRM_EDID_DSC_MAX_FRL_RATE_MASK 0xf0
#define DRM_EDID_DSC_MAX_SLICES 0xf
#define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f
struct drm_edid_product_id {
__be16 manufacturer_name;
__le16 product_code;
__le32 serial_number;
u8 week_of_manufacture;
u8 year_of_manufacture;
} __packed;
struct edid {
u8 header[8];
/* Vendor & product info */
union {
struct drm_edid_product_id product_id;
struct {
u8 mfg_id[2];
u8 prod_code[2];
u32 serial; /* FIXME: byte order */
u8 mfg_week;
u8 mfg_year;
} __packed;
} __packed;
/* EDID version */
u8 version;
u8 revision;
/* Display info: */
u8 input;
u8 width_cm;
u8 height_cm;
u8 gamma;
u8 features;
/* Color characteristics */
u8 red_green_lo;
u8 blue_white_lo;
u8 red_x;
u8 red_y;
u8 green_x;
u8 green_y;
u8 blue_x;
u8 blue_y;
u8 white_x;
u8 white_y;
/* Est. timings and mfg rsvd timings */
struct est_timings established_timings;
/* Standard timings 1-8 */
struct std_timing standard_timings[8];
/* Detailed timings 1-4 */
struct detailed_timing detailed_timings[4];
/* Number of 128 byte ext. blocks */
u8 extensions;
/* Checksum */
u8 checksum;
} __packed;
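/*
 * Consistency note (EDID convention): the trailing 'checksum' byte is
 * chosen so that all EDID_LENGTH (128) bytes of the block sum to 0
 * modulo 256, which is the property the validity helpers declared below
 * rely on.
 */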
/* EDID matching */
struct drm_edid_ident {
/* ID encoded by drm_edid_encode_panel_id() */
u32 panel_id;
const char *name;
};
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
/* Short Audio Descriptor */
struct cea_sad {
u8 format;
u8 channels; /* max number of channels - 1 */
u8 freq;
u8 byte2; /* meaning depends on format */
};
int drm_edid_to_sad(const struct edid *edid, struct cea_sad **sads);
int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
const struct drm_connector *connector,
const struct drm_display_mode *mode);
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_connector *connector,
const struct drm_display_mode *mode);
void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
const struct drm_connector *connector,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range);
/**
* drm_edid_decode_mfg_id - Decode the manufacturer ID
* @mfg_id: The manufacturer ID
* @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0'
* termination
*/
static inline const char *drm_edid_decode_mfg_id(u16 mfg_id, char vend[4])
{
vend[0] = '@' + ((mfg_id >> 10) & 0x1f);
vend[1] = '@' + ((mfg_id >> 5) & 0x1f);
vend[2] = '@' + ((mfg_id >> 0) & 0x1f);
vend[3] = '\0';
return vend;
}
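/*
 * Worked example (illustrative): the PNP ID "DEL" packs as
 * ('D'-'@') << 10 | ('E'-'@') << 5 | ('L'-'@') = (4 << 10) | (5 << 5) | 12
 * = 0x10ac, so drm_edid_decode_mfg_id(0x10ac, buf) stores "DEL" in buf.
 */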
/**
* drm_edid_encode_panel_id - Encode an ID for matching against drm_edid_get_panel_id()
* @vend_chr_0: First character of the vendor string.
* @vend_chr_1: Second character of the vendor string.
* @vend_chr_2: Third character of the vendor string.
* @product_id: The 16-bit product ID.
*
* This is a macro so that it can be calculated at compile time and used
* as an initializer.
*
* For instance:
* drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08) => 0x09e52d08
*
* Return: a 32-bit ID per panel.
*/
#define drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, product_id) \
((((u32)(vend_chr_0) - '@') & 0x1f) << 26 | \
(((u32)(vend_chr_1) - '@') & 0x1f) << 21 | \
(((u32)(vend_chr_2) - '@') & 0x1f) << 16 | \
((product_id) & 0xffff))
/**
* drm_edid_decode_panel_id - Decode a panel ID from drm_edid_encode_panel_id()
* @panel_id: The panel ID to decode.
* @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0'
* termination
* @product_id: The product ID will be returned here.
*
* For instance, after:
* drm_edid_decode_panel_id(0x09e52d08, vend, &product_id)
* These will be true:
* vend[0] = 'B'
* vend[1] = 'O'
* vend[2] = 'E'
* vend[3] = '\0'
* product_id = 0x2d08
*/
static inline void drm_edid_decode_panel_id(u32 panel_id, char vend[4], u16 *product_id)
{
*product_id = (u16)(panel_id & 0xffff);
drm_edid_decode_mfg_id(panel_id >> 16, vend);
}
bool drm_probe_ddc(struct i2c_adapter *adapter);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
struct edid *drm_edid_duplicate(const struct edid *edid);
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
int drm_edid_override_connector_update(struct drm_connector *connector);
u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
bool drm_detect_hdmi_monitor(const struct edid *edid);
bool drm_detect_monitor_audio(const struct edid *edid);
enum hdmi_quantization_range
drm_default_rgb_quant_range(const struct drm_display_mode *mode);
int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
int drm_edid_header_is_valid(const void *edid);
bool drm_edid_is_valid(struct edid *edid);
void drm_edid_get_monitor_name(const struct edid *edid, char *name,
int buflen);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
bool rb);
struct drm_display_mode *
drm_display_mode_from_cea_vic(struct drm_device *dev,
u8 video_code);
/* Interface based on struct drm_edid */
const struct drm_edid *drm_edid_alloc(const void *edid, size_t size);
const struct drm_edid *drm_edid_dup(const struct drm_edid *drm_edid);
void drm_edid_free(const struct drm_edid *drm_edid);
bool drm_edid_valid(const struct drm_edid *drm_edid);
const struct edid *drm_edid_raw(const struct drm_edid *drm_edid);
const struct drm_edid *drm_edid_read(struct drm_connector *connector);
const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector,
struct i2c_adapter *adapter);
const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector,
int (*read_block)(void *context, u8 *buf, unsigned int block, size_t len),
void *context);
const struct drm_edid *drm_edid_read_base_block(struct i2c_adapter *adapter);
const struct drm_edid *drm_edid_read_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
int drm_edid_connector_update(struct drm_connector *connector,
const struct drm_edid *edid);
int drm_edid_connector_add_modes(struct drm_connector *connector);
bool drm_edid_is_digital(const struct drm_edid *drm_edid);
void drm_edid_get_product_id(const struct drm_edid *drm_edid,
struct drm_edid_product_id *id);
void drm_edid_print_product_id(struct drm_printer *p,
const struct drm_edid_product_id *id, bool raw);
u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid);
bool drm_edid_match(const struct drm_edid *drm_edid,
const struct drm_edid_ident *ident);
#endif /* __DRM_EDID_H__ */
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*/
#ifndef __IA_CSS_CNR_HOST_H
#define __IA_CSS_CNR_HOST_H
#include "ia_css_cnr_param.h"
void
ia_css_init_cnr_state(
void/*struct sh_css_isp_cnr_vmem_state*/ * state,
size_t size);
#endif /* __IA_CSS_CNR_HOST_H */
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"
/**
* iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
enum iavf_status ret_code;
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
iavf_mem_atq_ring,
(hw->aq.num_asq_entries *
sizeof(struct iavf_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries *
sizeof(struct iavf_asq_cmd_details)));
if (ret_code) {
iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code;
}
return ret_code;
}
/**
* iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
* @hw: pointer to the hardware structure
**/
static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
enum iavf_status ret_code;
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
iavf_mem_arq_ring,
(hw->aq.num_arq_entries *
sizeof(struct iavf_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
return ret_code;
}
/**
* iavf_free_adminq_asq - Free Admin Queue send rings
* @hw: pointer to the hardware structure
*
* This assumes the posted send buffers have already been cleaned
* and de-allocated
**/
static void iavf_free_adminq_asq(struct iavf_hw *hw)
{
iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
* iavf_free_adminq_arq - Free Admin Queue receive rings
* @hw: pointer to the hardware structure
*
* This assumes the posted receive buffers have already been cleaned
* and de-allocated
**/
static void iavf_free_adminq_arq(struct iavf_hw *hw)
{
iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
* iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
* @hw: pointer to the hardware structure
**/
static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
struct iavf_aq_desc *desc;
struct iavf_dma_mem *bi;
enum iavf_status ret_code;
int i;
/* We'll be allocating the buffer info memory first, then we can
* allocate the mapped buffers for the event processing
*/
/* buffer_info structures do not need alignment */
ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
(hw->aq.num_arq_entries *
sizeof(struct iavf_dma_mem)));
if (ret_code)
goto alloc_arq_bufs;
hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_arq_entries; i++) {
bi = &hw->aq.arq.r.arq_bi[i];
ret_code = iavf_allocate_dma_mem(hw, bi,
iavf_mem_arq_buf,
hw->aq.arq_buf_size,
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
goto unwind_alloc_arq_bufs;
/* now configure the descriptors for use */
desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design, there is no
* register for buffer size configuration
*/
desc->datalen = cpu_to_le16((u16)bi->size);
desc->retval = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
desc->params.external.addr_high =
cpu_to_le32(upper_32_bits(bi->pa));
desc->params.external.addr_low =
cpu_to_le32(lower_32_bits(bi->pa));
desc->params.external.param0 = 0;
desc->params.external.param1 = 0;
}
alloc_arq_bufs:
return ret_code;
unwind_alloc_arq_bufs:
/* don't try to free the one that failed... */
i--;
for (; i >= 0; i--)
iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
return ret_code;
}
/**
* iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
* @hw: pointer to the hardware structure
**/
static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
struct iavf_dma_mem *bi;
enum iavf_status ret_code;
int i;
/* No mapped memory needed yet, just the buffer info structures */
ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
(hw->aq.num_asq_entries *
sizeof(struct iavf_dma_mem)));
if (ret_code)
goto alloc_asq_bufs;
hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_asq_entries; i++) {
bi = &hw->aq.asq.r.asq_bi[i];
ret_code = iavf_allocate_dma_mem(hw, bi,
iavf_mem_asq_buf,
hw->aq.asq_buf_size,
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
goto unwind_alloc_asq_bufs;
}
alloc_asq_bufs:
return ret_code;
unwind_alloc_asq_bufs:
/* don't try to free the one that failed... */
i--;
for (; i >= 0; i--)
iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
return ret_code;
}
/**
* iavf_free_arq_bufs - Free receive queue buffer info elements
* @hw: pointer to the hardware structure
**/
static void iavf_free_arq_bufs(struct iavf_hw *hw)
{
int i;
/* free descriptors */
for (i = 0; i < hw->aq.num_arq_entries; i++)
iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
/* free the descriptor memory */
iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
/* free the dma header */
iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
* iavf_free_asq_bufs - Free send queue buffer info elements
* @hw: pointer to the hardware structure
**/
static void iavf_free_asq_bufs(struct iavf_hw *hw)
{
int i;
/* only unmap if the address is non-NULL */
for (i = 0; i < hw->aq.num_asq_entries; i++)
if (hw->aq.asq.r.asq_bi[i].pa)
iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
/* free the buffer info list */
iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
/* free the descriptor memory */
iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
/* free the dma header */
iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
* iavf_config_asq_regs - configure ASQ registers
* @hw: pointer to the hardware structure
*
* Configure base address and length registers for the transmit queue
**/
static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
wr32(hw, IAVF_VF_ATQH1, 0);
wr32(hw, IAVF_VF_ATQT1, 0);
/* set starting point */
wr32(hw, IAVF_VF_ATQLEN1, (hw->aq.num_asq_entries |
IAVF_VF_ATQLEN1_ATQENABLE_MASK));
wr32(hw, IAVF_VF_ATQBAL1, lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, IAVF_VF_ATQBAH1, upper_32_bits(hw->aq.asq.desc_buf.pa));
/* Check one register to verify that config was applied */
reg = rd32(hw, IAVF_VF_ATQBAL1);
if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
return ret_code;
}
/**
* iavf_config_arq_regs - ARQ register configuration
* @hw: pointer to the hardware structure
*
* Configure base address and length registers for the receive (event) queue
**/
static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
wr32(hw, IAVF_VF_ARQH1, 0);
wr32(hw, IAVF_VF_ARQT1, 0);
/* set starting point */
wr32(hw, IAVF_VF_ARQLEN1, (hw->aq.num_arq_entries |
IAVF_VF_ARQLEN1_ARQENABLE_MASK));
wr32(hw, IAVF_VF_ARQBAL1, lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, IAVF_VF_ARQBAH1, upper_32_bits(hw->aq.arq.desc_buf.pa));
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, IAVF_VF_ARQT1, hw->aq.num_arq_entries - 1);
/* Check one register to verify that config was applied */
reg = rd32(hw, IAVF_VF_ARQBAL1);
if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
return ret_code;
}
/**
* iavf_init_asq - main initialization routine for ASQ
* @hw: pointer to the hardware structure
*
* This is the main initialization routine for the Admin Send Queue
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
* - hw->aq.num_asq_entries
* - hw->aq.asq_buf_size
*
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
int i;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
ret_code = IAVF_ERR_NOT_READY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_asq_entries == 0) ||
(hw->aq.asq_buf_size == 0)) {
ret_code = IAVF_ERR_CONFIG;
goto init_adminq_exit;
}
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
/* allocate the ring memory */
ret_code = iavf_alloc_adminq_asq_ring(hw);
if (ret_code)
goto init_adminq_exit;
/* allocate buffers in the rings */
ret_code = iavf_alloc_asq_bufs(hw);
if (ret_code)
goto init_adminq_free_rings;
/* initialize base registers */
ret_code = iavf_config_asq_regs(hw);
if (ret_code)
goto init_free_asq_bufs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
init_free_asq_bufs:
for (i = 0; i < hw->aq.num_asq_entries; i++)
iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
init_adminq_free_rings:
iavf_free_adminq_asq(hw);
init_adminq_exit:
return ret_code;
}
/**
* iavf_init_arq - initialize ARQ
* @hw: pointer to the hardware structure
*
* The main initialization routine for the Admin Receive (Event) Queue.
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
* - hw->aq.num_arq_entries
* - hw->aq.arq_buf_size
*
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
int i;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
ret_code = IAVF_ERR_NOT_READY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.arq_buf_size == 0)) {
ret_code = IAVF_ERR_CONFIG;
goto init_adminq_exit;
}
hw->aq.arq.next_to_use = 0;
hw->aq.arq.next_to_clean = 0;
/* allocate the ring memory */
ret_code = iavf_alloc_adminq_arq_ring(hw);
if (ret_code)
goto init_adminq_exit;
/* allocate buffers in the rings */
ret_code = iavf_alloc_arq_bufs(hw);
if (ret_code)
goto init_adminq_free_rings;
/* initialize base registers */
ret_code = iavf_config_arq_regs(hw);
if (ret_code)
goto init_free_arq_bufs;
/* success! */
hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
init_free_arq_bufs:
for (i = 0; i < hw->aq.num_arq_entries; i++)
iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
iavf_free_adminq_arq(hw);
init_adminq_exit:
return ret_code;
}
/**
* iavf_shutdown_asq - shutdown the ASQ
* @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Send Queue
**/
static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
mutex_lock(&hw->aq.asq_mutex);
if (hw->aq.asq.count == 0) {
ret_code = IAVF_ERR_NOT_READY;
goto shutdown_asq_out;
}
/* Stop firmware AdminQ processing */
wr32(hw, IAVF_VF_ATQH1, 0);
wr32(hw, IAVF_VF_ATQT1, 0);
wr32(hw, IAVF_VF_ATQLEN1, 0);
wr32(hw, IAVF_VF_ATQBAL1, 0);
wr32(hw, IAVF_VF_ATQBAH1, 0);
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
iavf_free_asq_bufs(hw);
shutdown_asq_out:
mutex_unlock(&hw->aq.asq_mutex);
return ret_code;
}
/**
* iavf_shutdown_arq - shutdown ARQ
* @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Receive Queue
**/
static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
mutex_lock(&hw->aq.arq_mutex);
if (hw->aq.arq.count == 0) {
ret_code = IAVF_ERR_NOT_READY;
goto shutdown_arq_out;
}
/* Stop firmware AdminQ processing */
wr32(hw, IAVF_VF_ARQH1, 0);
wr32(hw, IAVF_VF_ARQT1, 0);
wr32(hw, IAVF_VF_ARQLEN1, 0);
wr32(hw, IAVF_VF_ARQBAL1, 0);
wr32(hw, IAVF_VF_ARQBAH1, 0);
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
iavf_free_arq_bufs(hw);
shutdown_arq_out:
mutex_unlock(&hw->aq.arq_mutex);
return ret_code;
}
/**
* iavf_init_adminq - main initialization routine for Admin Queue
* @hw: pointer to the hardware structure
*
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
* - hw->aq.num_asq_entries
* - hw->aq.num_arq_entries
* - hw->aq.arq_buf_size
* - hw->aq.asq_buf_size
**/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
enum iavf_status ret_code;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.num_asq_entries == 0) ||
(hw->aq.arq_buf_size == 0) ||
(hw->aq.asq_buf_size == 0)) {
ret_code = IAVF_ERR_CONFIG;
goto init_adminq_exit;
}
/* set up the ASQ command write-back timeout */
hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;
/* allocate the ASQ */
ret_code = iavf_init_asq(hw);
if (ret_code)
goto init_adminq_destroy_locks;
/* allocate the ARQ */
ret_code = iavf_init_arq(hw);
if (ret_code)
goto init_adminq_free_asq;
/* success! */
goto init_adminq_exit;
init_adminq_free_asq:
iavf_shutdown_asq(hw);
init_adminq_destroy_locks:
init_adminq_exit:
return ret_code;
}
/**
* iavf_shutdown_adminq - shutdown routine for the Admin Queue
* @hw: pointer to the hardware structure
**/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
if (iavf_check_asq_alive(hw))
iavf_aq_queue_shutdown(hw, true);
iavf_shutdown_asq(hw);
iavf_shutdown_arq(hw);
return 0;
}
/**
* iavf_clean_asq - cleans Admin send queue
* @hw: pointer to the hardware structure
*
* returns the number of free desc
**/
static u16 iavf_clean_asq(struct iavf_hw *hw)
{
struct iavf_adminq_ring *asq = &hw->aq.asq;
struct iavf_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
struct iavf_aq_desc desc_cb;
struct iavf_aq_desc *desc;
desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = IAVF_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, IAVF_VF_ATQH1) != ntc) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"ntc %d head %d.\n", ntc, rd32(hw, IAVF_VF_ATQH1));
if (details->callback) {
IAVF_ADMINQ_CALLBACK cb_func =
(IAVF_ADMINQ_CALLBACK)details->callback;
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
memset((void *)details, 0,
sizeof(struct iavf_asq_cmd_details));
ntc++;
if (ntc == asq->count)
ntc = 0;
desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = IAVF_ADMINQ_DETAILS(*asq, ntc);
}
asq->next_to_clean = ntc;
return IAVF_DESC_UNUSED(asq);
}
/**
* iavf_asq_done - check if FW has processed the Admin Send Queue
* @hw: pointer to the hw struct
*
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
**/
bool iavf_asq_done(struct iavf_hw *hw)
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
return rd32(hw, IAVF_VF_ATQH1) == hw->aq.asq.next_to_use;
}
/**
* iavf_asq_send_command - send command to Admin Queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
* @buff_size: size of buffer for indirect commands
* @cmd_details: pointer to command details structure
*
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
struct iavf_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct iavf_asq_cmd_details *cmd_details)
{
struct iavf_dma_mem *dma_buff = NULL;
struct iavf_asq_cmd_details *details;
struct iavf_aq_desc *desc_on_ring;
bool cmd_completed = false;
enum iavf_status status = 0;
u16 retval = 0;
u32 val = 0;
mutex_lock(&hw->aq.asq_mutex);
if (hw->aq.asq.count == 0) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n");
status = IAVF_ERR_QUEUE_EMPTY;
goto asq_send_command_error;
}
hw->aq.asq_last_status = IAVF_AQ_RC_OK;
val = rd32(hw, IAVF_VF_ATQH1);
if (val >= hw->aq.num_asq_entries) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
status = IAVF_ERR_QUEUE_EMPTY;
goto asq_send_command_error;
}
details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
/* If the cmd_details are defined copy the cookie. The
* cpu_to_le32 is not needed here because the data is ignored
* by the FW, only used by the driver
*/
if (details->cookie) {
desc->cookie_high =
cpu_to_le32(upper_32_bits(details->cookie));
desc->cookie_low =
cpu_to_le32(lower_32_bits(details->cookie));
}
} else {
memset(details, 0, sizeof(struct iavf_asq_cmd_details));
}
/* clear requested flags and then set additional flags if defined */
desc->flags &= ~cpu_to_le16(details->flags_dis);
desc->flags |= cpu_to_le16(details->flags_ena);
if (buff_size > hw->aq.asq_buf_size) {
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Invalid buffer size: %d.\n",
buff_size);
status = IAVF_ERR_INVALID_SIZE;
goto asq_send_command_error;
}
if (details->postpone && !details->async) {
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Async flag not set along with postpone flag");
status = IAVF_ERR_PARAM;
goto asq_send_command_error;
}
/* call clean and check queue available function to reclaim the
* descriptors that were processed by FW, the function returns the
* number of desc available
*/
/* the clean function called here could be called in a separate thread
* in case of asynchronous completions
*/
if (iavf_clean_asq(hw) == 0) {
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Error queue is full.\n");
status = IAVF_ERR_ADMIN_QUEUE_FULL;
goto asq_send_command_error;
}
/* initialize the temp desc pointer with the right desc */
desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
/* if the desc is available copy the temp desc to the right place */
*desc_on_ring = *desc;
/* if buff is not NULL assume indirect command */
if (buff) {
dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
/* copy the user buff into the respective DMA buff */
memcpy(dma_buff->va, buff, buff_size);
desc_on_ring->datalen = cpu_to_le16(buff_size);
/* Update the address values in the desc with the pa value
* for respective buffer
*/
desc_on_ring->params.external.addr_high =
cpu_to_le32(upper_32_bits(dma_buff->pa));
desc_on_ring->params.external.addr_low =
cpu_to_le32(lower_32_bits(dma_buff->pa));
}
/* bump the tail */
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
buff, buff_size);
(hw->aq.asq.next_to_use)++;
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
if (!details->postpone)
wr32(hw, IAVF_VF_ATQT1, hw->aq.asq.next_to_use);
/* if cmd_details are not defined or async flag is not set,
* we need to wait for desc write back
*/
if (!details->async && !details->postpone) {
u32 total_delay = 0;
do {
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
if (iavf_asq_done(hw))
break;
udelay(50);
total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
/* if ready, copy the desc back to temp */
if (iavf_asq_done(hw)) {
*desc = *desc_on_ring;
if (buff)
memcpy(buff, dma_buff->va, buff_size);
retval = le16_to_cpu(desc->retval);
if (retval != 0) {
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Command completed with error 0x%X.\n",
retval);
/* strip off FW internal code */
retval &= 0xff;
}
cmd_completed = true;
if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
status = 0;
else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
status = IAVF_ERR_NOT_READY;
else
status = IAVF_ERR_ADMIN_QUEUE_ERROR;
hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
}
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
/* save writeback aq if requested */
if (details->wb_desc)
*details->wb_desc = *desc_on_ring;
/* update the error if a timeout occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
if (rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
} else {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
}
}
asq_send_command_error:
mutex_unlock(&hw->aq.asq_mutex);
return status;
}
/**
* iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
* @opcode: the opcode can be used to decide which flags to turn off or on
*
* Fill the desc with default values
**/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
{
/* zero out the desc */
memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
desc->opcode = cpu_to_le16(opcode);
desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
}
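/*
 * Usage sketch (illustrative): a direct (buffer-less) command is built
 * and posted as
 *
 *	struct iavf_aq_desc desc;
 *
 *	iavf_fill_default_direct_cmd_desc(&desc, opcode);
 *	status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * An indirect command additionally passes a bounce buffer and its size.
 */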
/**
* iavf_clean_arq_element
* @hw: pointer to the hw struct
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
*
* This function cleans one Admin Receive Queue element and returns
* the contents through e. It can also return how many events are
* left to process through 'pending'
**/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
struct iavf_arq_event_info *e,
u16 *pending)
{
u16 ntc = hw->aq.arq.next_to_clean;
struct iavf_aq_desc *desc;
enum iavf_status ret_code = 0;
struct iavf_dma_mem *bi;
u16 desc_idx;
u16 datalen;
u16 flags;
u16 ntu;
/* pre-clean the event info */
memset(&e->desc, 0, sizeof(e->desc));
/* take the lock before we start messing with the ring */
mutex_lock(&hw->aq.arq_mutex);
if (hw->aq.arq.count == 0) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQRX: Admin queue not initialized.\n");
ret_code = IAVF_ERR_QUEUE_EMPTY;
goto clean_arq_element_err;
}
/* set next_to_use to head */
ntu = rd32(hw, IAVF_VF_ARQH1) & IAVF_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
goto clean_arq_element_out;
}
/* now clean the next descriptor */
desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
hw->aq.arq_last_status =
(enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & IAVF_AQ_FLAG_ERR) {
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
hw->aq.arq_last_status);
}
e->desc = *desc;
datalen = le16_to_cpu(desc->datalen);
e->msg_len = min(datalen, e->buf_len);
if (e->msg_buf && (e->msg_len != 0))
memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
e->msg_len);
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
hw->aq.arq_buf_size);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
wr32(hw, IAVF_VF_ARQT1, ntc);
/* ntc is updated to tail + 1 */
ntc++;
if (ntc == hw->aq.num_arq_entries)
ntc = 0;
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
mutex_unlock(&hw->aq.arq_mutex);
return ret_code;
}
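/*
 * Worked example for the 'pending' arithmetic above (illustrative): with
 * a 16-entry ring, next_to_clean == 14 after the increment and a hardware
 * head (ntu) of 2, pending = 16 + (2 - 14) = 4 events left to process.
 */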
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_TERM_H
#define __PERF_TERM_H
struct termios;
struct winsize;
void get_term_dimensions(struct winsize *ws);
void set_term_quiet_input(struct termios *old);
#endif /* __PERF_TERM_H */
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* KVM_SET_SREGS tests
*
* Copyright (C) 2018, Google LLC.
*
* This is a regression test for the bug fixed by the following commit:
* d3802286fa0f ("kvm: x86: Disallow illegal IA32_APIC_BASE MSR values")
*
* That bug allowed a user-mode program that called the KVM_SET_SREGS
* ioctl to put a VCPU's local APIC into an invalid state.
*/
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#define TEST_INVALID_CR_BIT(vcpu, cr, orig, bit) \
do { \
struct kvm_sregs new; \
int rc; \
\
/* Skip the sub-test if the feature/bit is supported. */ \
if (orig.cr & bit) \
break; \
\
memcpy(&new, &orig, sizeof(sregs)); \
new.cr |= bit; \
\
rc = _vcpu_sregs_set(vcpu, &new); \
TEST_ASSERT(rc, "KVM allowed invalid " #cr " bit (0x%lx)", bit); \
\
/* Sanity check that KVM didn't change anything. */ \
vcpu_sregs_get(vcpu, &new); \
TEST_ASSERT(!memcmp(&new, &orig, sizeof(new)), "KVM modified sregs"); \
} while (0)
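/*
 * Usage sketch: TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_VMXE)
 * attempts to set CR4.VMXE on top of the current sregs and asserts that
 * KVM rejects it; the sub-test is skipped when the bit is already set,
 * i.e. supported. Note that the macro references the caller's 'sregs'
 * variable through sizeof(sregs).
 */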
static uint64_t calc_supported_cr4_feature_bits(void)
{
uint64_t cr4;
cr4 = X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE |
X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE | X86_CR4_PGE |
X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT;
if (kvm_cpu_has(X86_FEATURE_UMIP))
cr4 |= X86_CR4_UMIP;
if (kvm_cpu_has(X86_FEATURE_LA57))
cr4 |= X86_CR4_LA57;
if (kvm_cpu_has(X86_FEATURE_VMX))
cr4 |= X86_CR4_VMXE;
if (kvm_cpu_has(X86_FEATURE_SMX))
cr4 |= X86_CR4_SMXE;
if (kvm_cpu_has(X86_FEATURE_FSGSBASE))
cr4 |= X86_CR4_FSGSBASE;
if (kvm_cpu_has(X86_FEATURE_PCID))
cr4 |= X86_CR4_PCIDE;
if (kvm_cpu_has(X86_FEATURE_XSAVE))
cr4 |= X86_CR4_OSXSAVE;
if (kvm_cpu_has(X86_FEATURE_SMEP))
cr4 |= X86_CR4_SMEP;
if (kvm_cpu_has(X86_FEATURE_SMAP))
cr4 |= X86_CR4_SMAP;
if (kvm_cpu_has(X86_FEATURE_PKU))
cr4 |= X86_CR4_PKE;
return cr4;
}
int main(int argc, char *argv[])
{
struct kvm_sregs sregs;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
uint64_t cr4;
int rc, i;
/*
* Create a barebones VM and use it to verify that all supported CR4 bits
* can be set prior to defining the vCPU model, i.e. without doing
* KVM_SET_CPUID2.
*/
vm = vm_create_barebones();
vcpu = __vm_vcpu_add(vm, 0);
vcpu_sregs_get(vcpu, &sregs);
sregs.cr0 = 0;
sregs.cr4 |= calc_supported_cr4_feature_bits();
cr4 = sregs.cr4;
rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(!rc, "Failed to set supported CR4 bits (0x%lx)", cr4);
vcpu_sregs_get(vcpu, &sregs);
TEST_ASSERT(sregs.cr4 == cr4, "sregs.CR4 (0x%llx) != CR4 (0x%lx)",
sregs.cr4, cr4);
/* Verify all unsupported features are rejected by KVM. */
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_UMIP);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_LA57);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_VMXE);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_SMXE);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_FSGSBASE);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_PCIDE);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_OSXSAVE);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_SMEP);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_SMAP);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_PKE);
for (i = 32; i < 64; i++)
TEST_INVALID_CR_BIT(vcpu, cr0, sregs, BIT(i));
/* NW without CD is illegal, as is PG without PE. */
TEST_INVALID_CR_BIT(vcpu, cr0, sregs, X86_CR0_NW);
TEST_INVALID_CR_BIT(vcpu, cr0, sregs, X86_CR0_PG);
kvm_vm_free(vm);
/* Create a "real" VM and verify APIC_BASE can be set. */
vm = vm_create_with_one_vcpu(&vcpu, NULL);
vcpu_sregs_get(vcpu, &sregs);
sregs.apic_base = 1 << 10;
rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(rc, "Set IA32_APIC_BASE to %llx (invalid)",
sregs.apic_base);
sregs.apic_base = 1 << 11;
rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(!rc, "Couldn't set IA32_APIC_BASE to %llx (valid)",
sregs.apic_base);
kvm_vm_free(vm);
return 0;
}
|
/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
/*
* Copyright (c) 2023 Amlogic, Inc.
* Author: Hongyu Chen <[email protected]>
*/
#ifndef _DT_BINDINGS_AMLOGIC_T7_POWER_H
#define _DT_BINDINGS_AMLOGIC_T7_POWER_H
#define PWRC_T7_DSPA_ID 0
#define PWRC_T7_DSPB_ID 1
#define PWRC_T7_DOS_HCODEC_ID 2
#define PWRC_T7_DOS_HEVC_ID 3
#define PWRC_T7_DOS_VDEC_ID 4
#define PWRC_T7_DOS_WAVE_ID 5
#define PWRC_T7_VPU_HDMI_ID 6
#define PWRC_T7_USB_COMB_ID 7
#define PWRC_T7_PCIE_ID 8
#define PWRC_T7_GE2D_ID 9
#define PWRC_T7_SRAMA_ID 10
#define PWRC_T7_SRAMB_ID 11
#define PWRC_T7_HDMIRX_ID 12
#define PWRC_T7_VI_CLK1_ID 13
#define PWRC_T7_VI_CLK2_ID 14
#define PWRC_T7_ETH_ID 15
#define PWRC_T7_ISP_ID 16
#define PWRC_T7_MIPI_ISP_ID 17
#define PWRC_T7_GDC_ID 18
#define PWRC_T7_CVE_ID 18
#define PWRC_T7_DEWARP_ID 19
#define PWRC_T7_SDIO_A_ID 20
#define PWRC_T7_SDIO_B_ID 21
#define PWRC_T7_EMMC_ID 22
#define PWRC_T7_MALI_SC0_ID 23
#define PWRC_T7_MALI_SC1_ID 24
#define PWRC_T7_MALI_SC2_ID 25
#define PWRC_T7_MALI_SC3_ID 26
#define PWRC_T7_MALI_TOP_ID 27
#define PWRC_T7_NNA_CORE0_ID 28
#define PWRC_T7_NNA_CORE1_ID 29
#define PWRC_T7_NNA_CORE2_ID 30
#define PWRC_T7_NNA_CORE3_ID 31
#define PWRC_T7_NNA_TOP_ID 32
#define PWRC_T7_DDR0_ID 33
#define PWRC_T7_DDR1_ID 34
#define PWRC_T7_DMC0_ID 35
#define PWRC_T7_DMC1_ID 36
#define PWRC_T7_NOC_ID 37
#define PWRC_T7_NIC2_ID 38
#define PWRC_T7_NIC3_ID 39
#define PWRC_T7_CCI_ID 40
#define PWRC_T7_MIPI_DSI0_ID 41
#define PWRC_T7_SPICC0_ID 42
#define PWRC_T7_SPICC1_ID 43
#define PWRC_T7_SPICC2_ID 44
#define PWRC_T7_SPICC3_ID 45
#define PWRC_T7_SPICC4_ID 46
#define PWRC_T7_SPICC5_ID 47
#define PWRC_T7_EDP0_ID 48
#define PWRC_T7_EDP1_ID 49
#define PWRC_T7_MIPI_DSI1_ID 50
#define PWRC_T7_AUDIO_ID 51
#endif
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Sunplus Watchdog Driver
*
* Copyright (C) 2021 Sunplus Technology Co., Ltd.
*
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/watchdog.h>
#define WDT_CTRL 0x00
#define WDT_CNT 0x04
#define WDT_STOP 0x3877
#define WDT_RESUME 0x4A4B
#define WDT_CLRIRQ 0x7482
#define WDT_UNLOCK 0xAB00
#define WDT_LOCK 0xAB01
#define WDT_CONMAX 0xDEAF
/* TIMEOUT_MAX = 0xffff0 / 90 kHz = ~11.65 s, so anything longer than 11 seconds will time out. */
#define SP_WDT_MAX_TIMEOUT 11U
#define SP_WDT_DEFAULT_TIMEOUT 10
#define STC_CLK 90000
#define DEVICE_NAME "sunplus-wdt"
static unsigned int timeout;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
struct sp_wdt_priv {
struct watchdog_device wdev;
void __iomem *base;
struct clk *clk;
struct reset_control *rstc;
};
static int sp_wdt_restart(struct watchdog_device *wdev,
unsigned long action, void *data)
{
struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
void __iomem *base = priv->base;
writel(WDT_STOP, base + WDT_CTRL);
writel(WDT_UNLOCK, base + WDT_CTRL);
writel(0x0001, base + WDT_CNT);
writel(WDT_LOCK, base + WDT_CTRL);
writel(WDT_RESUME, base + WDT_CTRL);
return 0;
}
static int sp_wdt_ping(struct watchdog_device *wdev)
{
struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
void __iomem *base = priv->base;
u32 count;
if (wdev->timeout > SP_WDT_MAX_TIMEOUT) {
/* WDT_CONMAX sets the count to the maximum (down-counting). */
writel(WDT_CONMAX, base + WDT_CTRL);
} else {
writel(WDT_UNLOCK, base + WDT_CTRL);
/*
* The watchdog timer is a 20-bit down-counter clocked by STC_CLK.
* WDT_CNT bits [15:0] load bits [19:4] of the watchdog timer counter,
* hence the shift by 4 below.
*/
count = (wdev->timeout * STC_CLK) >> 4;
writel(count, base + WDT_CNT);
writel(WDT_LOCK, base + WDT_CTRL);
}
return 0;
}
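/*
 * Worked example (illustrative): with the default 10 s timeout,
 * sp_wdt_ping() loads WDT_CNT with (10 * 90000) >> 4 = 56250 (0xdbba);
 * sp_wdt_get_timeleft() below reverses the shift, 0xdbba << 4 = 900000
 * STC_CLK ticks, i.e. 10 s at 90 kHz.
 */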
static int sp_wdt_stop(struct watchdog_device *wdev)
{
struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
void __iomem *base = priv->base;
writel(WDT_STOP, base + WDT_CTRL);
return 0;
}
static int sp_wdt_start(struct watchdog_device *wdev)
{
struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
void __iomem *base = priv->base;
writel(WDT_RESUME, base + WDT_CTRL);
return 0;
}
static unsigned int sp_wdt_get_timeleft(struct watchdog_device *wdev)
{
struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
void __iomem *base = priv->base;
u32 val;
val = readl(base + WDT_CNT);
val &= 0xffff;
val = val << 4;
return val;
}
static const struct watchdog_info sp_wdt_info = {
.identity = DEVICE_NAME,
.options = WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE |
WDIOF_KEEPALIVEPING,
};
static const struct watchdog_ops sp_wdt_ops = {
.owner = THIS_MODULE,
.start = sp_wdt_start,
.stop = sp_wdt_stop,
.ping = sp_wdt_ping,
.get_timeleft = sp_wdt_get_timeleft,
.restart = sp_wdt_restart,
};
static void sp_reset_control_assert(void *data)
{
reset_control_assert(data);
}
static int sp_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sp_wdt_priv *priv;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(priv->clk))
return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to enable clock\n");
/* The timer and watchdog shared the STC reset */
priv->rstc = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(priv->rstc))
return dev_err_probe(dev, PTR_ERR(priv->rstc), "Failed to get reset\n");
reset_control_deassert(priv->rstc);
ret = devm_add_action_or_reset(dev, sp_reset_control_assert, priv->rstc);
if (ret)
return ret;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->wdev.info = &sp_wdt_info;
priv->wdev.ops = &sp_wdt_ops;
priv->wdev.timeout = SP_WDT_DEFAULT_TIMEOUT;
priv->wdev.max_hw_heartbeat_ms = SP_WDT_MAX_TIMEOUT * 1000;
priv->wdev.min_timeout = 1;
priv->wdev.parent = dev;
watchdog_set_drvdata(&priv->wdev, priv);
watchdog_init_timeout(&priv->wdev, timeout, dev);
watchdog_set_nowayout(&priv->wdev, nowayout);
watchdog_stop_on_reboot(&priv->wdev);
watchdog_set_restart_priority(&priv->wdev, 128);
return devm_watchdog_register_device(dev, &priv->wdev);
}
static const struct of_device_id sp_wdt_of_match[] = {
{.compatible = "sunplus,sp7021-wdt", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sp_wdt_of_match);
static struct platform_driver sp_wdt_driver = {
.probe = sp_wdt_probe,
.driver = {
.name = DEVICE_NAME,
.of_match_table = sp_wdt_of_match,
},
};
module_platform_driver(sp_wdt_driver);
MODULE_AUTHOR("Xiantao Hu <[email protected]>");
MODULE_DESCRIPTION("Sunplus Watchdog Timer Driver");
MODULE_LICENSE("GPL");
|
#define SNDRV_STB
#include "interwave.c"
|
// SPDX-License-Identifier: GPL-2.0
/*
* This file sets up defines to compile an arch-specific binary from the
* generic one.
*
* The 'LIBUNWIND__ARCH_REG_ID' function name is set according to the arch
* name, and the definition of this function is included directly from
* 'arch/arm64/util/unwind-libunwind.c' to make sure that this function
* is defined no matter what arch the host is.
*
* Finally, the arch specific unwind methods are exported which will
* be assigned to each arm64 thread.
*/
#define REMOTE_UNWIND_LIBUNWIND
/* Define arch-specific functions & regs for libunwind; these should be
* defined before including "unwind.h".
*/
#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arm64_reg_id(regnum)
#include "unwind.h"
#include "libunwind-aarch64.h"
#define perf_event_arm_regs perf_event_arm64_regs
#include <../../../arch/arm64/include/uapi/asm/perf_regs.h>
#undef perf_event_arm_regs
#include "../../arch/arm64/util/unwind-libunwind.c"
/* NO_LIBUNWIND_DEBUG_FRAME is a feature flag for local libunwind,
* assign NO_LIBUNWIND_DEBUG_FRAME_AARCH64 to it for compiling arm64
* unwind methods.
*/
#undef NO_LIBUNWIND_DEBUG_FRAME
#ifdef NO_LIBUNWIND_DEBUG_FRAME_AARCH64
#define NO_LIBUNWIND_DEBUG_FRAME
#endif
#include "util/unwind-libunwind-local.c"
struct unwind_libunwind_ops *
arm64_unwind_libunwind_ops = &_unwind_libunwind_ops;
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: AP event handling
*
* Copyright 2011-2020 NXP
*/
#include "decl.h"
#include "main.h"
#include "11n.h"
#define MWIFIEX_BSS_START_EVT_FIX_SIZE 12
static int mwifiex_check_uap_capabilities(struct mwifiex_private *priv,
struct sk_buff *event)
{
int evt_len;
u8 *curr;
u16 tlv_len;
struct mwifiex_ie_types_data *tlv_hdr;
struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK;
priv->wmm_enabled = false;
skb_pull(event, MWIFIEX_BSS_START_EVT_FIX_SIZE);
evt_len = event->len;
curr = event->data;
mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilities:",
event->data, event->len);
skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE);
while ((evt_len >= sizeof(tlv_hdr->header))) {
tlv_hdr = (struct mwifiex_ie_types_data *)curr;
tlv_len = le16_to_cpu(tlv_hdr->header.len);
if (evt_len < tlv_len + sizeof(tlv_hdr->header))
break;
switch (le16_to_cpu(tlv_hdr->header.type)) {
case WLAN_EID_HT_CAPABILITY:
priv->ap_11n_enabled = true;
break;
case WLAN_EID_VHT_CAPABILITY:
priv->ap_11ac_enabled = true;
break;
case WLAN_EID_VENDOR_SPECIFIC:
/* Point at the regular IEEE IE, which starts two bytes into the
* Marvell IE, and set up the IEEE IE type and length byte fields
*/
wmm_param_ie = (void *)(curr + 2);
wmm_param_ie->vend_hdr.len = (u8)tlv_len;
wmm_param_ie->vend_hdr.element_id =
WLAN_EID_VENDOR_SPECIFIC;
mwifiex_dbg(priv->adapter, EVENT,
"info: check uap capabilities:\t"
"wmm parameter set count: %d\n",
wmm_param_ie->qos_info_bitmap & mask);
mwifiex_wmm_setup_ac_downgrade(priv);
priv->wmm_enabled = true;
mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
break;
default:
break;
}
curr += (tlv_len + sizeof(tlv_hdr->header));
evt_len -= (tlv_len + sizeof(tlv_hdr->header));
}
return 0;
}
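/*
 * TLV layout assumed by the walk above (illustrative): each element is a
 * 4-byte header (__le16 type, __le16 len) followed by 'len' bytes of
 * value, so the cursor advances by tlv_len + sizeof(tlv_hdr->header) per
 * iteration.
 */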
/*
* This function handles AP interface specific events generated by firmware.
*
* Event specific routines are called by this function based
* upon the generated event cause.
*
* Events supported for AP -
* - EVENT_UAP_STA_ASSOC
* - EVENT_UAP_STA_DEAUTH
* - EVENT_UAP_BSS_ACTIVE
* - EVENT_UAP_BSS_START
* - EVENT_UAP_BSS_IDLE
* - EVENT_UAP_MIC_COUNTERMEASURES
*/
int mwifiex_process_uap_event(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
int len, i;
u32 eventcause = adapter->event_cause;
struct station_info *sinfo;
struct mwifiex_assoc_event *event;
struct mwifiex_sta_node *node;
u8 *deauth_mac;
struct host_cmd_ds_11n_batimeout *ba_timeout;
u16 ctrl;
switch (eventcause) {
case EVENT_UAP_STA_ASSOC:
sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
if (!sinfo)
return -ENOMEM;
event = (struct mwifiex_assoc_event *)
(adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
len = -1;
if (ieee80211_is_assoc_req(event->frame_control))
len = 0;
else if (ieee80211_is_reassoc_req(event->frame_control))
/* There will be ETH_ALEN bytes of
* current_ap_addr before the re-assoc ies.
*/
len = ETH_ALEN;
if (len != -1) {
sinfo->assoc_req_ies = &event->data[len];
len = (u8 *)sinfo->assoc_req_ies -
(u8 *)&event->frame_control;
sinfo->assoc_req_ies_len =
le16_to_cpu(event->len) - (u16)len;
}
}
cfg80211_new_sta(priv->netdev, event->sta_addr, sinfo,
GFP_KERNEL);
node = mwifiex_add_sta_entry(priv, event->sta_addr);
if (!node) {
mwifiex_dbg(adapter, ERROR,
"could not create station entry!\n");
kfree(sinfo);
return -1;
}
if (!priv->ap_11n_enabled) {
kfree(sinfo);
break;
}
mwifiex_set_sta_ht_cap(priv, sinfo->assoc_req_ies,
sinfo->assoc_req_ies_len, node);
for (i = 0; i < MAX_NUM_TID; i++) {
if (node->is_11n_enabled)
node->ampdu_sta[i] =
priv->aggr_prio_tbl[i].ampdu_user;
else
node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
}
memset(node->rx_seq, 0xff, sizeof(node->rx_seq));
kfree(sinfo);
break;
case EVENT_UAP_STA_DEAUTH:
deauth_mac = adapter->event_body +
MWIFIEX_UAP_EVENT_EXTRA_HEADER;
cfg80211_del_sta(priv->netdev, deauth_mac, GFP_KERNEL);
if (priv->ap_11n_enabled) {
mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, deauth_mac);
mwifiex_del_tx_ba_stream_tbl_by_ra(priv, deauth_mac);
}
mwifiex_wmm_del_peer_ra_list(priv, deauth_mac);
mwifiex_del_sta_entry(priv, deauth_mac);
break;
case EVENT_UAP_BSS_IDLE:
priv->media_connected = false;
priv->port_open = false;
mwifiex_clean_txrx(priv);
mwifiex_del_all_sta_list(priv);
break;
case EVENT_UAP_BSS_ACTIVE:
priv->media_connected = true;
priv->port_open = true;
break;
case EVENT_UAP_BSS_START:
mwifiex_dbg(adapter, EVENT,
"AP EVENT: event id: %#x\n", eventcause);
priv->port_open = false;
eth_hw_addr_set(priv->netdev, adapter->event_body + 2);
if (priv->hist_data)
mwifiex_hist_data_reset(priv);
mwifiex_check_uap_capabilities(priv, adapter->event_skb);
break;
case EVENT_UAP_MIC_COUNTERMEASURES:
/* For future development */
mwifiex_dbg(adapter, EVENT,
"AP EVENT: event id: %#x\n", eventcause);
break;
case EVENT_AMSDU_AGGR_CTRL:
ctrl = get_unaligned_le16(adapter->event_body);
mwifiex_dbg(adapter, EVENT,
"event: AMSDU_AGGR_CTRL %d\n", ctrl);
if (priv->media_connected) {
adapter->tx_buf_size =
min_t(u16, adapter->curr_tx_buf_size, ctrl);
mwifiex_dbg(adapter, EVENT,
"event: tx_buf_size %d\n",
adapter->tx_buf_size);
}
break;
case EVENT_ADDBA:
mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n");
if (priv->media_connected)
mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
HostCmd_ACT_GEN_SET, 0,
adapter->event_body, false);
break;
case EVENT_DELBA:
mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n");
if (priv->media_connected)
mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
break;
case EVENT_BA_STREAM_TIEMOUT:
mwifiex_dbg(adapter, EVENT, "event: BA Stream timeout\n");
if (priv->media_connected) {
ba_timeout = (void *)adapter->event_body;
mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
}
break;
case EVENT_EXT_SCAN_REPORT:
mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
if (adapter->ext_scan)
return mwifiex_handle_event_ext_scan_report(priv,
adapter->event_skb->data);
break;
case EVENT_TX_STATUS_REPORT:
mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
mwifiex_parse_tx_status_event(priv, adapter->event_body);
break;
case EVENT_PS_SLEEP:
mwifiex_dbg(adapter, EVENT, "info: EVENT: SLEEP\n");
adapter->ps_state = PS_STATE_PRE_SLEEP;
mwifiex_check_ps_cond(adapter);
break;
case EVENT_PS_AWAKE:
mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
if (!adapter->pps_uapsd_mode &&
priv->media_connected && adapter->sleep_period.period) {
adapter->pps_uapsd_mode = true;
mwifiex_dbg(adapter, EVENT,
"event: PPS/UAPSD mode activated\n");
}
adapter->tx_lock_flag = false;
if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
if (mwifiex_check_last_packet_indication(priv)) {
if (adapter->data_sent ||
(adapter->if_ops.is_port_ready &&
!adapter->if_ops.is_port_ready(priv))) {
adapter->ps_state = PS_STATE_AWAKE;
adapter->pm_wakeup_card_req = false;
adapter->pm_wakeup_fw_try = false;
break;
}
if (!mwifiex_send_null_packet
(priv,
MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET |
MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET))
adapter->ps_state =
PS_STATE_SLEEP;
return 0;
}
}
adapter->ps_state = PS_STATE_AWAKE;
adapter->pm_wakeup_card_req = false;
adapter->pm_wakeup_fw_try = false;
break;
case EVENT_CHANNEL_REPORT_RDY:
mwifiex_dbg(adapter, EVENT, "event: Channel Report\n");
mwifiex_11h_handle_chanrpt_ready(priv, adapter->event_skb);
break;
case EVENT_RADAR_DETECTED:
mwifiex_dbg(adapter, EVENT, "event: Radar detected\n");
mwifiex_11h_handle_radar_detected(priv, adapter->event_skb);
break;
case EVENT_BT_COEX_WLAN_PARA_CHANGE:
mwifiex_dbg(adapter, EVENT, "event: BT coex wlan param update\n");
mwifiex_bt_coex_wlan_param_update_event(priv,
adapter->event_skb);
break;
case EVENT_TX_DATA_PAUSE:
mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n");
mwifiex_process_tx_pause_event(priv, adapter->event_skb);
break;
case EVENT_MULTI_CHAN_INFO:
mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
mwifiex_process_multi_chan_event(priv, adapter->event_skb);
break;
case EVENT_RXBA_SYNC:
dev_dbg(adapter->dev, "EVENT: RXBA_SYNC\n");
mwifiex_11n_rxba_sync_event(priv, adapter->event_body,
adapter->event_skb->len -
sizeof(eventcause));
break;
case EVENT_REMAIN_ON_CHAN_EXPIRED:
mwifiex_dbg(adapter, EVENT,
"event: uap: Remain on channel expired\n");
cfg80211_remain_on_channel_expired(&priv->wdev,
priv->roc_cfg.cookie,
&priv->roc_cfg.chan,
GFP_ATOMIC);
memset(&priv->roc_cfg, 0x00, sizeof(struct mwifiex_roc_cfg));
break;
default:
mwifiex_dbg(adapter, EVENT,
"event: unknown event id: %#x\n", eventcause);
break;
}
return 0;
}
/* This function deletes station entry from associated station list.
* Also if both AP and STA are 11n enabled, RxReorder tables and TxBA stream
* tables created for this station are deleted.
*/
void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
struct mwifiex_sta_node *node)
{
if (priv->ap_11n_enabled && node->is_11n_enabled) {
mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, node->mac_addr);
mwifiex_del_tx_ba_stream_tbl_by_ra(priv, node->mac_addr);
}
mwifiex_del_sta_entry(priv, node->mac_addr);
}
|
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Headers for EFI variable service via StandAloneMM, EDK2 application running
* in OP-TEE. Most of the structs and defines resemble the EDK2 naming.
*
* Copyright (c) 2017, Intel Corporation. All rights reserved.
* Copyright (C) 2020 Linaro Ltd.
*/
#ifndef _MM_COMMUNICATION_H_
#define _MM_COMMUNICATION_H_
/*
* Interface to the pseudo Trusted Application (TA), which provides a
* communication channel with the Standalone MM (Management Mode)
* Secure Partition running at Secure-EL0
*/
#define PTA_STMM_CMD_COMMUNICATE 0
/*
* Defined in OP-TEE, this UUID is used to identify the pseudo-TA.
* OP-TEE uses big-endian GUIDs while UEFI uses little-endian ones.
*/
#define PTA_STMM_UUID \
UUID_INIT(0xed32d533, 0x99e6, 0x4209, \
0x9c, 0xc0, 0x2d, 0x72, 0xcd, 0xd9, 0x98, 0xa7)
#define EFI_MM_VARIABLE_GUID \
EFI_GUID(0xed32d533, 0x99e6, 0x4209, \
0x9c, 0xc0, 0x2d, 0x72, 0xcd, 0xd9, 0x98, 0xa7)
/**
* struct efi_mm_communicate_header - Header used for SMM variable communication
* @header_guid: GUID used to disambiguate the message content
* @message_len: length of the message. Does not include the size of the
* header
* @data: payload of the message
*
* Defined in the PI spec as EFI_MM_COMMUNICATE_HEADER.
* To avoid confusion in interpreting frames, the communication buffer should
* always begin with efi_mm_communicate_header.
*/
struct efi_mm_communicate_header {
efi_guid_t header_guid;
size_t message_len;
u8 data[];
} __packed;
#define MM_COMMUNICATE_HEADER_SIZE \
(sizeof(struct efi_mm_communicate_header))
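/*
 * Buffer layout sketch (illustrative, not part of the protocol headers):
 * every frame starts with the MM header, and message_len counts only
 * the bytes that follow it.
 *
 *	+----------------------------------------+
 *	| efi_mm_communicate_header              |
 *	|   header_guid = EFI_MM_VARIABLE_GUID   |
 *	|   message_len = N                      |
 *	+----------------------------------------+
 *	| payload (N bytes)                      |
 *	+----------------------------------------+
 */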
/* SPM return error codes */
#define ARM_SVC_SPM_RET_SUCCESS 0
#define ARM_SVC_SPM_RET_NOT_SUPPORTED -1
#define ARM_SVC_SPM_RET_INVALID_PARAMS -2
#define ARM_SVC_SPM_RET_DENIED -3
#define ARM_SVC_SPM_RET_NO_MEMORY -5
/*
 * The payload for this function is SMM_VARIABLE_COMMUNICATE_ACCESS_VARIABLE.
 */
#define SMM_VARIABLE_FUNCTION_GET_VARIABLE 1
/*
* The payload for this function is
* SMM_VARIABLE_COMMUNICATE_GET_NEXT_VARIABLE_NAME.
*/
#define SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME 2
/*
* The payload for this function is SMM_VARIABLE_COMMUNICATE_ACCESS_VARIABLE.
*/
#define SMM_VARIABLE_FUNCTION_SET_VARIABLE 3
/*
* The payload for this function is
* SMM_VARIABLE_COMMUNICATE_QUERY_VARIABLE_INFO.
*/
#define SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO 4
/*
* It is a notify event, no extra payload for this function.
*/
#define SMM_VARIABLE_FUNCTION_READY_TO_BOOT 5
/*
* It is a notify event, no extra payload for this function.
*/
#define SMM_VARIABLE_FUNCTION_EXIT_BOOT_SERVICE 6
/*
* The payload for this function is VARIABLE_INFO_ENTRY.
* The GUID in EFI_SMM_COMMUNICATE_HEADER is gEfiSmmVariableProtocolGuid.
*/
#define SMM_VARIABLE_FUNCTION_GET_STATISTICS 7
/*
* The payload for this function is SMM_VARIABLE_COMMUNICATE_LOCK_VARIABLE
*/
#define SMM_VARIABLE_FUNCTION_LOCK_VARIABLE 8
#define SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_SET 9
#define SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_GET 10
#define SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE 11
/*
* The payload for this function is
* SMM_VARIABLE_COMMUNICATE_RUNTIME_VARIABLE_CACHE_CONTEXT
*/
#define SMM_VARIABLE_FUNCTION_INIT_RUNTIME_VARIABLE_CACHE_CONTEXT 12
#define SMM_VARIABLE_FUNCTION_SYNC_RUNTIME_CACHE 13
/*
* The payload for this function is
* SMM_VARIABLE_COMMUNICATE_GET_RUNTIME_CACHE_INFO
*/
#define SMM_VARIABLE_FUNCTION_GET_RUNTIME_CACHE_INFO 14
/**
* struct smm_variable_communicate_header - Used for SMM variable communication
* @function: function to call in StMM.
* @ret_status: return status
* @data: payload
*/
struct smm_variable_communicate_header {
size_t function;
efi_status_t ret_status;
u8 data[];
};
#define MM_VARIABLE_COMMUNICATE_SIZE \
(sizeof(struct smm_variable_communicate_header))
/**
* struct smm_variable_access - Used to communicate with StMM for
* SetVariable and GetVariable.
* @guid: vendor GUID
* @data_size: size of EFI variable data
* @name_size: size of EFI name
* @attr: attributes
* @name: variable name
*
*/
struct smm_variable_access {
efi_guid_t guid;
size_t data_size;
size_t name_size;
u32 attr;
u16 name[];
};
#define MM_VARIABLE_ACCESS_HEADER_SIZE \
(sizeof(struct smm_variable_access))
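/*
 * Sizing example (an illustrative sketch; name_size and data_size stand
 * for hypothetical caller-supplied values): a GetVariable/SetVariable
 * payload is the access header followed by the UTF-16 name and then the
 * variable data, so a complete communication buffer would occupy
 *
 *	MM_COMMUNICATE_HEADER_SIZE +
 *	MM_VARIABLE_COMMUNICATE_SIZE +
 *	MM_VARIABLE_ACCESS_HEADER_SIZE +
 *	name_size + data_size
 *
 * bytes, with efi_mm_communicate_header.message_len covering everything
 * after the MM header itself.
 */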
/**
* struct smm_variable_payload_size - Used to get the max allowed
* payload used in StMM.
*
* @size: size to fill in
*
*/
struct smm_variable_payload_size {
size_t size;
};
/**
* struct smm_variable_getnext - Used to communicate with StMM for
* GetNextVariableName.
*
* @guid: vendor GUID
* @name_size: size of the name of the variable
* @name: variable name
*
*/
struct smm_variable_getnext {
efi_guid_t guid;
size_t name_size;
u16 name[];
};
#define MM_VARIABLE_GET_NEXT_HEADER_SIZE \
(sizeof(struct smm_variable_getnext))
/**
* struct smm_variable_query_info - Used to communicate with StMM for
* QueryVariableInfo.
*
* @max_variable_storage: max available storage
* @remaining_variable_storage: remaining available storage
* @max_variable_size: max variable supported size
* @attr: attributes to query storage for
*
*/
struct smm_variable_query_info {
u64 max_variable_storage;
u64 remaining_variable_storage;
u64 max_variable_size;
u32 attr;
};
#define VAR_CHECK_VARIABLE_PROPERTY_REVISION 0x0001
#define VAR_CHECK_VARIABLE_PROPERTY_READ_ONLY BIT(0)
/**
* struct var_check_property - Used to store variable properties in StMM
*
* @revision: magic revision number for variable property checking
* @property: properties mask for the variable used in StMM.
* Currently RO flag is supported
* @attributes: variable attributes used in StMM checking when properties
* for a variable are enabled
* @minsize: minimum allowed size for variable payload checked against
* smm_variable_access->data_size in StMM
* @maxsize: maximum allowed size for variable payload checked against
* smm_variable_access->data_size in StMM
*
*/
struct var_check_property {
u16 revision;
u16 property;
u32 attributes;
size_t minsize;
size_t maxsize;
};
/**
* struct smm_variable_var_check_property - Used to communicate variable
* properties with StMM
*
* @guid: vendor GUID
* @name_size: size of EFI name
* @property: variable properties struct
* @name: variable name
*
*/
struct smm_variable_var_check_property {
efi_guid_t guid;
size_t name_size;
struct var_check_property property;
u16 name[];
};
#endif /* _MM_COMMUNICATION_H_ */
|
/*
* Hardware info common to DECstation 5000/1xx systems (otherwise
* known as 3min or kn02ba) and Personal DECstations 5000/xx ones
* (otherwise known as maxine or kn02ca).
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995,1996 by Paul M. Antoine, some code and definitions
* are by courtesy of Chris Fraser.
* Copyright (C) 2000, 2002, 2003, 2005 Maciej W. Rozycki
*
* These are addresses which have to be known early in the boot process.
* For other addresses refer to tc.h, ioasic_addrs.h and friends.
*/
#ifndef __ASM_MIPS_DEC_KN02XA_H
#define __ASM_MIPS_DEC_KN02XA_H
#include <asm/dec/ioasic_addrs.h>
#define KN02XA_SLOT_BASE 0x1c000000
/*
* Memory control ASIC registers.
*/
#define KN02XA_MER 0x0c400000 /* memory error register */
#define KN02XA_MSR 0x0c800000 /* memory size register */
/*
* CPU control ASIC registers.
*/
#define KN02XA_MEM_CONF 0x0e000000 /* write timeout config */
#define KN02XA_EAR 0x0e000004 /* error address register */
#define KN02XA_BOOT0 0x0e000008 /* boot 0 register */
#define KN02XA_MEM_INTR 0x0e00000c /* write err IRQ stat & ack */
/*
* Memory Error Register bits, common definitions.
* The rest is defined in system-specific headers.
*/
#define KN02XA_MER_RES_28 (0xf<<28) /* unused */
#define KN02XA_MER_RES_17 (0x3ff<<17) /* unused */
#define KN02XA_MER_PAGERR (1<<16) /* 2k page boundary error */
#define KN02XA_MER_TRANSERR (1<<15) /* transfer length error */
#define KN02XA_MER_PARDIS (1<<14) /* parity error disable */
#define KN02XA_MER_SIZE (1<<13) /* r/o mirror of MSR_SIZE */
#define KN02XA_MER_RES_12 (1<<12) /* unused */
#define KN02XA_MER_BYTERR (0xf<<8) /* byte lane error bitmask: */
#define KN02XA_MER_BYTERR_3 (0x8<<8) /* byte lane #3 */
#define KN02XA_MER_BYTERR_2 (0x4<<8) /* byte lane #2 */
#define KN02XA_MER_BYTERR_1 (0x2<<8) /* byte lane #1 */
#define KN02XA_MER_BYTERR_0 (0x1<<8) /* byte lane #0 */
#define KN02XA_MER_RES_0 (0xff<<0) /* unused */
/*
* Memory Size Register bits, common definitions.
* The rest is defined in system-specific headers.
*/
#define KN02XA_MSR_RES_27 (0x1f<<27) /* unused */
#define KN02XA_MSR_RES_14 (0x7<<14) /* unused */
#define KN02XA_MSR_SIZE (1<<13) /* 16M/4M stride */
#define KN02XA_MSR_RES_0 (0x1fff<<0) /* unused */
/*
* Error Address Register bits.
*/
#define KN02XA_EAR_RES_29 (0x7<<29) /* unused */
#define KN02XA_EAR_ADDRESS (0x7ffffff<<2) /* address involved */
#define KN02XA_EAR_RES_0 (0x3<<0) /* unused */
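/*
 * Decode sketch (illustrative, derived from the masks above): the EAR
 * carries address bits 28:2 of the faulting access, so a bus-error
 * handler would recover the word-aligned address with
 *
 *	paddr = ear & KN02XA_EAR_ADDRESS;
 *
 * while bits 31:29 and 1:0 read back as unused.
 */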
#ifndef __ASSEMBLY__
#include <linux/interrupt.h>
struct pt_regs;
extern void dec_kn02xa_be_init(void);
extern int dec_kn02xa_be_handler(struct pt_regs *regs, int is_fixup);
extern irqreturn_t dec_kn02xa_be_interrupt(int irq, void *dev_id);
#endif
#endif /* __ASM_MIPS_DEC_KN02XA_H */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2003 - 2009 NetXen, Inc.
* Copyright (C) 2009 - QLogic Corporation.
* All rights reserved.
*/
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
struct crb_addr_pair {
u32 addr;
u32 data;
};
#define NETXEN_MAX_CRB_XFORM 60
static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
#define NETXEN_ADDR_ERROR (0xffffffff)
#define crb_addr_transform(name) \
crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20
#define NETXEN_NIC_XDMA_RESET 0x8000ff
static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
struct nx_host_rds_ring *rds_ring);
static int netxen_p3_has_mn(struct netxen_adapter *adapter);
static void crb_addr_transform_setup(void)
{
crb_addr_transform(XDMA);
crb_addr_transform(TIMR);
crb_addr_transform(SRE);
crb_addr_transform(SQN3);
crb_addr_transform(SQN2);
crb_addr_transform(SQN1);
crb_addr_transform(SQN0);
crb_addr_transform(SQS3);
crb_addr_transform(SQS2);
crb_addr_transform(SQS1);
crb_addr_transform(SQS0);
crb_addr_transform(RPMX7);
crb_addr_transform(RPMX6);
crb_addr_transform(RPMX5);
crb_addr_transform(RPMX4);
crb_addr_transform(RPMX3);
crb_addr_transform(RPMX2);
crb_addr_transform(RPMX1);
crb_addr_transform(RPMX0);
crb_addr_transform(ROMUSB);
crb_addr_transform(SN);
crb_addr_transform(QMN);
crb_addr_transform(QMS);
crb_addr_transform(PGNI);
crb_addr_transform(PGND);
crb_addr_transform(PGN3);
crb_addr_transform(PGN2);
crb_addr_transform(PGN1);
crb_addr_transform(PGN0);
crb_addr_transform(PGSI);
crb_addr_transform(PGSD);
crb_addr_transform(PGS3);
crb_addr_transform(PGS2);
crb_addr_transform(PGS1);
crb_addr_transform(PGS0);
crb_addr_transform(PS);
crb_addr_transform(PH);
crb_addr_transform(NIU);
crb_addr_transform(I2Q);
crb_addr_transform(EG);
crb_addr_transform(MN);
crb_addr_transform(MS);
crb_addr_transform(CAS2);
crb_addr_transform(CAS1);
crb_addr_transform(CAS0);
crb_addr_transform(CAM);
crb_addr_transform(C2C1);
crb_addr_transform(C2C0);
crb_addr_transform(SMB);
crb_addr_transform(OCM0);
crb_addr_transform(I2C0);
}
void netxen_release_rx_buffers(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
struct netxen_rx_buffer *rx_buf;
int i, ring;
recv_ctx = &adapter->recv_ctx;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
for (i = 0; i < rds_ring->num_desc; ++i) {
rx_buf = &(rds_ring->rx_buf_arr[i]);
if (rx_buf->state == NETXEN_BUFFER_FREE)
continue;
dma_unmap_single(&adapter->pdev->dev, rx_buf->dma,
rds_ring->dma_size, DMA_FROM_DEVICE);
if (rx_buf->skb != NULL)
dev_kfree_skb_any(rx_buf->skb);
}
}
}
void netxen_release_tx_buffers(struct netxen_adapter *adapter)
{
struct netxen_cmd_buffer *cmd_buf;
struct netxen_skb_frag *buffrag;
int i, j;
struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
spin_lock_bh(&adapter->tx_clean_lock);
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
if (buffrag->dma) {
dma_unmap_single(&adapter->pdev->dev, buffrag->dma,
buffrag->length, DMA_TO_DEVICE);
buffrag->dma = 0ULL;
}
for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
dma_unmap_page(&adapter->pdev->dev,
buffrag->dma, buffrag->length,
DMA_TO_DEVICE);
buffrag->dma = 0ULL;
}
}
if (cmd_buf->skb) {
dev_kfree_skb_any(cmd_buf->skb);
cmd_buf->skb = NULL;
}
cmd_buf++;
}
spin_unlock_bh(&adapter->tx_clean_lock);
}
void netxen_free_sw_resources(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
struct nx_host_tx_ring *tx_ring;
int ring;
recv_ctx = &adapter->recv_ctx;
if (recv_ctx->rds_rings == NULL)
goto skip_rds;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
vfree(rds_ring->rx_buf_arr);
rds_ring->rx_buf_arr = NULL;
}
kfree(recv_ctx->rds_rings);
skip_rds:
if (adapter->tx_ring == NULL)
return;
tx_ring = adapter->tx_ring;
vfree(tx_ring->cmd_buf_arr);
kfree(tx_ring);
adapter->tx_ring = NULL;
}
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
struct nx_host_sds_ring *sds_ring;
struct nx_host_tx_ring *tx_ring;
struct netxen_rx_buffer *rx_buf;
int ring, i;
struct netxen_cmd_buffer *cmd_buf_arr;
struct net_device *netdev = adapter->netdev;
tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
if (tx_ring == NULL)
return -ENOMEM;
adapter->tx_ring = tx_ring;
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, 0);
cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
if (cmd_buf_arr == NULL)
goto err_out;
tx_ring->cmd_buf_arr = cmd_buf_arr;
recv_ctx = &adapter->recv_ctx;
rds_ring = kcalloc(adapter->max_rds_rings,
sizeof(struct nx_host_rds_ring), GFP_KERNEL);
if (rds_ring == NULL)
goto err_out;
recv_ctx->rds_rings = rds_ring;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
switch (ring) {
case RCV_RING_NORMAL:
rds_ring->num_desc = adapter->num_rxd;
if (adapter->ahw.cut_through) {
rds_ring->dma_size =
NX_CT_DEFAULT_RX_BUF_LEN;
rds_ring->skb_size =
NX_CT_DEFAULT_RX_BUF_LEN;
} else {
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
rds_ring->dma_size =
NX_P3_RX_BUF_MAX_LEN;
else
rds_ring->dma_size =
NX_P2_RX_BUF_MAX_LEN;
rds_ring->skb_size =
rds_ring->dma_size + NET_IP_ALIGN;
}
break;
case RCV_RING_JUMBO:
rds_ring->num_desc = adapter->num_jumbo_rxd;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
rds_ring->dma_size =
NX_P3_RX_JUMBO_BUF_MAX_LEN;
else
rds_ring->dma_size =
NX_P2_RX_JUMBO_BUF_MAX_LEN;
if (adapter->capabilities & NX_CAP0_HW_LRO)
rds_ring->dma_size += NX_LRO_BUFFER_EXTRA;
rds_ring->skb_size =
rds_ring->dma_size + NET_IP_ALIGN;
break;
case RCV_RING_LRO:
rds_ring->num_desc = adapter->num_lro_rxd;
rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH;
rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
break;
}
rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
if (rds_ring->rx_buf_arr == NULL)
/* free whatever was already allocated */
goto err_out;
INIT_LIST_HEAD(&rds_ring->free_list);
/*
* Now go through all of them, set reference handles
* and put them in the queues.
*/
rx_buf = rds_ring->rx_buf_arr;
for (i = 0; i < rds_ring->num_desc; i++) {
list_add_tail(&rx_buf->list,
&rds_ring->free_list);
rx_buf->ref_handle = i;
rx_buf->state = NETXEN_BUFFER_FREE;
rx_buf++;
}
spin_lock_init(&rds_ring->lock);
}
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
sds_ring->irq = adapter->msix_entries[ring].vector;
sds_ring->adapter = adapter;
sds_ring->num_desc = adapter->num_rxd;
for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
INIT_LIST_HEAD(&sds_ring->free_list[i]);
}
return 0;
err_out:
netxen_free_sw_resources(adapter);
return -ENOMEM;
}
/*
* netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
* address to external PCI CRB address.
*/
static u32 netxen_decode_crb_addr(u32 addr)
{
int i;
u32 base_addr, offset, pci_base;
crb_addr_transform_setup();
pci_base = NETXEN_ADDR_ERROR;
base_addr = addr & 0xfff00000;
offset = addr & 0x000fffff;
for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
if (crb_addr_xform[i] == base_addr) {
pci_base = i << 20;
break;
}
}
if (pci_base == NETXEN_ADDR_ERROR)
return pci_base;
else
return pci_base + offset;
}
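/*
 * Worked example (hypothetical index and values): if crb_addr_xform[5]
 * holds 0x0ab00000 and addr is 0x0ab01234, base_addr (0x0ab00000)
 * matches at i == 5, so the decoded address is (5 << 20) | 0x01234 ==
 * 0x00501234. An address matching no hub agent decodes to
 * NETXEN_ADDR_ERROR.
 */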
#define NETXEN_MAX_ROM_WAIT_USEC 100
static int netxen_wait_rom_done(struct netxen_adapter *adapter)
{
long timeout = 0;
long done = 0;
cond_resched();
while (done == 0) {
done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
done &= 2;
if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) {
dev_err(&adapter->pdev->dev,
"Timeout reached waiting for rom done");
return -EIO;
}
udelay(1);
}
return 0;
}
static int do_rom_fast_read(struct netxen_adapter *adapter,
int addr, int *valp)
{
NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
if (netxen_wait_rom_done(adapter)) {
printk("Error waiting for rom done\n");
return -EIO;
}
/* reset abyte_cnt and dummy_byte_cnt */
NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
udelay(10);
NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
*valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
return 0;
}
static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
u8 *bytes, size_t size)
{
int addridx;
int ret = 0;
for (addridx = addr; addridx < (addr + size); addridx += 4) {
int v;
ret = do_rom_fast_read(adapter, addridx, &v);
if (ret != 0)
break;
*(__le32 *)bytes = cpu_to_le32(v);
bytes += 4;
}
return ret;
}
int
netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
u8 *bytes, size_t size)
{
int ret;
ret = netxen_rom_lock(adapter);
if (ret < 0)
return ret;
ret = do_rom_fast_read_words(adapter, addr, bytes, size);
netxen_rom_unlock(adapter);
return ret;
}
int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
{
int ret;
if (netxen_rom_lock(adapter) != 0)
return -EIO;
ret = do_rom_fast_read(adapter, addr, valp);
netxen_rom_unlock(adapter);
return ret;
}
#define NETXEN_BOARDTYPE 0x4008
#define NETXEN_BOARDNUM 0x400c
#define NETXEN_CHIPNUM 0x4010
int netxen_pinit_from_rom(struct netxen_adapter *adapter)
{
int addr, val;
int i, n, init_delay = 0;
struct crb_addr_pair *buf;
unsigned offset;
u32 off;
/* resetall */
netxen_rom_lock(adapter);
NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xfeffffff);
netxen_rom_unlock(adapter);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
(n != 0xcafecafe) ||
netxen_rom_fast_read(adapter, 4, &n) != 0) {
printk(KERN_ERR "%s: ERROR Reading crb_init area: "
"n: %08x\n", netxen_nic_driver_name, n);
return -EIO;
}
offset = n & 0xffffU;
n = (n >> 16) & 0xffffU;
} else {
if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
!(n & 0x80000000)) {
printk(KERN_ERR "%s: ERROR Reading crb_init area: "
"n: %08x\n", netxen_nic_driver_name, n);
return -EIO;
}
offset = 1;
n &= ~0x80000000;
}
if (n >= 1024) {
printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
" initialized.\n", __func__, n);
return -EIO;
}
buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
for (i = 0; i < n; i++) {
if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
kfree(buf);
return -EIO;
}
buf[i].addr = addr;
buf[i].data = val;
}
for (i = 0; i < n; i++) {
off = netxen_decode_crb_addr(buf[i].addr);
if (off == NETXEN_ADDR_ERROR) {
printk(KERN_ERR"CRB init value out of range %x\n",
buf[i].addr);
continue;
}
off += NETXEN_PCI_CRBSPACE;
if (off & 1)
continue;
/* skipping cold reboot MAGIC */
if (off == NETXEN_CAM_RAM(0x1fc))
continue;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
if (off == (NETXEN_CRB_I2C0 + 0x1c))
continue;
/* do not reset PCI */
if (off == (ROMUSB_GLB + 0xbc))
continue;
if (off == (ROMUSB_GLB + 0xa8))
continue;
if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
continue;
if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
continue;
if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
continue;
if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET)
continue;
if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) &&
!NX_IS_REVISION_P3P(adapter->ahw.revision_id))
buf[i].data = 0x1020;
/* skip the function enable register */
if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
continue;
if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
continue;
if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
continue;
}
init_delay = 1;
/* After writing this register, HW needs time for CRB */
/* to quiet down (else crb_window returns 0xffffffff) */
if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
init_delay = 1000;
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
/* hold xdma in reset also */
buf[i].data = NETXEN_NIC_XDMA_RESET;
}
}
NXWR32(adapter, off, buf[i].data);
msleep(init_delay);
}
kfree(buf);
/* disable_peg_cache_all */
/* unreset_net_cache */
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
}
/* p2dn replyCount */
NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
/* disable_peg_cache 0 */
NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
/* disable_peg_cache 1 */
NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);
/* peg_clr_all */
/* peg_clr 0 */
NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
/* peg_clr 1 */
NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
/* peg_clr 2 */
NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
/* peg_clr 3 */
NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
return 0;
}
static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
{
uint32_t i;
struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
__le32 entries = cpu_to_le32(directory->num_entries);
for (i = 0; i < entries; i++) {
__le32 offs = cpu_to_le32(directory->findex) +
(i * cpu_to_le32(directory->entry_size));
__le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
if (tab_type == section)
return (struct uni_table_desc *) &unirom[offs];
}
return NULL;
}
#define QLCNIC_FILEHEADER_SIZE (14 * 4)
static int
netxen_nic_validate_header(struct netxen_adapter *adapter)
{
const u8 *unirom = adapter->fw->data;
struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
u32 fw_file_size = adapter->fw->size;
u32 tab_size;
__le32 entries;
__le32 entry_size;
if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
return -EINVAL;
entries = cpu_to_le32(directory->num_entries);
entry_size = cpu_to_le32(directory->entry_size);
tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
if (fw_file_size < tab_size)
return -EINVAL;
return 0;
}
static int
netxen_nic_validate_bootld(struct netxen_adapter *adapter)
{
struct uni_table_desc *tab_desc;
struct uni_data_desc *descr;
const u8 *unirom = adapter->fw->data;
__le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
NX_UNI_BOOTLD_IDX_OFF));
u32 offs;
u32 tab_size;
u32 data_size;
tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);
if (!tab_desc)
return -EINVAL;
tab_size = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * (idx + 1));
if (adapter->fw->size < tab_size)
return -EINVAL;
offs = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * (idx));
descr = (struct uni_data_desc *)&unirom[offs];
data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
return 0;
}
static int
netxen_nic_validate_fw(struct netxen_adapter *adapter)
{
struct uni_table_desc *tab_desc;
struct uni_data_desc *descr;
const u8 *unirom = adapter->fw->data;
__le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
NX_UNI_FIRMWARE_IDX_OFF));
u32 offs;
u32 tab_size;
u32 data_size;
tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);
if (!tab_desc)
return -EINVAL;
tab_size = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * (idx + 1));
if (adapter->fw->size < tab_size)
return -EINVAL;
offs = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * (idx));
descr = (struct uni_data_desc *)&unirom[offs];
data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
return 0;
}
static int
netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
{
struct uni_table_desc *ptab_descr;
const u8 *unirom = adapter->fw->data;
int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
1 : netxen_p3_has_mn(adapter);
__le32 entries;
__le32 entry_size;
u32 tab_size;
u32 i;
ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
if (ptab_descr == NULL)
return -EINVAL;
entries = cpu_to_le32(ptab_descr->num_entries);
entry_size = cpu_to_le32(ptab_descr->entry_size);
tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
if (adapter->fw->size < tab_size)
return -EINVAL;
nomn:
for (i = 0; i < entries; i++) {
__le32 flags, file_chiprev, offs;
u8 chiprev = adapter->ahw.revision_id;
uint32_t flagbit;
offs = cpu_to_le32(ptab_descr->findex) +
(i * cpu_to_le32(ptab_descr->entry_size));
flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
NX_UNI_CHIP_REV_OFF));
flagbit = mn_present ? 1 : 2;
if ((chiprev == file_chiprev) &&
((1ULL << flagbit) & flags)) {
adapter->file_prd_off = offs;
return 0;
}
}
if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
mn_present = 0;
goto nomn;
}
return -EINVAL;
}
static int
netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
{
if (netxen_nic_validate_header(adapter)) {
dev_err(&adapter->pdev->dev,
"unified image: header validation failed\n");
return -EINVAL;
}
if (netxen_nic_validate_product_offs(adapter)) {
dev_err(&adapter->pdev->dev,
"unified image: product validation failed\n");
return -EINVAL;
}
if (netxen_nic_validate_bootld(adapter)) {
dev_err(&adapter->pdev->dev,
"unified image: bootld validation failed\n");
return -EINVAL;
}
if (netxen_nic_validate_fw(adapter)) {
dev_err(&adapter->pdev->dev,
"unified image: firmware validation failed\n");
return -EINVAL;
}
return 0;
}
static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
u32 section, u32 idx_offset)
{
const u8 *unirom = adapter->fw->data;
int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
idx_offset));
struct uni_table_desc *tab_desc;
__le32 offs;
tab_desc = nx_get_table_desc(unirom, section);
if (tab_desc == NULL)
return NULL;
offs = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * idx);
return (struct uni_data_desc *)&unirom[offs];
}
static u8 *
nx_get_bootld_offs(struct netxen_adapter *adapter)
{
u32 offs = NETXEN_BOOTLD_START;
if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
offs = cpu_to_le32((nx_get_data_desc(adapter,
NX_UNI_DIR_SECT_BOOTLD,
NX_UNI_BOOTLD_IDX_OFF))->findex);
return (u8 *)&adapter->fw->data[offs];
}
static u8 *
nx_get_fw_offs(struct netxen_adapter *adapter)
{
u32 offs = NETXEN_IMAGE_START;
if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
offs = cpu_to_le32((nx_get_data_desc(adapter,
NX_UNI_DIR_SECT_FW,
NX_UNI_FIRMWARE_IDX_OFF))->findex);
return (u8 *)&adapter->fw->data[offs];
}
static __le32
nx_get_fw_size(struct netxen_adapter *adapter)
{
if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
return cpu_to_le32((nx_get_data_desc(adapter,
NX_UNI_DIR_SECT_FW,
NX_UNI_FIRMWARE_IDX_OFF))->size);
else
return cpu_to_le32(
*(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
}
static __le32
nx_get_fw_version(struct netxen_adapter *adapter)
{
struct uni_data_desc *fw_data_desc;
const struct firmware *fw = adapter->fw;
__le32 major, minor, sub;
const u8 *ver_str;
int i, ret = 0;
if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
fw_data_desc = nx_get_data_desc(adapter,
NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
cpu_to_le32(fw_data_desc->size) - 17;
for (i = 0; i < 12; i++) {
if (!strncmp(&ver_str[i], "REV=", 4)) {
ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
&major, &minor, &sub);
break;
}
}
if (ret != 3)
return 0;
return major + (minor << 8) + (sub << 16);
} else
return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
}
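/*
 * Packing example (hypothetical version numbers): for a unified image
 * whose version string reads "REV=4.0.579", the code above returns
 * 4 + (0 << 8) + (579 << 16) == 0x02430004, i.e. major in the low byte,
 * minor in the next byte, and the build number in the upper 16 bits.
 */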
static __le32
nx_get_bios_version(struct netxen_adapter *adapter)
{
const struct firmware *fw = adapter->fw;
__le32 bios_ver, prd_off = adapter->file_prd_off;
if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+ NX_UNI_BIOS_VERSION_OFF));
return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
(bios_ver >> 24);
} else
return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
}
int
netxen_need_fw_reset(struct netxen_adapter *adapter)
{
u32 count, old_count;
u32 val, version, major, minor, build;
int i, timeout;
u8 fw_type;
/* NX2031 firmware doesn't support heartbeat */
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 1;
if (adapter->need_fw_reset)
return 1;
/* last attempt had failed */
if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
return 1;
old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
for (i = 0; i < 10; i++) {
timeout = msleep_interruptible(200);
if (timeout) {
NXWR32(adapter, CRB_CMDPEG_STATE,
PHAN_INITIALIZE_FAILED);
return -EINTR;
}
count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
if (count != old_count)
break;
}
/* firmware is dead */
if (count == old_count)
return 1;
/* check if we have a newer or different firmware file */
if (adapter->fw) {
val = nx_get_fw_version(adapter);
version = NETXEN_DECODE_VERSION(val);
major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
if (version > NETXEN_VERSION_CODE(major, minor, build))
return 1;
if (version == NETXEN_VERSION_CODE(major, minor, build) &&
adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
fw_type = (val & 0x4) ?
NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
if (adapter->fw_type != fw_type)
return 1;
}
}
return 0;
}
#define NETXEN_MIN_P3_FW_SUPP NETXEN_VERSION_CODE(4, 0, 505)
int
netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter)
{
u32 flash_fw_ver, min_fw_ver;
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
if (netxen_rom_fast_read(adapter,
NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
dev_err(&adapter->pdev->dev, "Unable to read flash fw"
"version\n");
return -EIO;
}
flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
min_fw_ver = NETXEN_MIN_P3_FW_SUPP;
if (flash_fw_ver >= min_fw_ver)
return 0;
dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported"
"[4.0.505]. Please update firmware on flash\n",
_major(flash_fw_ver), _minor(flash_fw_ver),
_build(flash_fw_ver));
return -EINVAL;
}
static char *fw_name[] = {
NX_P2_MN_ROMIMAGE_NAME,
NX_P3_CT_ROMIMAGE_NAME,
NX_P3_MN_ROMIMAGE_NAME,
NX_UNIFIED_ROMIMAGE_NAME,
NX_FLASH_ROMIMAGE_NAME,
};
int
netxen_load_firmware(struct netxen_adapter *adapter)
{
u64 *ptr64;
u32 i, flashaddr, size;
const struct firmware *fw = adapter->fw;
struct pci_dev *pdev = adapter->pdev;
dev_info(&pdev->dev, "loading firmware from %s\n",
fw_name[adapter->fw_type]);
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
if (fw) {
__le64 data;
size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
ptr64 = (u64 *)nx_get_bootld_offs(adapter);
flashaddr = NETXEN_BOOTLD_START;
for (i = 0; i < size; i++) {
data = cpu_to_le64(ptr64[i]);
if (adapter->pci_mem_write(adapter, flashaddr, data))
return -EIO;
flashaddr += 8;
}
size = (__force u32)nx_get_fw_size(adapter) / 8;
ptr64 = (u64 *)nx_get_fw_offs(adapter);
flashaddr = NETXEN_IMAGE_START;
for (i = 0; i < size; i++) {
data = cpu_to_le64(ptr64[i]);
if (adapter->pci_mem_write(adapter,
flashaddr, data))
return -EIO;
flashaddr += 8;
}
size = (__force u32)nx_get_fw_size(adapter) % 8;
if (size) {
data = cpu_to_le64(ptr64[i]);
if (adapter->pci_mem_write(adapter,
flashaddr, data))
return -EIO;
}
} else {
u64 data;
u32 hi, lo;
size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
flashaddr = NETXEN_BOOTLD_START;
for (i = 0; i < size; i++) {
if (netxen_rom_fast_read(adapter,
flashaddr, (int *)&lo) != 0)
return -EIO;
if (netxen_rom_fast_read(adapter,
flashaddr + 4, (int *)&hi) != 0)
return -EIO;
/* hi, lo are already in host endian byteorder */
data = (((u64)hi << 32) | lo);
if (adapter->pci_mem_write(adapter,
flashaddr, data))
return -EIO;
flashaddr += 8;
}
}
msleep(1);
if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020);
NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e);
} else if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
else {
NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0);
}
return 0;
}
static int
netxen_validate_firmware(struct netxen_adapter *adapter)
{
__le32 val;
__le32 flash_fw_ver;
u32 file_fw_ver, min_ver, bios;
struct pci_dev *pdev = adapter->pdev;
const struct firmware *fw = adapter->fw;
u8 fw_type = adapter->fw_type;
u32 crbinit_fix_fw;
if (fw_type == NX_UNIFIED_ROMIMAGE) {
if (netxen_nic_validate_unified_romimage(adapter))
return -EINVAL;
} else {
val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
if ((__force u32)val != NETXEN_BDINFO_MAGIC)
return -EINVAL;
if (fw->size < NX_FW_MIN_SIZE)
return -EINVAL;
}
val = nx_get_fw_version(adapter);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
min_ver = NETXEN_MIN_P3_FW_SUPP;
else
min_ver = NETXEN_VERSION_CODE(3, 4, 216);
file_fw_ver = NETXEN_DECODE_VERSION(val);
if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) ||
(file_fw_ver < min_ver)) {
dev_err(&pdev->dev,
"%s: firmware version %d.%d.%d unsupported\n",
fw_name[fw_type], _major(file_fw_ver), _minor(file_fw_ver),
_build(file_fw_ver));
return -EINVAL;
}
val = nx_get_bios_version(adapter);
if (netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios))
return -EIO;
if ((__force u32)val != bios) {
dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
fw_name[fw_type]);
return -EINVAL;
}
if (netxen_rom_fast_read(adapter,
NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
dev_err(&pdev->dev, "Unable to read flash fw version\n");
return -EIO;
}
flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
/* New fw from file is not allowed if fw on flash is < 4.0.554 */
crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554);
if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw &&
NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
dev_err(&pdev->dev, "Incompatibility detected between driver "
"and firmware version on flash. This configuration "
"is not recommended. Please update the firmware on "
"flash immediately\n");
return -EINVAL;
}
/* check if flashed firmware is newer, only for the no-MN and P2 cases */
if (!netxen_p3_has_mn(adapter) ||
NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
if (flash_fw_ver > file_fw_ver) {
dev_info(&pdev->dev, "%s: firmware is older than flash\n",
fw_name[fw_type]);
return -EINVAL;
}
}
NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
return 0;
}
static void
nx_get_next_fwtype(struct netxen_adapter *adapter)
{
u8 fw_type;
switch (adapter->fw_type) {
case NX_UNKNOWN_ROMIMAGE:
fw_type = NX_UNIFIED_ROMIMAGE;
break;
case NX_UNIFIED_ROMIMAGE:
if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
fw_type = NX_FLASH_ROMIMAGE;
else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
fw_type = NX_P2_MN_ROMIMAGE;
else if (netxen_p3_has_mn(adapter))
fw_type = NX_P3_MN_ROMIMAGE;
else
fw_type = NX_P3_CT_ROMIMAGE;
break;
case NX_P3_MN_ROMIMAGE:
fw_type = NX_P3_CT_ROMIMAGE;
break;
case NX_P2_MN_ROMIMAGE:
case NX_P3_CT_ROMIMAGE:
default:
fw_type = NX_FLASH_ROMIMAGE;
break;
}
adapter->fw_type = fw_type;
}
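/*
 * Fallback order implemented above, in summary: try the unified image
 * first; failing that, pick the chip-specific image (P2 MN image on P2,
 * MN or CT image on P3 depending on netxen_p3_has_mn(), flash directly
 * on P3P); the MN image further falls back to the CT image; and every
 * path ultimately ends at the firmware already on flash.
 */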
static int
netxen_p3_has_mn(struct netxen_adapter *adapter)
{
u32 capability, flashed_ver;
/* NX2031 always had MN */
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 1;
netxen_rom_fast_read(adapter,
NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
if (capability & NX_PEG_TUNE_MN_PRESENT)
return 1;
}
return 0;
}
void netxen_request_firmware(struct netxen_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
int rc = 0;
adapter->fw_type = NX_UNKNOWN_ROMIMAGE;
next:
nx_get_next_fwtype(adapter);
if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
adapter->fw = NULL;
} else {
rc = request_firmware(&adapter->fw,
fw_name[adapter->fw_type], &pdev->dev);
if (rc != 0)
goto next;
rc = netxen_validate_firmware(adapter);
if (rc != 0) {
release_firmware(adapter->fw);
msleep(1);
goto next;
}
}
}
void
netxen_release_firmware(struct netxen_adapter *adapter)
{
release_firmware(adapter->fw);
adapter->fw = NULL;
}
int netxen_init_dummy_dma(struct netxen_adapter *adapter)
{
u64 addr;
u32 hi, lo;
if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
adapter->dummy_dma.addr = dma_alloc_coherent(&adapter->pdev->dev,
NETXEN_HOST_DUMMY_DMA_SIZE,
&adapter->dummy_dma.phys_addr,
GFP_KERNEL);
if (adapter->dummy_dma.addr == NULL) {
dev_err(&adapter->pdev->dev,
"ERROR: Could not allocate dummy DMA memory\n");
return -ENOMEM;
}
addr = (uint64_t) adapter->dummy_dma.phys_addr;
hi = (addr >> 32) & 0xffffffff;
lo = addr & 0xffffffff;
NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);
return 0;
}
/*
* NetXen DMA watchdog control:
*
* Bit 0 : enabled => R/O: 1 watchdog active, 0 inactive
* Bit 1 : disable_request => 1 req disable dma watchdog
* Bit 2 : enable_request => 1 req enable dma watchdog
* Bit 3-31 : unused
*/
void netxen_free_dummy_dma(struct netxen_adapter *adapter)
{
int i = 100;
u32 ctrl;
if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
return;
if (!adapter->dummy_dma.addr)
return;
ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
if ((ctrl & 0x1) != 0) {
NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2));
while ((ctrl & 0x1) != 0) {
msleep(50);
ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
if (--i == 0)
break;
}
}
if (i) {
dma_free_coherent(&adapter->pdev->dev,
NETXEN_HOST_DUMMY_DMA_SIZE,
adapter->dummy_dma.addr,
adapter->dummy_dma.phys_addr);
adapter->dummy_dma.addr = NULL;
} else
dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
}
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
{
u32 val = 0;
int retries = 60;
if (pegtune_val)
return 0;
do {
val = NXRD32(adapter, CRB_CMDPEG_STATE);
switch (val) {
case PHAN_INITIALIZE_COMPLETE:
case PHAN_INITIALIZE_ACK:
return 0;
case PHAN_INITIALIZE_FAILED:
goto out_err;
default:
break;
}
msleep(500);
} while (--retries);
NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
out_err:
dev_warn(&adapter->pdev->dev, "firmware init failed\n");
return -EIO;
}
static int
netxen_receive_peg_ready(struct netxen_adapter *adapter)
{
u32 val = 0;
int retries = 2000;
do {
val = NXRD32(adapter, CRB_RCVPEG_STATE);
if (val == PHAN_PEG_RCV_INITIALIZED)
return 0;
msleep(10);
} while (--retries);
pr_err("Receive Peg initialization not complete, state: 0x%x.\n", val);
return -EIO;
}
int netxen_init_firmware(struct netxen_adapter *adapter)
{
int err;
err = netxen_receive_peg_ready(adapter);
if (err)
return err;
NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
return err;
}
static void
netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
{
u32 cable_OUI;
u16 cable_len;
u16 link_speed;
u8 link_status, module, duplex, autoneg;
struct net_device *netdev = adapter->netdev;
adapter->has_link_events = 1;
cable_OUI = msg->body[1] & 0xffffffff;
cable_len = (msg->body[1] >> 32) & 0xffff;
link_speed = (msg->body[1] >> 48) & 0xffff;
link_status = msg->body[2] & 0xff;
duplex = (msg->body[2] >> 16) & 0xff;
autoneg = (msg->body[2] >> 24) & 0xff;
module = (msg->body[2] >> 8) & 0xff;
if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) {
printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n",
netdev->name, cable_OUI, cable_len);
} else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) {
printk(KERN_INFO "%s: unsupported cable length %d\n",
netdev->name, cable_len);
}
/* update link parameters */
if (duplex == LINKEVENT_FULL_DUPLEX)
adapter->link_duplex = DUPLEX_FULL;
else
adapter->link_duplex = DUPLEX_HALF;
adapter->module_type = module;
adapter->link_autoneg = autoneg;
adapter->link_speed = link_speed;
netxen_advert_link_change(adapter, link_status);
}
static void
netxen_handle_fw_message(int desc_cnt, int index,
struct nx_host_sds_ring *sds_ring)
{
nx_fw_msg_t msg;
struct status_desc *desc;
int i = 0, opcode;
while (desc_cnt > 0 && i < 8) {
desc = &sds_ring->desc_head[index];
msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
index = get_next_index(index, sds_ring->num_desc);
desc_cnt--;
}
opcode = netxen_get_nic_msg_opcode(msg.body[0]);
switch (opcode) {
case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
netxen_handle_linkevent(sds_ring->adapter, &msg);
break;
default:
break;
}
}
static int
netxen_alloc_rx_skb(struct netxen_adapter *adapter,
struct nx_host_rds_ring *rds_ring,
struct netxen_rx_buffer *buffer)
{
struct sk_buff *skb;
dma_addr_t dma;
struct pci_dev *pdev = adapter->pdev;
buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
if (!buffer->skb)
return 1;
skb = buffer->skb;
if (!adapter->ahw.cut_through)
skb_reserve(skb, 2);
dma = dma_map_single(&pdev->dev, skb->data, rds_ring->dma_size,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, dma)) {
dev_kfree_skb_any(skb);
buffer->skb = NULL;
return 1;
}
buffer->skb = skb;
buffer->dma = dma;
buffer->state = NETXEN_BUFFER_BUSY;
return 0;
}
static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
struct netxen_rx_buffer *buffer;
struct sk_buff *skb;
buffer = &rds_ring->rx_buf_arr[index];
dma_unmap_single(&adapter->pdev->dev, buffer->dma, rds_ring->dma_size,
DMA_FROM_DEVICE);
skb = buffer->skb;
if (!skb)
goto no_skb;
if (likely((adapter->netdev->features & NETIF_F_RXCSUM)
&& cksum == STATUS_CKSUM_OK)) {
adapter->stats.csummed++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
skb->ip_summed = CHECKSUM_NONE;
buffer->skb = NULL;
no_skb:
buffer->state = NETXEN_BUFFER_FREE;
return skb;
}
static struct netxen_rx_buffer *
netxen_process_rcv(struct netxen_adapter *adapter,
struct nx_host_sds_ring *sds_ring,
int ring, u64 sts_data0)
{
struct net_device *netdev = adapter->netdev;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
struct netxen_rx_buffer *buffer;
struct sk_buff *skb;
struct nx_host_rds_ring *rds_ring;
int index, length, cksum, pkt_offset;
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = netxen_get_sts_refhandle(sts_data0);
if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
length = netxen_get_sts_totallength(sts_data0);
cksum = netxen_get_sts_status(sts_data0);
pkt_offset = netxen_get_sts_pkt_offset(sts_data0);
skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
if (!skb)
return buffer;
if (length > rds_ring->skb_size)
skb_put(skb, rds_ring->skb_size);
else
skb_put(skb, length);
if (pkt_offset)
skb_pull(skb, pkt_offset);
skb->protocol = eth_type_trans(skb, netdev);
napi_gro_receive(&sds_ring->napi, skb);
adapter->stats.rx_pkts++;
adapter->stats.rxbytes += length;
return buffer;
}
#define TCP_HDR_SIZE 20
#define TCP_TS_OPTION_SIZE 12
#define TCP_TS_HDR_SIZE (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)
static struct netxen_rx_buffer *
netxen_process_lro(struct netxen_adapter *adapter,
struct nx_host_sds_ring *sds_ring,
int ring, u64 sts_data0, u64 sts_data1)
{
struct net_device *netdev = adapter->netdev;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
struct netxen_rx_buffer *buffer;
struct sk_buff *skb;
struct nx_host_rds_ring *rds_ring;
struct iphdr *iph;
struct tcphdr *th;
bool push, timestamp;
int l2_hdr_offset, l4_hdr_offset;
int index;
u16 lro_length, length, data_offset;
u32 seq_number;
u8 vhdr_len = 0;
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = netxen_get_lro_sts_refhandle(sts_data0);
if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
timestamp = netxen_get_lro_sts_timestamp(sts_data0);
lro_length = netxen_get_lro_sts_length(sts_data0);
l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0);
l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0);
push = netxen_get_lro_sts_push_flag(sts_data0);
seq_number = netxen_get_lro_sts_seq_number(sts_data1);
skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
if (!skb)
return buffer;
if (timestamp)
data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
else
data_offset = l4_hdr_offset + TCP_HDR_SIZE;
skb_put(skb, lro_length + data_offset);
skb_pull(skb, l2_hdr_offset);
skb->protocol = eth_type_trans(skb, netdev);
if (skb->protocol == htons(ETH_P_8021Q))
vhdr_len = VLAN_HLEN;
iph = (struct iphdr *)(skb->data + vhdr_len);
th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
csum_replace2(&iph->check, iph->tot_len, htons(length));
iph->tot_len = htons(length);
th->psh = push;
th->seq = htonl(seq_number);
length = skb->len;
if (adapter->flags & NETXEN_FW_MSS_CAP)
skb_shinfo(skb)->gso_size = netxen_get_lro_sts_mss(sts_data1);
netif_receive_skb(skb);
adapter->stats.lro_pkts++;
adapter->stats.rxbytes += length;
return buffer;
}
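/*
 * Header-fixup example (illustrative numbers): for an aggregated frame
 * with iph->ihl == 5 (20-byte IP header), th->doff == 8 (32-byte TCP
 * header with timestamp option) and lro_length == 2920, the rewritten
 * IP total length is 20 + 32 + 2920 == 2972, and csum_replace2() patches
 * iph->check incrementally for the tot_len change rather than
 * recomputing the whole header checksum.
 */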
#define netxen_merge_rx_buffers(list, head) \
do { list_splice_tail_init(list, head); } while (0)
int
netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
{
struct netxen_adapter *adapter = sds_ring->adapter;
struct list_head *cur;
struct status_desc *desc;
struct netxen_rx_buffer *rxbuf;
u32 consumer = sds_ring->consumer;
int count = 0;
u64 sts_data0, sts_data1;
int opcode, ring = 0, desc_cnt;
while (count < max) {
desc = &sds_ring->desc_head[consumer];
sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
if (!(sts_data0 & STATUS_OWNER_HOST))
break;
desc_cnt = netxen_get_sts_desc_cnt(sts_data0);
opcode = netxen_get_sts_opcode(sts_data0);
switch (opcode) {
case NETXEN_NIC_RXPKT_DESC:
case NETXEN_OLD_RXPKT_DESC:
case NETXEN_NIC_SYN_OFFLOAD:
ring = netxen_get_sts_type(sts_data0);
rxbuf = netxen_process_rcv(adapter, sds_ring,
ring, sts_data0);
break;
case NETXEN_NIC_LRO_DESC:
ring = netxen_get_lro_sts_type(sts_data0);
sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
rxbuf = netxen_process_lro(adapter, sds_ring,
ring, sts_data0, sts_data1);
break;
case NETXEN_NIC_RESPONSE_DESC:
netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
goto skip;
default:
goto skip;
}
WARN_ON(desc_cnt > 1);
if (rxbuf)
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
skip:
for (; desc_cnt > 0; desc_cnt--) {
desc = &sds_ring->desc_head[consumer];
desc->status_desc_data[0] =
cpu_to_le64(STATUS_OWNER_PHANTOM);
consumer = get_next_index(consumer, sds_ring->num_desc);
}
count++;
}
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
struct nx_host_rds_ring *rds_ring =
&adapter->recv_ctx.rds_rings[ring];
if (!list_empty(&sds_ring->free_list[ring])) {
list_for_each(cur, &sds_ring->free_list[ring]) {
rxbuf = list_entry(cur,
struct netxen_rx_buffer, list);
netxen_alloc_rx_skb(adapter, rds_ring, rxbuf);
}
spin_lock(&rds_ring->lock);
netxen_merge_rx_buffers(&sds_ring->free_list[ring],
&rds_ring->free_list);
spin_unlock(&rds_ring->lock);
}
netxen_post_rx_buffers_nodb(adapter, rds_ring);
}
if (count) {
sds_ring->consumer = consumer;
NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer);
}
return count;
}
/* Process Command status ring */
int netxen_process_cmd_ring(struct netxen_adapter *adapter)
{
u32 sw_consumer, hw_consumer;
int count = 0, i;
struct netxen_cmd_buffer *buffer;
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
struct netxen_skb_frag *frag;
int done = 0;
struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
if (!spin_trylock_bh(&adapter->tx_clean_lock))
return 1;
sw_consumer = tx_ring->sw_consumer;
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
while (sw_consumer != hw_consumer) {
buffer = &tx_ring->cmd_buf_arr[sw_consumer];
if (buffer->skb) {
frag = &buffer->frag_array[0];
dma_unmap_single(&pdev->dev, frag->dma, frag->length,
DMA_TO_DEVICE);
frag->dma = 0ULL;
for (i = 1; i < buffer->frag_count; i++) {
frag++; /* Get the next frag */
dma_unmap_page(&pdev->dev, frag->dma,
frag->length, DMA_TO_DEVICE);
frag->dma = 0ULL;
}
adapter->stats.xmitfinished++;
dev_kfree_skb_any(buffer->skb);
buffer->skb = NULL;
}
sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
if (++count >= MAX_STATUS_HANDLE)
break;
}
tx_ring->sw_consumer = sw_consumer;
if (count && netif_running(netdev)) {
smp_mb();
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
netif_wake_queue(netdev);
adapter->tx_timeo_cnt = 0;
}
/*
* If everything is freed up to consumer then check if the ring is full
* If the ring is full then check if more needs to be freed and
* schedule the call back again.
*
* This happens when there are 2 CPUs. One could be freeing and the
* other filling it. If the ring is full when we get out of here and
* the card has already interrupted the host then the host can miss the
* interrupt.
*
* There is still a possible race condition and the host could miss an
* interrupt. The card has to take care of this.
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
spin_unlock_bh(&adapter->tx_clean_lock);
return done;
}
void
netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
struct nx_host_rds_ring *rds_ring)
{
struct rcv_desc *pdesc;
struct netxen_rx_buffer *buffer;
int producer, count = 0;
netxen_ctx_msg msg = 0;
struct list_head *head;
producer = rds_ring->producer;
head = &rds_ring->free_list;
while (!list_empty(head)) {
buffer = list_entry(head->next, struct netxen_rx_buffer, list);
if (!buffer->skb) {
if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
break;
}
count++;
list_del(&buffer->list);
/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
producer = get_next_index(producer, rds_ring->num_desc);
}
if (count) {
rds_ring->producer = producer;
NXWRIO(adapter, rds_ring->crb_rcv_producer,
(producer-1) & (rds_ring->num_desc-1));
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
/*
* Write a doorbell msg to tell phanmon of change in
* receive ring producer
* Only for firmware version < 4.0.0
*/
netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
netxen_set_msg_privid(msg);
netxen_set_msg_count(msg,
((producer - 1) &
(rds_ring->num_desc - 1)));
netxen_set_msg_ctxid(msg, adapter->portnum);
netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
NXWRIO(adapter, DB_NORMALIZE(adapter,
NETXEN_RCV_PRODUCER_OFFSET), msg);
}
}
}
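/*
 * Producer-index note (illustrative value): num_desc is a power of two,
 * so (producer - 1) & (num_desc - 1) yields the index of the last
 * descriptor actually posted; e.g. with num_desc == 1024 and producer
 * freshly wrapped to 0, the hardware is told index 1023.
 */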
static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
struct nx_host_rds_ring *rds_ring)
{
struct rcv_desc *pdesc;
struct netxen_rx_buffer *buffer;
int producer, count = 0;
struct list_head *head;
if (!spin_trylock(&rds_ring->lock))
return;
producer = rds_ring->producer;
head = &rds_ring->free_list;
while (!list_empty(head)) {
buffer = list_entry(head->next, struct netxen_rx_buffer, list);
if (!buffer->skb) {
if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
break;
}
count++;
list_del(&buffer->list);
/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
producer = get_next_index(producer, rds_ring->num_desc);
}
if (count) {
rds_ring->producer = producer;
NXWRIO(adapter, rds_ring->crb_rcv_producer,
(producer - 1) & (rds_ring->num_desc - 1));
}
spin_unlock(&rds_ring->lock);
}
void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
memset(&adapter->stats, 0, sizeof(adapter->stats));
}
|
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
* Christian König
*/
#include <linux/debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include "radeon.h"
/*
* Rings
* Most engines on the GPU are fed via ring buffers. Ring
* buffers are areas of GPU accessible memory that the host
* writes commands into and the GPU reads commands out of.
* There is a rptr (read pointer) that determines where the
* GPU is currently reading, and a wptr (write pointer)
* which determines where the host has written. When the
* pointers are equal, the ring is idle. When the host
* writes commands to the ring buffer, it increments the
* wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again.
*/
static void radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
/**
* radeon_ring_supports_scratch_reg - check if the ring supports
* writing to scratch registers
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Check if a specific ring supports writing to scratch registers (all asics).
* Returns true if the ring supports writing to scratch regs, false if not.
*/
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
struct radeon_ring *ring)
{
switch (ring->idx) {
case RADEON_RING_TYPE_GFX_INDEX:
case CAYMAN_RING_TYPE_CP1_INDEX:
case CAYMAN_RING_TYPE_CP2_INDEX:
return true;
default:
return false;
}
}
/**
* radeon_ring_free_size - update the free size
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Update the free dw slots in the ring buffer (all asics).
*/
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
/* This works because ring_size is a power of 2 */
ring->ring_free_dw = rptr + (ring->ring_size / 4);
ring->ring_free_dw -= ring->wptr;
ring->ring_free_dw &= ring->ptr_mask;
if (!ring->ring_free_dw) {
/* this is an empty ring */
ring->ring_free_dw = ring->ring_size / 4;
/* update lockup info to avoid false positive */
radeon_ring_lockup_update(rdev, ring);
}
}
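/*
 * Worked example (illustrative numbers): a 16 KiB ring has 4096 dword
 * slots and ptr_mask == 4095. With rptr == 100 and wptr == 4000,
 * ring_free_dw == (100 + 4096 - 4000) & 4095 == 196 slots remain before
 * the write pointer would catch the read pointer.
 */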
/**
* radeon_ring_alloc - allocate space on the ring buffer
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @ndw: number of dwords to allocate in the ring buffer
*
* Allocate @ndw dwords in the ring buffer (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
/* make sure we aren't trying to allocate more space than there is on the ring */
if (ndw > (ring->ring_size / 4))
return -ENOMEM;
/* Align requested size with padding so unlock_commit can
* pad safely */
radeon_ring_free_size(rdev, ring);
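	/* (ndw + mask) & ~mask rounds the request up to the ring's alignment */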
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
while (ndw > (ring->ring_free_dw - 1)) {
radeon_ring_free_size(rdev, ring);
if (ndw < ring->ring_free_dw) {
break;
}
r = radeon_fence_wait_next(rdev, ring->idx);
if (r)
return r;
}
ring->count_dw = ndw;
ring->wptr_old = ring->wptr;
return 0;
}
/**
* radeon_ring_lock - lock the ring and allocate space on it
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @ndw: number of dwords to allocate in the ring buffer
*
* Lock the ring and allocate @ndw dwords in the ring buffer
* (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
mutex_lock(&rdev->ring_lock);
r = radeon_ring_alloc(rdev, ring, ndw);
if (r) {
mutex_unlock(&rdev->ring_lock);
return r;
}
return 0;
}
/**
* radeon_ring_commit - tell the GPU to execute the new
* commands on the ring buffer
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @hdp_flush: Whether or not to perform an HDP cache flush
*
* Update the wptr (write pointer) to tell the GPU to
* execute new commands on the ring buffer (all asics).
*/
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
bool hdp_flush)
{
/* If we are emitting the HDP flush via the ring buffer, we need to
* do it before padding.
*/
if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
/* We pad to match fetch size */
while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
mb();
/* If we are emitting the HDP flush via MMIO, we need to do it after
* all CPU writes to VRAM finished.
*/
if (hdp_flush && rdev->asic->mmio_hdp_flush)
rdev->asic->mmio_hdp_flush(rdev);
radeon_ring_set_wptr(rdev, ring);
}
/**
* radeon_ring_unlock_commit - tell the GPU to execute the new
* commands on the ring buffer and unlock it
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @hdp_flush: Whether or not to perform an HDP cache flush
*
* Call radeon_ring_commit() then unlock the ring (all asics).
*/
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
bool hdp_flush)
{
radeon_ring_commit(rdev, ring, hdp_flush);
mutex_unlock(&rdev->ring_lock);
}
/**
* radeon_ring_undo - reset the wptr
*
* @ring: radeon_ring structure holding ring information
*
* Reset the driver's copy of the wptr (all asics).
*/
void radeon_ring_undo(struct radeon_ring *ring)
{
ring->wptr = ring->wptr_old;
}
/**
* radeon_ring_unlock_undo - reset the wptr and unlock the ring
*
* @rdev: radeon device structure
* @ring: radeon_ring structure holding ring information
*
* Call radeon_ring_undo() then unlock the ring (all asics).
*/
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_undo(ring);
mutex_unlock(&rdev->ring_lock);
}
/**
* radeon_ring_lockup_update - update lockup variables
*
* @rdev: radeon device structure
* @ring: radeon_ring structure holding ring information
*
* Update the last rptr value and timestamp (all asics).
*/
void radeon_ring_lockup_update(struct radeon_device *rdev,
struct radeon_ring *ring)
{
atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring));
atomic64_set(&ring->last_activity, jiffies_64);
}
/**
 * radeon_ring_test_lockup() - check if the ring appears to be locked up
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * Returns true if the ring has stalled for longer than the configured
 * lockup timeout, false if it is still making progress.
 */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
uint64_t last = atomic64_read(&ring->last_activity);
uint64_t elapsed;
if (rptr != atomic_read(&ring->last_rptr)) {
/* ring is still working, no lockup */
radeon_ring_lockup_update(rdev, ring);
return false;
}
elapsed = jiffies_to_msecs(jiffies_64 - last);
if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
dev_err(rdev->dev, "ring %d stalled for more than %llumsec\n",
ring->idx, elapsed);
return true;
}
/* give a chance to the GPU ... */
return false;
}
/**
* radeon_ring_backup - Back up the content of a ring
*
* @rdev: radeon_device pointer
* @ring: the ring we want to back up
* @data: placeholder for returned commit data
*
* Saves all unprocessed commits from a ring, returns the number of dwords saved.
*/
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
uint32_t **data)
{
unsigned size, ptr, i;
/* just in case lock the ring */
mutex_lock(&rdev->ring_lock);
*data = NULL;
if (ring->ring_obj == NULL) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
/* it doesn't make sense to save anything if all fences are signaled */
if (!radeon_fence_count_emitted(rdev, ring->idx)) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
/* calculate the number of dw on the ring */
if (ring->rptr_save_reg)
ptr = RREG32(ring->rptr_save_reg);
else if (rdev->wb.enabled)
ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
else {
/* no way to read back the next rptr */
mutex_unlock(&rdev->ring_lock);
return 0;
}
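	/* masked distance from the next rptr up to wptr = unexecuted dwords */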
size = ring->wptr + (ring->ring_size / 4);
size -= ptr;
size &= ring->ptr_mask;
if (size == 0) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
/* and then save the content of the ring */
*data = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
if (!*data) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
for (i = 0; i < size; ++i) {
(*data)[i] = ring->ring[ptr++];
ptr &= ring->ptr_mask;
}
mutex_unlock(&rdev->ring_lock);
return size;
}
/**
* radeon_ring_restore - append saved commands to the ring again
*
* @rdev: radeon_device pointer
* @ring: ring to append commands to
* @size: number of dwords we want to write
* @data: saved commands
*
 * Allocates space on the ring and restores the previously saved commands.
*/
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data)
{
int i, r;
if (!size || !data)
return 0;
/* restore the saved ring content */
r = radeon_ring_lock(rdev, ring, size);
if (r)
return r;
for (i = 0; i < size; ++i) {
radeon_ring_write(ring, data[i]);
}
radeon_ring_unlock_commit(rdev, ring, false);
kvfree(data);
return 0;
}
/**
* radeon_ring_init - init driver ring struct.
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @ring_size: size of the ring
* @rptr_offs: offset of the rptr writeback location in the WB buffer
* @nop: nop packet for this ring
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
unsigned rptr_offs, u32 nop)
{
int r;
ring->ring_size = ring_size;
ring->rptr_offs = rptr_offs;
ring->nop = nop;
ring->rdev = rdev;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, 0, NULL,
NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
r = radeon_bo_reserve(ring->ring_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
&ring->gpu_addr);
if (r) {
radeon_bo_unreserve(ring->ring_obj);
dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
r = radeon_bo_kmap(ring->ring_obj,
(void **)&ring->ring);
radeon_bo_unreserve(ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
radeon_debugfs_ring_init(rdev, ring);
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->ring_free_dw = ring->ring_size / 4;
if (rdev->wb.enabled) {
u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
}
radeon_ring_lockup_update(rdev, ring);
return 0;
}
/**
* radeon_ring_fini - tear down the driver ring struct.
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Tear down the driver information for the selected ring (all asics).
*/
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
struct radeon_bo *ring_obj;
mutex_lock(&rdev->ring_lock);
ring_obj = ring->ring_obj;
ring->ready = false;
ring->ring = NULL;
ring->ring_obj = NULL;
mutex_unlock(&rdev->ring_lock);
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(ring_obj);
radeon_bo_unpin(ring_obj);
radeon_bo_unreserve(ring_obj);
}
radeon_bo_unref(&ring_obj);
}
}
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_ring_info_show(struct seq_file *m, void *unused)
{
struct radeon_ring *ring = m->private;
struct radeon_device *rdev = ring->rdev;
uint32_t rptr, wptr, rptr_next;
unsigned count, i, j;
radeon_ring_free_size(rdev, ring);
count = (ring->ring_size / 4) - ring->ring_free_dw;
wptr = radeon_ring_get_wptr(rdev, ring);
seq_printf(m, "wptr: 0x%08x [%5d]\n",
wptr, wptr);
rptr = radeon_ring_get_rptr(rdev, ring);
seq_printf(m, "rptr: 0x%08x [%5d]\n",
rptr, rptr);
if (ring->rptr_save_reg) {
rptr_next = RREG32(ring->rptr_save_reg);
seq_printf(m, "rptr next(0x%04x): 0x%08x [%5d]\n",
ring->rptr_save_reg, rptr_next, rptr_next);
} else
rptr_next = ~0;
seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
ring->wptr, ring->wptr);
seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
ring->last_semaphore_signal_addr);
seq_printf(m, "last semaphore wait addr : 0x%016llx\n",
ring->last_semaphore_wait_addr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
if (!ring->ring)
return 0;
	/* print 32 dw before current rptr as often it's the last executed
	 * packet that is the root issue
	 */
i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
for (j = 0; j <= (count + 32); j++) {
seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
if (rptr == i)
seq_puts(m, " *");
if (rptr_next == i)
seq_puts(m, " #");
seq_puts(m, "\n");
i = (i + 1) & ring->ptr_mask;
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_ring_info);
static const char *radeon_debugfs_ring_idx_to_name(uint32_t ridx)
{
switch (ridx) {
case RADEON_RING_TYPE_GFX_INDEX:
return "radeon_ring_gfx";
case CAYMAN_RING_TYPE_CP1_INDEX:
return "radeon_ring_cp1";
case CAYMAN_RING_TYPE_CP2_INDEX:
return "radeon_ring_cp2";
case R600_RING_TYPE_DMA_INDEX:
return "radeon_ring_dma1";
case CAYMAN_RING_TYPE_DMA1_INDEX:
return "radeon_ring_dma2";
case R600_RING_TYPE_UVD_INDEX:
return "radeon_ring_uvd";
case TN_RING_TYPE_VCE1_INDEX:
return "radeon_ring_vce1";
case TN_RING_TYPE_VCE2_INDEX:
return "radeon_ring_vce2";
default:
return NULL;
}
}
#endif
static void radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
const char *ring_name = radeon_debugfs_ring_idx_to_name(ring->idx);
struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
if (ring_name)
debugfs_create_file(ring_name, 0444, root, ring,
&radeon_debugfs_ring_info_fops);
#endif
}
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra host1x Syncpoints
*
* Copyright (c) 2010-2013, NVIDIA Corporation.
*/
#ifndef __HOST1X_SYNCPT_H
#define __HOST1X_SYNCPT_H
#include <linux/atomic.h>
#include <linux/host1x.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include "fence.h"
#include "intr.h"
struct host1x;
/* Reserved for replacing an expired wait with a NOP */
#define HOST1X_SYNCPT_RESERVED 0
struct host1x_syncpt_base {
unsigned int id;
bool requested;
};
struct host1x_syncpt {
struct kref ref;
unsigned int id;
atomic_t min_val;
atomic_t max_val;
u32 base_val;
const char *name;
bool client_managed;
struct host1x *host;
struct host1x_syncpt_base *base;
/* interrupt data */
struct host1x_fence_list fences;
	/*
	 * If a submission incrementing this syncpoint fails, lock it so that
	 * further submissions cannot be made until the application has
	 * handled the failure.
	 */
bool locked;
};
/* Initialize sync point array */
int host1x_syncpt_init(struct host1x *host);
/* Free sync point array */
void host1x_syncpt_deinit(struct host1x *host);
/* Return number of sync points supported. */
unsigned int host1x_syncpt_nb_pts(struct host1x *host);
/* Return number of wait bases supported. */
unsigned int host1x_syncpt_nb_bases(struct host1x *host);
/* Return number of mlocks supported. */
unsigned int host1x_syncpt_nb_mlocks(struct host1x *host);
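/*
 * Read the locally tracked max value from the shadow register. A minimal
 * sketch of the accessor host1x_syncpt_check_max() below relies on, assumed
 * to pair the read barrier with the write side that updates max_val.
 */
static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
	smp_rmb();
	return (u32)atomic_read(&sp->max_val);
}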
/*
 * Check sync point sanity: the real (hardware) value must not be ahead of
 * the locally tracked max. If it is, there have been too many sync point
 * increments.
 *
 * Client managed sync points are not tracked.
 */
static inline bool host1x_syncpt_check_max(struct host1x_syncpt *sp, u32 real)
{
u32 max;
if (sp->client_managed)
return true;
max = host1x_syncpt_read_max(sp);
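	/*
	 * Signed difference makes the comparison wraparound-safe: true iff
	 * @real is at or behind @max in modular u32 distance.
	 */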
return (s32)(max - real) >= 0;
}
/* Return true if sync point is client managed. */
static inline bool host1x_syncpt_client_managed(struct host1x_syncpt *sp)
{
return sp->client_managed;
}
/*
* Returns true if syncpoint min == max, which means that there are no
* outstanding operations.
*/
static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
{
int min, max;
smp_rmb();
min = atomic_read(&sp->min_val);
max = atomic_read(&sp->max_val);
return (min == max);
}
/* Load current value from hardware to the shadow register. */
u32 host1x_syncpt_load(struct host1x_syncpt *sp);
/* Check if the given syncpoint value has already passed */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh);
/* Save host1x sync point state into shadow registers. */
void host1x_syncpt_save(struct host1x *host);
/* Reset host1x sync point state from shadow registers. */
void host1x_syncpt_restore(struct host1x *host);
/* Read current wait base value into shadow register and return it. */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
/* Indicate future operations by incrementing the sync point max. */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
/* Check if sync point id is valid. */
static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
{
return sp->id < host1x_syncpt_nb_pts(sp->host);
}
static inline void host1x_syncpt_set_locked(struct host1x_syncpt *sp)
{
sp->locked = true;
}
#endif
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-20 Sean Anderson <[email protected]>
* Copyright (C) 2020 Western Digital Corporation or its affiliates.
*/
/dts-v1/;
#include "k210.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
/ {
model = "Kendryte KD233";
compatible = "canaan,kendryte-kd233", "canaan,kendryte-k210";
aliases {
serial0 = &uarths0;
};
chosen {
bootargs = "earlycon console=ttySIF0";
stdout-path = "serial0:115200n8";
};
gpio-leds {
compatible = "gpio-leds";
led0 {
gpios = <&gpio0 8 GPIO_ACTIVE_LOW>;
};
led1 {
gpios = <&gpio0 9 GPIO_ACTIVE_LOW>;
};
};
gpio-keys {
compatible = "gpio-keys";
key {
label = "KEY0";
linux,code = <BTN_0>;
gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
};
};
};
&fpioa {
pinctrl-0 = <&jtag_pinctrl>;
pinctrl-names = "default";
jtag_pinctrl: jtag-pinmux {
pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>,
<K210_FPIOA(1, K210_PCF_JTAG_TDI)>,
<K210_FPIOA(2, K210_PCF_JTAG_TMS)>,
<K210_FPIOA(3, K210_PCF_JTAG_TDO)>;
};
uarths_pinctrl: uarths-pinmux {
pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>,
<K210_FPIOA(5, K210_PCF_UARTHS_TX)>;
};
spi0_pinctrl: spi0-pinmux {
pinmux = <K210_FPIOA(6, K210_PCF_GPIOHS20)>, /* cs */
<K210_FPIOA(7, K210_PCF_SPI0_SCLK)>, /* wr */
<K210_FPIOA(8, K210_PCF_GPIOHS21)>; /* dc */
};
dvp_pinctrl: dvp-pinmux {
pinmux = <K210_FPIOA(9, K210_PCF_SCCB_SCLK)>,
<K210_FPIOA(10, K210_PCF_SCCB_SDA)>,
<K210_FPIOA(11, K210_PCF_DVP_RST)>,
<K210_FPIOA(12, K210_PCF_DVP_VSYNC)>,
<K210_FPIOA(13, K210_PCF_DVP_PWDN)>,
<K210_FPIOA(14, K210_PCF_DVP_XCLK)>,
<K210_FPIOA(15, K210_PCF_DVP_PCLK)>,
<K210_FPIOA(17, K210_PCF_DVP_HSYNC)>;
};
gpiohs_pinctrl: gpiohs-pinmux {
pinmux = <K210_FPIOA(16, K210_PCF_GPIOHS0)>,
<K210_FPIOA(20, K210_PCF_GPIOHS4)>, /* Rot. dip sw line 8 */
<K210_FPIOA(21, K210_PCF_GPIOHS5)>, /* Rot. dip sw line 4 */
<K210_FPIOA(22, K210_PCF_GPIOHS6)>, /* Rot. dip sw line 2 */
<K210_FPIOA(23, K210_PCF_GPIOHS7)>, /* Rot. dip sw line 1 */
<K210_FPIOA(24, K210_PCF_GPIOHS8)>,
<K210_FPIOA(25, K210_PCF_GPIOHS9)>,
<K210_FPIOA(26, K210_PCF_GPIOHS10)>;
};
spi1_pinctrl: spi1-pinmux {
pinmux = <K210_FPIOA(29, K210_PCF_SPI1_SCLK)>,
<K210_FPIOA(30, K210_PCF_SPI1_D0)>,
<K210_FPIOA(31, K210_PCF_SPI1_D1)>,
<K210_FPIOA(32, K210_PCF_GPIOHS16)>; /* cs */
};
i2s0_pinctrl: i2s0-pinmux {
pinmux = <K210_FPIOA(33, K210_PCF_I2S0_IN_D0)>,
<K210_FPIOA(34, K210_PCF_I2S0_WS)>,
<K210_FPIOA(35, K210_PCF_I2S0_SCLK)>;
};
};
&uarths0 {
pinctrl-0 = <&uarths_pinctrl>;
pinctrl-names = "default";
status = "okay";
};
&gpio0 {
pinctrl-0 = <&gpiohs_pinctrl>;
pinctrl-names = "default";
status = "okay";
};
&i2s0 {
#sound-dai-cells = <1>;
pinctrl-0 = <&i2s0_pinctrl>;
pinctrl-names = "default";
status = "okay";
};
&spi0 {
pinctrl-0 = <&spi0_pinctrl>;
pinctrl-names = "default";
num-cs = <1>;
cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
status = "okay";
panel@0 {
compatible = "canaan,kd233-tft", "ilitek,ili9341";
reg = <0>;
dc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>;
spi-max-frequency = <10000000>;
status = "disabled";
};
};
&spi1 {
pinctrl-0 = <&spi1_pinctrl>;
pinctrl-names = "default";
num-cs = <1>;
cs-gpios = <&gpio0 16 GPIO_ACTIVE_LOW>;
status = "okay";
mmc@0 {
compatible = "mmc-spi-slot";
reg = <0>;
voltage-ranges = <3300 3300>;
spi-max-frequency = <25000000>;
broken-cd;
};
};
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_TWO_STATE_LOCK_H
#define _BCACHEFS_TWO_STATE_LOCK_H
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include "util.h"
/*
 * Two-state lock - can be taken for add or block - both states are shared,
 * like the read side of an rwsem, but conflict with the other state:
 */
typedef struct {
atomic_long_t v;
wait_queue_head_t wait;
} two_state_lock_t;
static inline void two_state_lock_init(two_state_lock_t *lock)
{
atomic_long_set(&lock->v, 0);
init_waitqueue_head(&lock->wait);
}
static inline void bch2_two_state_unlock(two_state_lock_t *lock, int s)
{
long i = s ? 1 : -1;
EBUG_ON(atomic_long_read(&lock->v) == 0);
if (atomic_long_sub_return_release(i, &lock->v) == 0)
wake_up_all(&lock->wait);
}
static inline bool bch2_two_state_trylock(two_state_lock_t *lock, int s)
{
long i = s ? 1 : -1;
long old;
old = atomic_long_read(&lock->v);
do {
if (i > 0 ? old < 0 : old > 0)
return false;
} while (!atomic_long_try_cmpxchg_acquire(&lock->v, &old, old + i));
return true;
}
void __bch2_two_state_lock(two_state_lock_t *, int);
static inline void bch2_two_state_lock(two_state_lock_t *lock, int s)
{
if (!bch2_two_state_trylock(lock, s))
__bch2_two_state_lock(lock, s);
}
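/*
 * Usage sketch (illustrative only, not part of this header): holders of the
 * same state share the lock, holders of opposite states exclude each other.
 * The function and the state assignments below are hypothetical.
 */
static inline void two_state_lock_sketch(two_state_lock_t *lock)
{
	bch2_two_state_lock(lock, 1);	/* shared with other state-1 holders */
	/* ... work that must exclude all state-0 holders ... */
	bch2_two_state_unlock(lock, 1);

	bch2_two_state_lock(lock, 0);	/* shared with other state-0 holders */
	/* ... work that must exclude all state-1 holders ... */
	bch2_two_state_unlock(lock, 0);
}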
#endif /* _BCACHEFS_TWO_STATE_LOCK_H */
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2017 Free Electrons
* Maxime Ripard <[email protected]>
*/
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
#include "sun4i_lvds.h"
struct sun4i_lvds {
struct drm_connector connector;
struct drm_encoder encoder;
struct drm_panel *panel;
};
static inline struct sun4i_lvds *
drm_connector_to_sun4i_lvds(struct drm_connector *connector)
{
return container_of(connector, struct sun4i_lvds,
connector);
}
static inline struct sun4i_lvds *
drm_encoder_to_sun4i_lvds(struct drm_encoder *encoder)
{
return container_of(encoder, struct sun4i_lvds,
encoder);
}
static int sun4i_lvds_get_modes(struct drm_connector *connector)
{
struct sun4i_lvds *lvds =
drm_connector_to_sun4i_lvds(connector);
return drm_panel_get_modes(lvds->panel, connector);
}
static const struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = {
.get_modes = sun4i_lvds_get_modes,
};
static void
sun4i_lvds_connector_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
}
static const struct drm_connector_funcs sun4i_lvds_con_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = sun4i_lvds_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder)
{
struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
DRM_DEBUG_DRIVER("Enabling LVDS output\n");
if (lvds->panel) {
drm_panel_prepare(lvds->panel);
drm_panel_enable(lvds->panel);
}
}
static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
{
struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
DRM_DEBUG_DRIVER("Disabling LVDS output\n");
if (lvds->panel) {
drm_panel_disable(lvds->panel);
drm_panel_unprepare(lvds->panel);
}
}
static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
.disable = sun4i_lvds_encoder_disable,
.enable = sun4i_lvds_encoder_enable,
};
int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
struct drm_encoder *encoder;
struct drm_bridge *bridge;
struct sun4i_lvds *lvds;
int ret;
lvds = devm_kzalloc(drm->dev, sizeof(*lvds), GFP_KERNEL);
if (!lvds)
return -ENOMEM;
encoder = &lvds->encoder;
ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
&lvds->panel, &bridge);
if (ret) {
dev_info(drm->dev, "No panel or bridge found... LVDS output disabled\n");
return 0;
}
drm_encoder_helper_add(&lvds->encoder,
&sun4i_lvds_enc_helper_funcs);
ret = drm_simple_encoder_init(drm, &lvds->encoder,
DRM_MODE_ENCODER_LVDS);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the lvds encoder\n");
goto err_out;
}
/* The LVDS encoder can only work with the TCON channel 0 */
lvds->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
if (lvds->panel) {
drm_connector_helper_add(&lvds->connector,
&sun4i_lvds_con_helper_funcs);
ret = drm_connector_init(drm, &lvds->connector,
&sun4i_lvds_con_funcs,
DRM_MODE_CONNECTOR_LVDS);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the lvds connector\n");
goto err_cleanup_connector;
}
drm_connector_attach_encoder(&lvds->connector,
&lvds->encoder);
}
if (bridge) {
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
goto err_cleanup_connector;
}
return 0;
err_cleanup_connector:
drm_encoder_cleanup(&lvds->encoder);
err_out:
return ret;
}
EXPORT_SYMBOL(sun4i_lvds_init);
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_PANEL_H_
#define _DP_PANEL_H_
#include <drm/msm_drm.h>
#include "dp_aux.h"
#include "dp_link.h"
struct edid;
struct msm_dp_display_mode {
struct drm_display_mode drm_mode;
u32 bpp;
u32 h_active_low;
u32 v_active_low;
bool out_fmt_is_yuv_420;
};
struct msm_dp_panel_in {
struct device *dev;
struct drm_dp_aux *aux;
struct msm_dp_link *link;
struct msm_dp_catalog *catalog;
};
struct msm_dp_panel_psr {
u8 version;
u8 capabilities;
};
struct msm_dp_panel {
/* dpcd raw data */
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct msm_dp_link_info link_info;
const struct drm_edid *drm_edid;
struct drm_connector *connector;
struct msm_dp_display_mode msm_dp_mode;
struct msm_dp_panel_psr psr_cap;
bool video_test;
bool vsc_sdp_supported;
u32 max_dp_lanes;
u32 max_dp_link_rate;
u32 max_bw_code;
};
int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_deinit(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel);
void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector);
u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_max_bpp,
u32 mode_pclk_khz);
int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector);
void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel);
void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable);
/**
 * is_link_rate_valid() - validates the link rate
 * @bw_code: bandwidth code of the link rate requested by the sink
 *
 * Returns true if the requested link rate is supported.
 */
static inline bool is_link_rate_valid(u32 bw_code)
{
return (bw_code == DP_LINK_BW_1_62 ||
bw_code == DP_LINK_BW_2_7 ||
bw_code == DP_LINK_BW_5_4 ||
bw_code == DP_LINK_BW_8_1);
}
/**
 * is_lane_count_valid() - validates the lane count
 * @lane_count: lane count requested by the sink
 *
 * Returns true if the requested lane count is supported.
 */
static inline bool is_lane_count_valid(u32 lane_count)
{
return (lane_count == 1 ||
lane_count == 2 ||
lane_count == 4);
}
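/*
 * Illustrative sketch (not part of this header): the two validators above
 * are typically applied together to sink-requested link parameters. The
 * helper name is hypothetical.
 */
static inline bool example_link_params_valid(u32 bw_code, u32 lane_count)
{
	return is_link_rate_valid(bw_code) && is_lane_count_valid(lane_count);
}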
struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in);
void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel);
#endif /* _DP_PANEL_H_ */
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __HID_ROCCAT_ARVO_H
#define __HID_ROCCAT_ARVO_H
/*
* Copyright (c) 2011 Stefan Achatz <[email protected]>
*/
#include <linux/types.h>
struct arvo_mode_key { /* 2 bytes */
uint8_t command; /* ARVO_COMMAND_MODE_KEY */
uint8_t state;
} __packed;
struct arvo_button {
uint8_t unknown[24];
} __packed;
struct arvo_info {
uint8_t unknown[8];
} __packed;
struct arvo_key_mask { /* 2 bytes */
uint8_t command; /* ARVO_COMMAND_KEY_MASK */
uint8_t key_mask;
} __packed;
/* selected profile is persistent */
struct arvo_actual_profile { /* 2 bytes */
uint8_t command; /* ARVO_COMMAND_ACTUAL_PROFILE */
uint8_t actual_profile;
} __packed;
enum arvo_commands {
ARVO_COMMAND_MODE_KEY = 0x3,
ARVO_COMMAND_BUTTON = 0x4,
ARVO_COMMAND_INFO = 0x5,
ARVO_COMMAND_KEY_MASK = 0x6,
ARVO_COMMAND_ACTUAL_PROFILE = 0x7,
};
struct arvo_special_report {
uint8_t unknown1; /* always 0x01 */
uint8_t event;
uint8_t unknown2; /* always 0x70 */
} __packed;
enum arvo_special_report_events {
ARVO_SPECIAL_REPORT_EVENT_ACTION_PRESS = 0x10,
ARVO_SPECIAL_REPORT_EVENT_ACTION_RELEASE = 0x0,
};
enum arvo_special_report_event_masks {
ARVO_SPECIAL_REPORT_EVENT_MASK_ACTION = 0xf0,
ARVO_SPECIAL_REPORT_EVENT_MASK_BUTTON = 0x0f,
};
struct arvo_roccat_report {
uint8_t profile;
uint8_t button;
uint8_t action;
} __packed;
enum arvo_roccat_report_action {
ARVO_ROCCAT_REPORT_ACTION_RELEASE = 0,
ARVO_ROCCAT_REPORT_ACTION_PRESS = 1,
};
struct arvo_device {
int roccat_claimed;
int chrdev_minor;
struct mutex arvo_lock;
int actual_profile;
};
#endif
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* Copyright (c) 2018 Red Hat, Inc.
* All rights reserved.
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_defer.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_group.h"
/*
* xfs_initialize_perag_data
*
* Read in each per-ag structure so we can count up the number of
* allocated inodes, free inodes and used filesystem blocks as this
* information is no longer persistent in the superblock. Once we have
* this information, write it into the in-core superblock structure.
*/
int
xfs_initialize_perag_data(
struct xfs_mount *mp,
xfs_agnumber_t agcount)
{
xfs_agnumber_t index;
struct xfs_perag *pag;
struct xfs_sb *sbp = &mp->m_sb;
uint64_t ifree = 0;
uint64_t ialloc = 0;
uint64_t bfree = 0;
uint64_t bfreelst = 0;
uint64_t btree = 0;
uint64_t fdblocks;
int error = 0;
for (index = 0; index < agcount; index++) {
/*
* Read the AGF and AGI buffers to populate the per-ag
* structures for us.
*/
pag = xfs_perag_get(mp, index);
error = xfs_alloc_read_agf(pag, NULL, 0, NULL);
if (!error)
error = xfs_ialloc_read_agi(pag, NULL, 0, NULL);
if (error) {
xfs_perag_put(pag);
return error;
}
ifree += pag->pagi_freecount;
ialloc += pag->pagi_count;
bfree += pag->pagf_freeblks;
bfreelst += pag->pagf_flcount;
btree += pag->pagf_btreeblks;
xfs_perag_put(pag);
}
fdblocks = bfree + bfreelst + btree;
/*
* If the new summary counts are obviously incorrect, fail the
* mount operation because that implies the AGFs are also corrupt.
* Clear FS_COUNTERS so that we don't unmount with a dirty log, which
* will prevent xfs_repair from fixing anything.
*/
if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
error = -EFSCORRUPTED;
goto out;
}
/* Overwrite incore superblock counters with just-read data */
spin_lock(&mp->m_sb_lock);
sbp->sb_ifree = ifree;
sbp->sb_icount = ialloc;
sbp->sb_fdblocks = fdblocks;
spin_unlock(&mp->m_sb_lock);
xfs_reinit_percpu_counters(mp);
	xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
out:
return error;
}
static void
xfs_perag_uninit(
struct xfs_group *xg)
{
#ifdef __KERNEL__
struct xfs_perag *pag = to_perag(xg);
cancel_delayed_work_sync(&pag->pag_blockgc_work);
xfs_buf_cache_destroy(&pag->pag_bcache);
#endif
}
/*
* Free up the per-ag resources within the specified AG range.
*/
void
xfs_free_perag_range(
struct xfs_mount *mp,
xfs_agnumber_t first_agno,
xfs_agnumber_t end_agno)
{
xfs_agnumber_t agno;
for (agno = first_agno; agno < end_agno; agno++)
xfs_group_free(mp, agno, XG_TYPE_AG, xfs_perag_uninit);
}
/* Find the size of the AG, in blocks. */
static xfs_agblock_t
__xfs_ag_block_count(
struct xfs_mount *mp,
xfs_agnumber_t agno,
xfs_agnumber_t agcount,
xfs_rfsblock_t dblocks)
{
ASSERT(agno < agcount);
if (agno < agcount - 1)
return mp->m_sb.sb_agblocks;
return dblocks - (agno * mp->m_sb.sb_agblocks);
}
xfs_agblock_t
xfs_ag_block_count(
struct xfs_mount *mp,
xfs_agnumber_t agno)
{
return __xfs_ag_block_count(mp, agno, mp->m_sb.sb_agcount,
mp->m_sb.sb_dblocks);
}
/* Calculate the first and last possible inode number in an AG. */
static void
__xfs_agino_range(
struct xfs_mount *mp,
xfs_agblock_t eoag,
xfs_agino_t *first,
xfs_agino_t *last)
{
xfs_agblock_t bno;
/*
* Calculate the first inode, which will be in the first
* cluster-aligned block after the AGFL.
*/
bno = round_up(XFS_AGFL_BLOCK(mp) + 1, M_IGEO(mp)->cluster_align);
*first = XFS_AGB_TO_AGINO(mp, bno);
/*
* Calculate the last inode, which will be at the end of the
* last (aligned) cluster that can be allocated in the AG.
*/
bno = round_down(eoag, M_IGEO(mp)->cluster_align);
*last = XFS_AGB_TO_AGINO(mp, bno) - 1;
}
void
xfs_agino_range(
struct xfs_mount *mp,
xfs_agnumber_t agno,
xfs_agino_t *first,
xfs_agino_t *last)
{
return __xfs_agino_range(mp, xfs_ag_block_count(mp, agno), first, last);
}
/*
* Update the perag of the previous tail AG if it has been changed during
* recovery (i.e. recovery of a growfs).
*/
int
xfs_update_last_ag_size(
struct xfs_mount *mp,
xfs_agnumber_t prev_agcount)
{
struct xfs_perag *pag = xfs_perag_grab(mp, prev_agcount - 1);
if (!pag)
return -EFSCORRUPTED;
pag_group(pag)->xg_block_count = __xfs_ag_block_count(mp,
prev_agcount - 1, mp->m_sb.sb_agcount,
mp->m_sb.sb_dblocks);
__xfs_agino_range(mp, pag_group(pag)->xg_block_count, &pag->agino_min,
&pag->agino_max);
xfs_perag_rele(pag);
return 0;
}
static int
xfs_perag_alloc(
struct xfs_mount *mp,
xfs_agnumber_t index,
xfs_agnumber_t agcount,
xfs_rfsblock_t dblocks)
{
struct xfs_perag *pag;
int error;
pag = kzalloc(sizeof(*pag), GFP_KERNEL);
if (!pag)
return -ENOMEM;
#ifdef __KERNEL__
/* Place kernel structure only init below this point. */
spin_lock_init(&pag->pag_ici_lock);
INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
#endif /* __KERNEL__ */
error = xfs_buf_cache_init(&pag->pag_bcache);
if (error)
goto out_free_perag;
/*
* Pre-calculated geometry
*/
pag_group(pag)->xg_block_count = __xfs_ag_block_count(mp, index, agcount,
dblocks);
pag_group(pag)->xg_min_gbno = XFS_AGFL_BLOCK(mp) + 1;
__xfs_agino_range(mp, pag_group(pag)->xg_block_count, &pag->agino_min,
&pag->agino_max);
error = xfs_group_insert(mp, pag_group(pag), index, XG_TYPE_AG);
if (error)
goto out_buf_cache_destroy;
return 0;
out_buf_cache_destroy:
xfs_buf_cache_destroy(&pag->pag_bcache);
out_free_perag:
kfree(pag);
return error;
}
int
xfs_initialize_perag(
struct xfs_mount *mp,
xfs_agnumber_t orig_agcount,
xfs_agnumber_t new_agcount,
xfs_rfsblock_t dblocks,
xfs_agnumber_t *maxagi)
{
xfs_agnumber_t index;
int error;
if (orig_agcount >= new_agcount)
return 0;
for (index = orig_agcount; index < new_agcount; index++) {
error = xfs_perag_alloc(mp, index, new_agcount, dblocks);
if (error)
goto out_unwind_new_pags;
}
*maxagi = xfs_set_inode_alloc(mp, new_agcount);
mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
return 0;
out_unwind_new_pags:
xfs_free_perag_range(mp, orig_agcount, index);
return error;
}
static int
xfs_get_aghdr_buf(
struct xfs_mount *mp,
xfs_daddr_t blkno,
size_t numblks,
struct xfs_buf **bpp,
const struct xfs_buf_ops *ops)
{
struct xfs_buf *bp;
int error;
error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
if (error)
return error;
bp->b_maps[0].bm_bn = blkno;
bp->b_ops = ops;
*bpp = bp;
return 0;
}
/*
* Generic btree root block init function
*/
static void
xfs_btroot_init(
struct xfs_mount *mp,
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);
}
/* Finish initializing a free space btree. */
static void
xfs_freesp_init_recs(
struct xfs_mount *mp,
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
struct xfs_alloc_rec *arec;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
if (xfs_ag_contains_log(mp, id->agno)) {
struct xfs_alloc_rec *nrec;
xfs_agblock_t start = XFS_FSB_TO_AGBNO(mp,
mp->m_sb.sb_logstart);
ASSERT(start >= mp->m_ag_prealloc_blocks);
if (start != mp->m_ag_prealloc_blocks) {
/*
* Modify first record to pad stripe align of log and
* bump the record count.
*/
arec->ar_blockcount = cpu_to_be32(start -
mp->m_ag_prealloc_blocks);
be16_add_cpu(&block->bb_numrecs, 1);
nrec = arec + 1;
/*
* Insert second record at start of internal log
* which then gets trimmed.
*/
nrec->ar_startblock = cpu_to_be32(
be32_to_cpu(arec->ar_startblock) +
be32_to_cpu(arec->ar_blockcount));
arec = nrec;
}
/*
* Change record start to after the internal log
*/
be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
}
/*
* Calculate the block count of this record; if it is nonzero,
* increment the record count.
*/
arec->ar_blockcount = cpu_to_be32(id->agsize -
be32_to_cpu(arec->ar_startblock));
if (arec->ar_blockcount)
be16_add_cpu(&block->bb_numrecs, 1);
}
/*
* bnobt/cntbt btree root block init functions
*/
static void
xfs_bnoroot_init(
struct xfs_mount *mp,
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);
xfs_freesp_init_recs(mp, bp, id);
}
/*
* Reverse map root block init
*/
static void
xfs_rmaproot_init(
struct xfs_mount *mp,
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
struct xfs_rmap_rec *rrec;
xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 4, id->agno);
	/*
	 * Mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so
	 * its location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions.
	 */
rrec = XFS_RMAP_REC_ADDR(block, 1);
rrec->rm_startblock = 0;
rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
rrec->rm_offset = 0;
/* account freespace btree root blocks */
rrec = XFS_RMAP_REC_ADDR(block, 2);
rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
rrec->rm_blockcount = cpu_to_be32(2);
rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
rrec->rm_offset = 0;
/* account inode btree root blocks */
rrec = XFS_RMAP_REC_ADDR(block, 3);
rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
XFS_IBT_BLOCK(mp));
rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
rrec->rm_offset = 0;
/* account for rmap btree root */
rrec = XFS_RMAP_REC_ADDR(block, 4);
rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
rrec->rm_blockcount = cpu_to_be32(1);
rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
rrec->rm_offset = 0;
/* account for refc btree root */
if (xfs_has_reflink(mp)) {
rrec = XFS_RMAP_REC_ADDR(block, 5);
rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
rrec->rm_blockcount = cpu_to_be32(1);
rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
rrec->rm_offset = 0;
be16_add_cpu(&block->bb_numrecs, 1);
}
/* account for the log space */
if (xfs_ag_contains_log(mp, id->agno)) {
rrec = XFS_RMAP_REC_ADDR(block,
be16_to_cpu(block->bb_numrecs) + 1);
rrec->rm_startblock = cpu_to_be32(
XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart));
rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks);
rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
rrec->rm_offset = 0;
be16_add_cpu(&block->bb_numrecs, 1);
}
}
/*
* Initialise new secondary superblocks with the pre-grow geometry, but mark
* them as "in progress" so we know they haven't yet been activated. This will
* get cleared when the update with the new geometry information is done after
* changes to the primary are committed. This isn't strictly necessary, but we
* get it for free with the delayed buffer write lists and it means we can tell
* if a grow operation didn't complete properly after the fact.
*/
static void
xfs_sbblock_init(
struct xfs_mount *mp,
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
struct xfs_dsb *dsb = bp->b_addr;
xfs_sb_to_disk(dsb, &mp->m_sb);
dsb->sb_inprogress = 1;
}
static void
xfs_agfblock_init(
struct xfs_mount *mp,
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
struct xfs_agf *agf = bp->b_addr;
xfs_extlen_t tmpsize;
agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
agf->agf_seqno = cpu_to_be32(id->agno);
agf->agf_length = cpu_to_be32(id->agsize);
agf->agf_bno_root = cpu_to_be32(XFS_BNO_BLOCK(mp));
agf->agf_cnt_root = cpu_to_be32(XFS_CNT_BLOCK(mp));
agf->agf_bno_level = cpu_to_be32(1);
agf->agf_cnt_level = cpu_to_be32(1);
if (xfs_has_rmapbt(mp)) {
agf->agf_rmap_root = cpu_to_be32(XFS_RMAP_BLOCK(mp));
agf->agf_rmap_level = cpu_to_be32(1);
agf->agf_rmap_blocks = cpu_to_be32(1);
}
agf->agf_flfirst = cpu_to_be32(1);
agf->agf_fllast = 0;
agf->agf_flcount = 0;
tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
agf->agf_freeblks = cpu_to_be32(tmpsize);
agf->agf_longest = cpu_to_be32(tmpsize);
if (xfs_has_crc(mp))
uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
if (xfs_has_reflink(mp)) {
agf->agf_refcount_root = cpu_to_be32(
xfs_refc_block(mp));
agf->agf_refcount_level = cpu_to_be32(1);
agf->agf_refcount_blocks = cpu_to_be32(1);
}
if (xfs_ag_contains_log(mp, id->agno)) {
int64_t logblocks = mp->m_sb.sb_logblocks;
be32_add_cpu(&agf->agf_freeblks, -logblocks);
agf->agf_longest = cpu_to_be32(id->agsize -
XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart) - logblocks);
}
}
static void
xfs_agflblock_init(
struct xfs_mount *mp,
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
__be32 *agfl_bno;
int bucket;
if (xfs_has_crc(mp)) {
agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
agfl->agfl_seqno = cpu_to_be32(id->agno);
uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
}
agfl_bno = xfs_buf_to_agfl_bno(bp);
for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}
static void
xfs_agiblock_init(
struct xfs_mount *mp,
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
struct xfs_agi *agi = bp->b_addr;
int bucket;
agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
agi->agi_seqno = cpu_to_be32(id->agno);
agi->agi_length = cpu_to_be32(id->agsize);
agi->agi_count = 0;
agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
agi->agi_level = cpu_to_be32(1);
agi->agi_freecount = 0;
agi->agi_newino = cpu_to_be32(NULLAGINO);
agi->agi_dirino = cpu_to_be32(NULLAGINO);
if (xfs_has_crc(mp))
uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
if (xfs_has_finobt(mp)) {
agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
agi->agi_free_level = cpu_to_be32(1);
}
for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
if (xfs_has_inobtcounts(mp)) {
agi->agi_iblocks = cpu_to_be32(1);
if (xfs_has_finobt(mp))
agi->agi_fblocks = cpu_to_be32(1);
}
}
typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
struct aghdr_init_data *id);
static int
xfs_ag_init_hdr(
struct xfs_mount *mp,
struct aghdr_init_data *id,
aghdr_init_work_f work,
const struct xfs_buf_ops *ops)
{
struct xfs_buf *bp;
int error;
error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
if (error)
return error;
(*work)(mp, bp, id);
xfs_buf_delwri_queue(bp, &id->buffer_list);
xfs_buf_relse(bp);
return 0;
}
struct xfs_aghdr_grow_data {
xfs_daddr_t daddr;
size_t numblks;
const struct xfs_buf_ops *ops;
aghdr_init_work_f work;
const struct xfs_btree_ops *bc_ops;
bool need_init;
};
/*
* Prepare new AG headers to be written to disk. We use uncached buffers here,
* as it is assumed these new AG headers are currently beyond the currently
* valid filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
*
* This is a non-transactional function, but the prepared buffers are added to a
* delayed write buffer list supplied by the caller so they can submit them to
* disk and wait on them as required.
*/
int
xfs_ag_init_headers(
struct xfs_mount *mp,
struct aghdr_init_data *id)
{
struct xfs_aghdr_grow_data aghdr_data[] = {
{ /* SB */
.daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
.numblks = XFS_FSS_TO_BB(mp, 1),
.ops = &xfs_sb_buf_ops,
.work = &xfs_sbblock_init,
.need_init = true
},
{ /* AGF */
.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
.numblks = XFS_FSS_TO_BB(mp, 1),
.ops = &xfs_agf_buf_ops,
.work = &xfs_agfblock_init,
.need_init = true
},
{ /* AGFL */
.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
.numblks = XFS_FSS_TO_BB(mp, 1),
.ops = &xfs_agfl_buf_ops,
.work = &xfs_agflblock_init,
.need_init = true
},
{ /* AGI */
.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
.numblks = XFS_FSS_TO_BB(mp, 1),
.ops = &xfs_agi_buf_ops,
.work = &xfs_agiblock_init,
.need_init = true
},
{ /* BNO root block */
.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_bnobt_buf_ops,
.work = &xfs_bnoroot_init,
.bc_ops = &xfs_bnobt_ops,
.need_init = true
},
{ /* CNT root block */
.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_cntbt_buf_ops,
.work = &xfs_bnoroot_init,
.bc_ops = &xfs_cntbt_ops,
.need_init = true
},
{ /* INO root block */
.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_inobt_buf_ops,
.work = &xfs_btroot_init,
.bc_ops = &xfs_inobt_ops,
.need_init = true
},
{ /* FINO root block */
.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_finobt_buf_ops,
.work = &xfs_btroot_init,
.bc_ops = &xfs_finobt_ops,
.need_init = xfs_has_finobt(mp)
},
{ /* RMAP root block */
.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_rmapbt_buf_ops,
.work = &xfs_rmaproot_init,
.bc_ops = &xfs_rmapbt_ops,
.need_init = xfs_has_rmapbt(mp)
},
{ /* REFC root block */
.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_refcountbt_buf_ops,
.work = &xfs_btroot_init,
.bc_ops = &xfs_refcountbt_ops,
.need_init = xfs_has_reflink(mp)
},
{ /* NULL terminating block */
.daddr = XFS_BUF_DADDR_NULL,
}
};
struct xfs_aghdr_grow_data *dp;
int error = 0;
/* Account for AG free space in new AG */
id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
if (!dp->need_init)
continue;
id->daddr = dp->daddr;
id->numblks = dp->numblks;
id->bc_ops = dp->bc_ops;
error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
if (error)
break;
}
return error;
}
int
xfs_ag_shrink_space(
struct xfs_perag *pag,
struct xfs_trans **tpp,
xfs_extlen_t delta)
{
struct xfs_mount *mp = pag_mount(pag);
struct xfs_alloc_arg args = {
.tp = *tpp,
.mp = mp,
.pag = pag,
.minlen = delta,
.maxlen = delta,
.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE,
.resv = XFS_AG_RESV_NONE,
.prod = 1
};
struct xfs_buf *agibp, *agfbp;
struct xfs_agi *agi;
struct xfs_agf *agf;
xfs_agblock_t aglen;
int error, err2;
ASSERT(pag_agno(pag) == mp->m_sb.sb_agcount - 1);
error = xfs_ialloc_read_agi(pag, *tpp, 0, &agibp);
if (error)
return error;
agi = agibp->b_addr;
error = xfs_alloc_read_agf(pag, *tpp, 0, &agfbp);
if (error)
return error;
agf = agfbp->b_addr;
aglen = be32_to_cpu(agi->agi_length);
/* some extra paranoid checks before we shrink the ag */
if (XFS_IS_CORRUPT(mp, agf->agf_length != agi->agi_length)) {
xfs_ag_mark_sick(pag, XFS_SICK_AG_AGF);
return -EFSCORRUPTED;
}
if (delta >= aglen)
return -EINVAL;
/*
* Make sure that the last inode cluster cannot overlap with the new
* end of the AG, even if it's sparse.
*/
error = xfs_ialloc_check_shrink(pag, *tpp, agibp, aglen - delta);
if (error)
return error;
	/*
	 * Disable perag reservations so they don't cause the allocation
	 * request to fail. We'll reestablish the reservation before we return.
	 */
xfs_ag_resv_free(pag);
/* internal log shouldn't also show up in the free space btrees */
error = xfs_alloc_vextent_exact_bno(&args,
xfs_agbno_to_fsb(pag, aglen - delta));
if (!error && args.agbno == NULLAGBLOCK)
error = -ENOSPC;
if (error) {
/*
* If extent allocation fails, need to roll the transaction to
* ensure that the AGFL fixup has been committed anyway.
*
* We need to hold the AGF across the roll to ensure nothing can
* access the AG for allocation until the shrink is fully
* cleaned up. And due to the resetting of the AG block
* reservation space needing to lock the AGI, we also have to
* hold that so we don't get AGI/AGF lock order inversions in
* the error handling path.
*/
xfs_trans_bhold(*tpp, agfbp);
xfs_trans_bhold(*tpp, agibp);
err2 = xfs_trans_roll(tpp);
if (err2)
return err2;
xfs_trans_bjoin(*tpp, agfbp);
xfs_trans_bjoin(*tpp, agibp);
goto resv_init_out;
}
	/*
	 * If the space was successfully removed from the freespace btrees, we
	 * need to confirm that the per-AG reservation still works as expected.
	 */
be32_add_cpu(&agi->agi_length, -delta);
be32_add_cpu(&agf->agf_length, -delta);
err2 = xfs_ag_resv_init(pag, *tpp);
if (err2) {
be32_add_cpu(&agi->agi_length, delta);
be32_add_cpu(&agf->agf_length, delta);
if (err2 != -ENOSPC)
goto resv_err;
err2 = xfs_free_extent_later(*tpp, args.fsbno, delta, NULL,
XFS_AG_RESV_NONE, XFS_FREE_EXTENT_SKIP_DISCARD);
if (err2)
goto resv_err;
/*
* Roll the transaction before trying to re-init the per-ag
* reservation. The new transaction is clean so it will cancel
* without any side effects.
*/
error = xfs_defer_finish(tpp);
if (error)
return error;
error = -ENOSPC;
goto resv_init_out;
}
/* Update perag geometry */
pag_group(pag)->xg_block_count -= delta;
__xfs_agino_range(mp, pag_group(pag)->xg_block_count, &pag->agino_min,
&pag->agino_max);
xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
return 0;
resv_init_out:
err2 = xfs_ag_resv_init(pag, *tpp);
if (!err2)
return error;
resv_err:
xfs_warn(mp, "Error %d reserving per-AG metadata reserve pool.", err2);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
return err2;
}
/*
 * Extend the AG indicated by @pag by the length passed in.
 */
int
xfs_ag_extend_space(
struct xfs_perag *pag,
struct xfs_trans *tp,
xfs_extlen_t len)
{
struct xfs_mount *mp = pag_mount(pag);
struct xfs_buf *bp;
struct xfs_agi *agi;
struct xfs_agf *agf;
int error;
ASSERT(pag_agno(pag) == mp->m_sb.sb_agcount - 1);
error = xfs_ialloc_read_agi(pag, tp, 0, &bp);
if (error)
return error;
agi = bp->b_addr;
be32_add_cpu(&agi->agi_length, len);
xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
/*
* Change agf length.
*/
error = xfs_alloc_read_agf(pag, tp, 0, &bp);
if (error)
return error;
agf = bp->b_addr;
be32_add_cpu(&agf->agf_length, len);
ASSERT(agf->agf_length == agi->agi_length);
xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
/*
* Free the new space.
*
* XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
* this doesn't actually exist in the rmap btree.
*/
error = xfs_rmap_free(tp, bp, pag, be32_to_cpu(agf->agf_length) - len,
len, &XFS_RMAP_OINFO_SKIP_UPDATE);
if (error)
return error;
error = xfs_free_extent(tp, pag, be32_to_cpu(agf->agf_length) - len,
len, &XFS_RMAP_OINFO_SKIP_UPDATE, XFS_AG_RESV_NONE);
if (error)
return error;
/* Update perag geometry */
pag_group(pag)->xg_block_count = be32_to_cpu(agf->agf_length);
__xfs_agino_range(mp, pag_group(pag)->xg_block_count, &pag->agino_min,
&pag->agino_max);
return 0;
}
/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
struct xfs_perag *pag,
struct xfs_ag_geometry *ageo)
{
struct xfs_buf *agi_bp;
struct xfs_buf *agf_bp;
struct xfs_agi *agi;
struct xfs_agf *agf;
unsigned int freeblks;
int error;
/* Lock the AG headers. */
error = xfs_ialloc_read_agi(pag, NULL, 0, &agi_bp);
if (error)
return error;
error = xfs_alloc_read_agf(pag, NULL, 0, &agf_bp);
if (error)
goto out_agi;
/* Fill out form. */
memset(ageo, 0, sizeof(*ageo));
ageo->ag_number = pag_agno(pag);
agi = agi_bp->b_addr;
ageo->ag_icount = be32_to_cpu(agi->agi_count);
ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);
agf = agf_bp->b_addr;
ageo->ag_length = be32_to_cpu(agf->agf_length);
freeblks = pag->pagf_freeblks +
pag->pagf_flcount +
pag->pagf_btreeblks -
xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
ageo->ag_freeblks = freeblks;
xfs_ag_geom_health(pag, ageo);
/* Release resources. */
xfs_buf_relse(agf_bp);
out_agi:
xfs_buf_relse(agi_bp);
return error;
}
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SPCA500 chip based cameras initialization data
*
* V4L2 by Jean-Francois Moine <http://moinejf.free.fr>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "spca500"
#include "gspca.h"
#include "jpeg.h"
MODULE_AUTHOR("Michel Xhaard <[email protected]>");
MODULE_DESCRIPTION("GSPCA/SPCA500 USB Camera Driver");
MODULE_LICENSE("GPL");
#define QUALITY 85
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
char subtype;
#define AgfaCl20 0
#define AiptekPocketDV 1
#define BenqDC1016 2
#define CreativePCCam300 3
#define DLinkDSC350 4
#define Gsmartmini 5
#define IntelPocketPCCamera 6
#define KodakEZ200 7
#define LogitechClickSmart310 8
#define LogitechClickSmart510 9
#define LogitechTraveler 10
#define MustekGsmart300 11
#define Optimedia 12
#define PalmPixDC85 13
#define ToptroIndus 14
u8 jpeg_hdr[JPEG_HDR_SZ];
};
static const struct v4l2_pix_format vga_mode[] = {
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
static const struct v4l2_pix_format sif_mode[] = {
{176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 176,
.sizeimage = 176 * 144 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
{352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 352,
.sizeimage = 352 * 288 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
/* Frame packet header offsets for the spca500 */
#define SPCA500_OFFSET_PADDINGLB 2
#define SPCA500_OFFSET_PADDINGHB 3
#define SPCA500_OFFSET_MODE 4
#define SPCA500_OFFSET_IMGWIDTH 5
#define SPCA500_OFFSET_IMGHEIGHT 6
#define SPCA500_OFFSET_IMGMODE 7
#define SPCA500_OFFSET_QTBLINDEX 8
#define SPCA500_OFFSET_FRAMSEQ 9
#define SPCA500_OFFSET_CDSPINFO 10
#define SPCA500_OFFSET_GPIO 11
#define SPCA500_OFFSET_AUGPIO 12
#define SPCA500_OFFSET_DATA 16
static const __u16 spca500_visual_defaults[][3] = {
{0x00, 0x0003, 0x816b}, /* SSI not active sync with vsync,
* hue (H byte) = 0,
* saturation/hue enable,
* brightness/contrast enable.
*/
{0x00, 0x0000, 0x8167}, /* brightness = 0 */
{0x00, 0x0020, 0x8168}, /* contrast = 0 */
{0x00, 0x0003, 0x816b}, /* SSI not active sync with vsync,
* hue (H byte) = 0, saturation/hue enable,
* brightness/contrast enable.
* was 0x0003, now 0x0000.
*/
{0x00, 0x0000, 0x816a}, /* hue (L byte) = 0 */
{0x00, 0x0020, 0x8169}, /* saturation = 0x20 */
{0x00, 0x0050, 0x8157}, /* edge gain high threshold */
{0x00, 0x0030, 0x8158}, /* edge gain low threshold */
{0x00, 0x0028, 0x8159}, /* edge bandwidth high threshold */
{0x00, 0x000a, 0x815a}, /* edge bandwidth low threshold */
{0x00, 0x0001, 0x8202}, /* clock rate compensation = 1/25 sec/frame */
{0x0c, 0x0004, 0x0000},
/* set interface */
{}
};
static const __u16 Clicksmart510_defaults[][3] = {
{0x00, 0x00, 0x8211},
{0x00, 0x01, 0x82c0},
{0x00, 0x10, 0x82cb},
{0x00, 0x0f, 0x800d},
{0x00, 0x82, 0x8225},
{0x00, 0x21, 0x8228},
{0x00, 0x00, 0x8203},
{0x00, 0x00, 0x8204},
{0x00, 0x08, 0x8205},
{0x00, 0xf8, 0x8206},
{0x00, 0x28, 0x8207},
{0x00, 0xa0, 0x8208},
{0x00, 0x08, 0x824a},
{0x00, 0x08, 0x8214},
{0x00, 0x80, 0x82c1},
{0x00, 0x00, 0x82c2},
{0x00, 0x00, 0x82ca},
{0x00, 0x80, 0x82c1},
{0x00, 0x04, 0x82c2},
{0x00, 0x00, 0x82ca},
{0x00, 0xfc, 0x8100},
{0x00, 0xfc, 0x8105},
{0x00, 0x30, 0x8101},
{0x00, 0x00, 0x8102},
{0x00, 0x00, 0x8103},
{0x00, 0x66, 0x8107},
{0x00, 0x00, 0x816b},
{0x00, 0x00, 0x8155},
{0x00, 0x01, 0x8156},
{0x00, 0x60, 0x8157},
{0x00, 0x40, 0x8158},
{0x00, 0x0a, 0x8159},
{0x00, 0x06, 0x815a},
{0x00, 0x00, 0x813f},
{0x00, 0x00, 0x8200},
{0x00, 0x19, 0x8201},
{0x00, 0x00, 0x82c1},
{0x00, 0xa0, 0x82c2},
{0x00, 0x00, 0x82ca},
{0x00, 0x00, 0x8117},
{0x00, 0x00, 0x8118},
{0x00, 0x65, 0x8119},
{0x00, 0x00, 0x811a},
{0x00, 0x00, 0x811b},
{0x00, 0x55, 0x811c},
{0x00, 0x65, 0x811d},
{0x00, 0x55, 0x811e},
{0x00, 0x16, 0x811f},
{0x00, 0x19, 0x8120},
{0x00, 0x80, 0x8103},
{0x00, 0x83, 0x816b},
{0x00, 0x25, 0x8168},
{0x00, 0x01, 0x820f},
{0x00, 0xff, 0x8115},
{0x00, 0x48, 0x8116},
{0x00, 0x50, 0x8151},
{0x00, 0x40, 0x8152},
{0x00, 0x78, 0x8153},
{0x00, 0x40, 0x8154},
{0x00, 0x00, 0x8167},
{0x00, 0x20, 0x8168},
{0x00, 0x00, 0x816a},
{0x00, 0x03, 0x816b},
{0x00, 0x20, 0x8169},
{0x00, 0x60, 0x8157},
{0x00, 0x00, 0x8190},
{0x00, 0x00, 0x81a1},
{0x00, 0x00, 0x81b2},
{0x00, 0x27, 0x8191},
{0x00, 0x27, 0x81a2},
{0x00, 0x27, 0x81b3},
{0x00, 0x4b, 0x8192},
{0x00, 0x4b, 0x81a3},
{0x00, 0x4b, 0x81b4},
{0x00, 0x66, 0x8193},
{0x00, 0x66, 0x81a4},
{0x00, 0x66, 0x81b5},
{0x00, 0x79, 0x8194},
{0x00, 0x79, 0x81a5},
{0x00, 0x79, 0x81b6},
{0x00, 0x8a, 0x8195},
{0x00, 0x8a, 0x81a6},
{0x00, 0x8a, 0x81b7},
{0x00, 0x9b, 0x8196},
{0x00, 0x9b, 0x81a7},
{0x00, 0x9b, 0x81b8},
{0x00, 0xa6, 0x8197},
{0x00, 0xa6, 0x81a8},
{0x00, 0xa6, 0x81b9},
{0x00, 0xb2, 0x8198},
{0x00, 0xb2, 0x81a9},
{0x00, 0xb2, 0x81ba},
{0x00, 0xbe, 0x8199},
{0x00, 0xbe, 0x81aa},
{0x00, 0xbe, 0x81bb},
{0x00, 0xc8, 0x819a},
{0x00, 0xc8, 0x81ab},
{0x00, 0xc8, 0x81bc},
{0x00, 0xd2, 0x819b},
{0x00, 0xd2, 0x81ac},
{0x00, 0xd2, 0x81bd},
{0x00, 0xdb, 0x819c},
{0x00, 0xdb, 0x81ad},
{0x00, 0xdb, 0x81be},
{0x00, 0xe4, 0x819d},
{0x00, 0xe4, 0x81ae},
{0x00, 0xe4, 0x81bf},
{0x00, 0xed, 0x819e},
{0x00, 0xed, 0x81af},
{0x00, 0xed, 0x81c0},
{0x00, 0xf7, 0x819f},
{0x00, 0xf7, 0x81b0},
{0x00, 0xf7, 0x81c1},
{0x00, 0xff, 0x81a0},
{0x00, 0xff, 0x81b1},
{0x00, 0xff, 0x81c2},
{0x00, 0x03, 0x8156},
{0x00, 0x00, 0x8211},
{0x00, 0x20, 0x8168},
{0x00, 0x01, 0x8202},
{0x00, 0x30, 0x8101},
{0x00, 0x00, 0x8111},
{0x00, 0x00, 0x8112},
{0x00, 0x00, 0x8113},
{0x00, 0x00, 0x8114},
{}
};
static const __u8 qtable_creative_pccam[2][64] = {
{ /* Q-table Y-components */
0x05, 0x03, 0x03, 0x05, 0x07, 0x0c, 0x0f, 0x12,
0x04, 0x04, 0x04, 0x06, 0x08, 0x11, 0x12, 0x11,
0x04, 0x04, 0x05, 0x07, 0x0c, 0x11, 0x15, 0x11,
0x04, 0x05, 0x07, 0x09, 0x0f, 0x1a, 0x18, 0x13,
0x05, 0x07, 0x0b, 0x11, 0x14, 0x21, 0x1f, 0x17,
0x07, 0x0b, 0x11, 0x13, 0x18, 0x1f, 0x22, 0x1c,
0x0f, 0x13, 0x17, 0x1a, 0x1f, 0x24, 0x24, 0x1e,
0x16, 0x1c, 0x1d, 0x1d, 0x22, 0x1e, 0x1f, 0x1e},
{ /* Q-table C-components */
0x05, 0x05, 0x07, 0x0e, 0x1e, 0x1e, 0x1e, 0x1e,
0x05, 0x06, 0x08, 0x14, 0x1e, 0x1e, 0x1e, 0x1e,
0x07, 0x08, 0x11, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e,
0x0e, 0x14, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e,
0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e,
0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e,
0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e,
0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e}
};
static const __u8 qtable_kodak_ez200[2][64] = {
{ /* Q-table Y-components */
0x02, 0x01, 0x01, 0x02, 0x02, 0x04, 0x05, 0x06,
0x01, 0x01, 0x01, 0x02, 0x03, 0x06, 0x06, 0x06,
0x01, 0x01, 0x02, 0x02, 0x04, 0x06, 0x07, 0x06,
0x01, 0x02, 0x02, 0x03, 0x05, 0x09, 0x08, 0x06,
0x02, 0x02, 0x04, 0x06, 0x07, 0x0b, 0x0a, 0x08,
0x02, 0x04, 0x06, 0x06, 0x08, 0x0a, 0x0b, 0x09,
0x05, 0x06, 0x08, 0x09, 0x0a, 0x0c, 0x0c, 0x0a,
0x07, 0x09, 0x0a, 0x0a, 0x0b, 0x0a, 0x0a, 0x0a},
{ /* Q-table C-components */
0x02, 0x02, 0x02, 0x05, 0x0a, 0x0a, 0x0a, 0x0a,
0x02, 0x02, 0x03, 0x07, 0x0a, 0x0a, 0x0a, 0x0a,
0x02, 0x03, 0x06, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
0x05, 0x07, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a}
};
static const __u8 qtable_pocketdv[2][64] = {
{ /* Q-table Y-components start registers 0x8800 */
0x06, 0x04, 0x04, 0x06, 0x0a, 0x10, 0x14, 0x18,
0x05, 0x05, 0x06, 0x08, 0x0a, 0x17, 0x18, 0x16,
0x06, 0x05, 0x06, 0x0a, 0x10, 0x17, 0x1c, 0x16,
0x06, 0x07, 0x09, 0x0c, 0x14, 0x23, 0x20, 0x19,
0x07, 0x09, 0x0f, 0x16, 0x1b, 0x2c, 0x29, 0x1f,
0x0a, 0x0e, 0x16, 0x1a, 0x20, 0x2a, 0x2d, 0x25,
0x14, 0x1a, 0x1f, 0x23, 0x29, 0x30, 0x30, 0x28,
0x1d, 0x25, 0x26, 0x27, 0x2d, 0x28, 0x29, 0x28,
},
{ /* Q-table C-components start registers 0x8840 */
0x07, 0x07, 0x0a, 0x13, 0x28, 0x28, 0x28, 0x28,
0x07, 0x08, 0x0a, 0x1a, 0x28, 0x28, 0x28, 0x28,
0x0a, 0x0a, 0x16, 0x28, 0x28, 0x28, 0x28, 0x28,
0x13, 0x1a, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28}
};
/* read 'length' bytes into gspca_dev->usb_buf */
static void reg_r(struct gspca_dev *gspca_dev,
__u16 index,
__u16 length)
{
usb_control_msg(gspca_dev->dev,
usb_rcvctrlpipe(gspca_dev->dev, 0),
0,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, /* value */
index, gspca_dev->usb_buf, length, 500);
}
static int reg_w(struct gspca_dev *gspca_dev,
__u16 req, __u16 index, __u16 value)
{
int ret;
gspca_dbg(gspca_dev, D_USBO, "reg write: [0x%02x] = 0x%02x\n",
index, value);
ret = usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
req,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, NULL, 0, 500);
if (ret < 0)
pr_err("reg write: error %d\n", ret);
return ret;
}
/* returns: negative on error, zero or positive is the data read */
static int reg_r_12(struct gspca_dev *gspca_dev,
__u16 req, /* bRequest */
__u16 index, /* wIndex */
__u16 length) /* wLength (1 or 2 only) */
{
int ret;
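/* pre-clear the high byte so a 1-byte read returns only usb_buf[0] */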
gspca_dev->usb_buf[1] = 0;
ret = usb_control_msg(gspca_dev->dev,
usb_rcvctrlpipe(gspca_dev->dev, 0),
req,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, /* value */
index,
gspca_dev->usb_buf, length,
500); /* timeout */
if (ret < 0) {
pr_err("reg_r_12 err %d\n", ret);
return ret;
}
return (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0];
}
/*
 * Poll until a register read returns the given 8-bit value.
 * Returns: negative on error or timeout, zero on success.
 */
static int reg_r_wait(struct gspca_dev *gspca_dev,
__u16 reg, __u16 index, __u16 value)
{
int ret, cnt = 20;
while (--cnt > 0) {
ret = reg_r_12(gspca_dev, reg, index, 1);
if (ret == value)
return 0;
msleep(50);
}
return -EIO;
}
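/*
 * Write a table of register settings; each entry is {request, value, index}
 * and the table is terminated by an all-zero entry.
 */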
static int write_vector(struct gspca_dev *gspca_dev,
const __u16 data[][3])
{
int ret, i = 0;
while (data[i][0] != 0 || data[i][1] != 0 || data[i][2] != 0) {
ret = reg_w(gspca_dev, data[i][0], data[i][2], data[i][1]);
if (ret < 0)
return ret;
i++;
}
return 0;
}
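/*
 * Upload the two 64-byte JPEG quantization tables (Y at ybase, C at cbase),
 * one register write per coefficient.
 */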
static int spca50x_setup_qtable(struct gspca_dev *gspca_dev,
unsigned int request,
unsigned int ybase,
unsigned int cbase,
const __u8 qtable[2][64])
{
int i, err;
/* loop over y components */
for (i = 0; i < 64; i++) {
err = reg_w(gspca_dev, request, ybase + i, qtable[0][i]);
if (err < 0)
return err;
}
/* loop over c components */
for (i = 0; i < 64; i++) {
err = reg_w(gspca_dev, request, cbase + i, qtable[1][i]);
if (err < 0)
return err;
}
return 0;
}
static void spca500_ping310(struct gspca_dev *gspca_dev)
{
reg_r(gspca_dev, 0x0d04, 2);
gspca_dbg(gspca_dev, D_STREAM, "ClickSmart310 ping 0x0d04 0x%02x 0x%02x\n",
gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
}
static void spca500_clksmart310_init(struct gspca_dev *gspca_dev)
{
reg_r(gspca_dev, 0x0d05, 2);
gspca_dbg(gspca_dev, D_STREAM, "ClickSmart310 init 0x0d05 0x%02x 0x%02x\n",
gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
reg_w(gspca_dev, 0x00, 0x8167, 0x5a);
spca500_ping310(gspca_dev);
reg_w(gspca_dev, 0x00, 0x8168, 0x22);
reg_w(gspca_dev, 0x00, 0x816a, 0xc0);
reg_w(gspca_dev, 0x00, 0x816b, 0x0b);
reg_w(gspca_dev, 0x00, 0x8169, 0x25);
reg_w(gspca_dev, 0x00, 0x8157, 0x5b);
reg_w(gspca_dev, 0x00, 0x8158, 0x5b);
reg_w(gspca_dev, 0x00, 0x813f, 0x03);
reg_w(gspca_dev, 0x00, 0x8151, 0x4a);
reg_w(gspca_dev, 0x00, 0x8153, 0x78);
reg_w(gspca_dev, 0x00, 0x0d01, 0x04);
/* 00 for adjust shutter */
reg_w(gspca_dev, 0x00, 0x0d02, 0x01);
reg_w(gspca_dev, 0x00, 0x8169, 0x25);
reg_w(gspca_dev, 0x00, 0x0d01, 0x02);
}
static void spca500_setmode(struct gspca_dev *gspca_dev,
__u8 xmult, __u8 ymult)
{
int mode;
/* set x multiplier */
reg_w(gspca_dev, 0, 0x8001, xmult);
/* set y multiplier */
reg_w(gspca_dev, 0, 0x8002, ymult);
/* use compressed mode, VGA, with mode specific subsample */
mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
reg_w(gspca_dev, 0, 0x8003, mode << 4);
}
static int spca500_full_reset(struct gspca_dev *gspca_dev)
{
int err;
/* send the reset command */
err = reg_w(gspca_dev, 0xe0, 0x0001, 0x0000);
if (err < 0)
return err;
/* wait for the reset to complete */
err = reg_r_wait(gspca_dev, 0x06, 0x0000, 0x0000);
if (err < 0)
return err;
err = reg_w(gspca_dev, 0xe0, 0x0000, 0x0000);
if (err < 0)
return err;
err = reg_r_wait(gspca_dev, 0x06, 0, 0);
if (err < 0) {
gspca_err(gspca_dev, "reg_r_wait() failed\n");
return err;
}
/* all ok */
return 0;
}
/*
 * Synchronize the bridge with the sensor.
 * This may work on all spca500 chips, but it has only been verified on a
 * ClickSmart 310, the only device the author owns.
 * Calling spca50x_set_packet_size() here causes an oops:
 * usb_set_interface() on 2.6.x kernels clears all the URB state, so this
 * re-implements the equivalent 2.4.x behaviour.
 */
static int spca500_synch310(struct gspca_dev *gspca_dev)
{
if (usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0) < 0) {
gspca_err(gspca_dev, "Set packet size: set interface error\n");
goto error;
}
spca500_ping310(gspca_dev);
reg_r(gspca_dev, 0x0d00, 1);
/* need alt setting here */
gspca_dbg(gspca_dev, D_PACK, "ClickSmart310 sync alt: %d\n",
gspca_dev->alt);
/* the Windows driver uses a pipe with alt setting 6; why 7 here? */
if (usb_set_interface(gspca_dev->dev,
gspca_dev->iface,
gspca_dev->alt) < 0) {
gspca_err(gspca_dev, "Set packet size: set interface error\n");
goto error;
}
return 0;
error:
return -EBUSY;
}
static void spca500_reinit(struct gspca_dev *gspca_dev)
{
int err;
__u8 Data;
/* some unknown command from Aiptek pocket dv and family300 */
reg_w(gspca_dev, 0x00, 0x0d01, 0x01);
reg_w(gspca_dev, 0x00, 0x0d03, 0x00);
reg_w(gspca_dev, 0x00, 0x0d02, 0x01);
/* enable drop packet */
reg_w(gspca_dev, 0x00, 0x850a, 0x0001);
err = spca50x_setup_qtable(gspca_dev, 0x00, 0x8800, 0x8840,
qtable_pocketdv);
if (err < 0)
gspca_err(gspca_dev, "spca50x_setup_qtable failed on init\n");
/* set qtable index */
reg_w(gspca_dev, 0x00, 0x8880, 2);
/* family cam Quicksmart stuff */
reg_w(gspca_dev, 0x00, 0x800a, 0x00);
/* Set agc transfer: synced between frames */
reg_w(gspca_dev, 0x00, 0x820f, 0x01);
/* Init SDRAM - needed for SDRAM access */
reg_w(gspca_dev, 0x00, 0x870a, 0x04);
/* Start init sequence or stream */
reg_w(gspca_dev, 0, 0x8003, 0x00);
/* switch to video camera mode */
reg_w(gspca_dev, 0x00, 0x8000, 0x0004);
msleep(2000);
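/* wait for the bridge to report video camera mode (reg 0x8000 reads 0x44) */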
if (reg_r_wait(gspca_dev, 0, 0x8000, 0x44) != 0) {
reg_r(gspca_dev, 0x816b, 1);
Data = gspca_dev->usb_buf[0];
reg_w(gspca_dev, 0x00, 0x816b, Data);
}
}
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
cam = &gspca_dev->cam;
sd->subtype = id->driver_info;
if (sd->subtype != LogitechClickSmart310) {
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
} else {
cam->cam_mode = sif_mode;
cam->nmodes = ARRAY_SIZE(sif_mode);
}
return 0;
}
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
/* initialisation of spca500-based cameras is deferred */
gspca_dbg(gspca_dev, D_STREAM, "SPCA500 init\n");
if (sd->subtype == LogitechClickSmart310)
spca500_clksmart310_init(gspca_dev);
/* else
spca500_initialise(gspca_dev); */
gspca_dbg(gspca_dev, D_STREAM, "SPCA500 init done\n");
return 0;
}
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int err;
__u8 Data;
__u8 xmult, ymult;
/* create the JPEG header */
jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
gspca_dev->pixfmt.width,
0x22); /* JPEG 411 */
jpeg_set_qual(sd->jpeg_hdr, QUALITY);
if (sd->subtype == LogitechClickSmart310) {
xmult = 0x16;
ymult = 0x12;
} else {
xmult = 0x28;
ymult = 0x1e;
}
/* is there a sensor here? */
reg_r(gspca_dev, 0x8a04, 1);
gspca_dbg(gspca_dev, D_STREAM, "Spca500 Sensor Address 0x%02x\n",
gspca_dev->usb_buf[0]);
gspca_dbg(gspca_dev, D_STREAM, "Spca500 curr_mode: %d Xmult: 0x%02x, Ymult: 0x%02x\n",
gspca_dev->curr_mode, xmult, ymult);
/* setup qtable */
switch (sd->subtype) {
case LogitechClickSmart310:
spca500_setmode(gspca_dev, xmult, ymult);
/* enable drop packet */
reg_w(gspca_dev, 0x00, 0x850a, 0x0001);
reg_w(gspca_dev, 0x00, 0x8880, 3);
err = spca50x_setup_qtable(gspca_dev,
0x00, 0x8800, 0x8840,
qtable_creative_pccam);
if (err < 0)
gspca_err(gspca_dev, "spca50x_setup_qtable failed\n");
/* Init SDRAM - needed for SDRAM access */
reg_w(gspca_dev, 0x00, 0x870a, 0x04);
/* switch to video camera mode */
reg_w(gspca_dev, 0x00, 0x8000, 0x0004);
msleep(500);
if (reg_r_wait(gspca_dev, 0, 0x8000, 0x44) != 0)
gspca_err(gspca_dev, "reg_r_wait() failed\n");
reg_r(gspca_dev, 0x816b, 1);
Data = gspca_dev->usb_buf[0];
reg_w(gspca_dev, 0x00, 0x816b, Data);
spca500_synch310(gspca_dev);
write_vector(gspca_dev, spca500_visual_defaults);
spca500_setmode(gspca_dev, xmult, ymult);
/* enable drop packet */
err = reg_w(gspca_dev, 0x00, 0x850a, 0x0001);
if (err < 0)
gspca_err(gspca_dev, "failed to enable drop packet\n");
reg_w(gspca_dev, 0x00, 0x8880, 3);
err = spca50x_setup_qtable(gspca_dev,
0x00, 0x8800, 0x8840,
qtable_creative_pccam);
if (err < 0)
gspca_err(gspca_dev, "spca50x_setup_qtable failed\n");
/* Init SDRAM - needed for SDRAM access */
reg_w(gspca_dev, 0x00, 0x870a, 0x04);
/* switch to video camera mode */
reg_w(gspca_dev, 0x00, 0x8000, 0x0004);
if (reg_r_wait(gspca_dev, 0, 0x8000, 0x44) != 0)
gspca_err(gspca_dev, "reg_r_wait() failed\n");
reg_r(gspca_dev, 0x816b, 1);
Data = gspca_dev->usb_buf[0];
reg_w(gspca_dev, 0x00, 0x816b, Data);
break;
case CreativePCCam300: /* Creative PC-CAM 300 640x480 CCD */
case IntelPocketPCCamera: /* FIXME: Temporary fix for
* Intel Pocket PC Camera
* - NWG (Sat 29th March 2003) */
/* do a full reset */
err = spca500_full_reset(gspca_dev);
if (err < 0)
gspca_err(gspca_dev, "spca500_full_reset failed\n");
/* enable drop packet */
err = reg_w(gspca_dev, 0x00, 0x850a, 0x0001);
if (err < 0)
gspca_err(gspca_dev, "failed to enable drop packet\n");
reg_w(gspca_dev, 0x00, 0x8880, 3);
err = spca50x_setup_qtable(gspca_dev,
0x00, 0x8800, 0x8840,
qtable_creative_pccam);
if (err < 0)
gspca_err(gspca_dev, "spca50x_setup_qtable failed\n");
spca500_setmode(gspca_dev, xmult, ymult);
reg_w(gspca_dev, 0x20, 0x0001, 0x0004);
/* switch to video camera mode */
reg_w(gspca_dev, 0x00, 0x8000, 0x0004);
if (reg_r_wait(gspca_dev, 0, 0x8000, 0x44) != 0)
gspca_err(gspca_dev, "reg_r_wait() failed\n");
reg_r(gspca_dev, 0x816b, 1);
Data = gspca_dev->usb_buf[0];
reg_w(gspca_dev, 0x00, 0x816b, Data);
/* write_vector(gspca_dev, spca500_visual_defaults); */
break;
case KodakEZ200: /* Kodak EZ200 */
/* do a full reset */
err = spca500_full_reset(gspca_dev);
if (err < 0)
gspca_err(gspca_dev, "spca500_full_reset failed\n");
/* enable drop packet */
reg_w(gspca_dev, 0x00, 0x850a, 0x0001);
reg_w(gspca_dev, 0x00, 0x8880, 0);
err = spca50x_setup_qtable(gspca_dev,
0x00, 0x8800, 0x8840,
qtable_kodak_ez200);
if (err < 0)
gspca_err(gspca_dev, "spca50x_setup_qtable failed\n");
spca500_setmode(gspca_dev, xmult, ymult);
reg_w(gspca_dev, 0x20, 0x0001, 0x0004);
/* switch to video camera mode */
reg_w(gspca_dev, 0x00, 0x8000, 0x0004);
if (reg_r_wait(gspca_dev, 0, 0x8000, 0x44) != 0)
gspca_err(gspca_dev, "reg_r_wait() failed\n");
reg_r(gspca_dev, 0x816b, 1);
Data = gspca_dev->usb_buf[0];
reg_w(gspca_dev, 0x00, 0x816b, Data);
/* write_vector(gspca_dev, spca500_visual_defaults); */
break;
case BenqDC1016:
case DLinkDSC350: /* FamilyCam 300 */
case AiptekPocketDV: /* Aiptek PocketDV */
case Gsmartmini: /* Mustek Gsmart Mini */
case MustekGsmart300: /* Mustek Gsmart 300 */
case PalmPixDC85:
case Optimedia:
case ToptroIndus:
case AgfaCl20:
spca500_reinit(gspca_dev);
reg_w(gspca_dev, 0x00, 0x0d01, 0x01);
/* enable drop packet */
reg_w(gspca_dev, 0x00, 0x850a, 0x0001);
err = spca50x_setup_qtable(gspca_dev,
0x00, 0x8800, 0x8840, qtable_pocketdv);
if (err < 0)
gspca_err(gspca_dev, "spca50x_setup_qtable failed\n");
reg_w(gspca_dev, 0x00, 0x8880, 2);
/* familycam Quicksmart pocketDV stuff */
reg_w(gspca_dev, 0x00, 0x800a, 0x00);
/* Set agc transfer: synced between frames */
reg_w(gspca_dev, 0x00, 0x820f, 0x01);
/* Init SDRAM - needed for SDRAM access */
reg_w(gspca_dev, 0x00, 0x870a, 0x04);
spca500_setmode(gspca_dev, xmult, ymult);
/* switch to video camera mode */
reg_w(gspca_dev, 0x00, 0x8000, 0x0004);
reg_r_wait(gspca_dev, 0, 0x8000, 0x44);
reg_r(gspca_dev, 0x816b, 1);
Data = gspca_dev->usb_buf[0];
reg_w(gspca_dev, 0x00, 0x816b, Data);
break;
case LogitechTraveler:
case LogitechClickSmart510:
reg_w(gspca_dev, 0x02, 0x00, 0x00);
/* enable drop packet */
reg_w(gspca_dev, 0x00, 0x850a, 0x0001);
err = spca50x_setup_qtable(gspca_dev,
0x00, 0x8800,
0x8840, qtable_creative_pccam);
if (err < 0)
gspca_err(gspca_dev, "spca50x_setup_qtable failed\n");
reg_w(gspca_dev, 0x00, 0x8880, 3);
reg_w(gspca_dev, 0x00, 0x800a, 0x00);
/* Init SDRAM - needed for SDRAM access */
reg_w(gspca_dev, 0x00, 0x870a, 0x04);
spca500_setmode(gspca_dev, xmult, ymult);
/* switch to video camera mode */
reg_w(gspca_dev, 0x00, 0x8000, 0x0004);
reg_r_wait(gspca_dev, 0, 0x8000, 0x44);
reg_r(gspca_dev, 0x816b, 1);
Data = gspca_dev->usb_buf[0];
reg_w(gspca_dev, 0x00, 0x816b, Data);
write_vector(gspca_dev, Clicksmart510_defaults);
break;
}
return 0;
}
static void sd_stopN(struct gspca_dev *gspca_dev)
{
reg_w(gspca_dev, 0, 0x8003, 0x00);
/* switch to video camera mode */
reg_w(gspca_dev, 0x00, 0x8000, 0x0004);
reg_r(gspca_dev, 0x8000, 1);
gspca_dbg(gspca_dev, D_STREAM, "stop SPCA500 done reg8000: 0x%02x\n",
gspca_dev->usb_buf[0]);
}
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* isoc packet */
int len) /* iso packet length */
{
struct sd *sd = (struct sd *) gspca_dev;
int i;
static __u8 ffd9[] = {0xff, 0xd9};
/* frames are JPEG 4:1:1 without 0xff byte stuffing */
if (data[0] == 0xff) {
if (data[1] != 0x01) { /* drop packet */
/* gspca_dev->last_packet_type = DISCARD_PACKET; */
return;
}
gspca_frame_add(gspca_dev, LAST_PACKET,
ffd9, 2);
/* put the JPEG header in the new frame */
gspca_frame_add(gspca_dev, FIRST_PACKET,
sd->jpeg_hdr, JPEG_HDR_SZ);
data += SPCA500_OFFSET_DATA;
len -= SPCA500_OFFSET_DATA;
} else {
data += 1;
len -= 1;
}
/* add 0x00 after 0xff */
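/*
 * Bytes up to and including each 0xff are flushed to the frame, then the
 * 0xff in the source buffer (already copied out) is overwritten in place
 * with the stuffed 0x00 so it leads the next chunk.
 */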
i = 0;
do {
if (data[i] == 0xff) {
gspca_frame_add(gspca_dev, INTER_PACKET,
data, i + 1);
len -= i;
data += i;
*data = 0x00;
i = 0;
}
i++;
} while (i < len);
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
reg_w(gspca_dev, 0x00, 0x8167,
(__u8) (val - 128));
}
static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
reg_w(gspca_dev, 0x00, 0x8168, val);
}
static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
reg_w(gspca_dev, 0x00, 0x8169, val);
}
static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct gspca_dev *gspca_dev =
container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
gspca_dev->usb_err = 0;
if (!gspca_dev->streaming)
return 0;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
setbrightness(gspca_dev, ctrl->val);
break;
case V4L2_CID_CONTRAST:
setcontrast(gspca_dev, ctrl->val);
break;
case V4L2_CID_SATURATION:
setcolors(gspca_dev, ctrl->val);
break;
}
return gspca_dev->usb_err;
}
static const struct v4l2_ctrl_ops sd_ctrl_ops = {
.s_ctrl = sd_s_ctrl,
};
static int sd_init_controls(struct gspca_dev *gspca_dev)
{
struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
gspca_dev->vdev.ctrl_handler = hdl;
v4l2_ctrl_handler_init(hdl, 3);
v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_CONTRAST, 0, 63, 1, 31);
v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_SATURATION, 0, 63, 1, 31);
if (hdl->error) {
pr_err("Could not initialize controls\n");
return hdl->error;
}
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.config = sd_config,
.init = sd_init,
.init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
};
/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x040a, 0x0300), .driver_info = KodakEZ200},
{USB_DEVICE(0x041e, 0x400a), .driver_info = CreativePCCam300},
{USB_DEVICE(0x046d, 0x0890), .driver_info = LogitechTraveler},
{USB_DEVICE(0x046d, 0x0900), .driver_info = LogitechClickSmart310},
{USB_DEVICE(0x046d, 0x0901), .driver_info = LogitechClickSmart510},
{USB_DEVICE(0x04a5, 0x300c), .driver_info = BenqDC1016},
{USB_DEVICE(0x04fc, 0x7333), .driver_info = PalmPixDC85},
{USB_DEVICE(0x055f, 0xc200), .driver_info = MustekGsmart300},
{USB_DEVICE(0x055f, 0xc220), .driver_info = Gsmartmini},
{USB_DEVICE(0x06bd, 0x0404), .driver_info = AgfaCl20},
{USB_DEVICE(0x06be, 0x0800), .driver_info = Optimedia},
{USB_DEVICE(0x084d, 0x0003), .driver_info = DLinkDSC350},
{USB_DEVICE(0x08ca, 0x0103), .driver_info = AiptekPocketDV},
{USB_DEVICE(0x2899, 0x012c), .driver_info = ToptroIndus},
{USB_DEVICE(0x8086, 0x0630), .driver_info = IntelPocketPCCamera},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
THIS_MODULE);
}
static struct usb_driver sd_driver = {
.name = MODULE_NAME,
.id_table = device_table,
.probe = sd_probe,
.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
.reset_resume = gspca_resume,
#endif
};
module_usb_driver(sd_driver);
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2023 Intel Corporation */
#ifndef ADF_RAS_H
#define ADF_RAS_H
#include <linux/bitops.h>
#include <linux/atomic.h>
struct adf_accel_dev;
void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev);
void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev);
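/* the ras_errors argument must embed an atomic_t counter[ADF_RAS_ERRORS] array */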
#define ADF_RAS_ERR_CTR_READ(ras_errors, ERR) \
atomic_read(&(ras_errors).counter[ERR])
#define ADF_RAS_ERR_CTR_CLEAR(ras_errors) \
do { \
for (int err = 0; err < ADF_RAS_ERRORS; ++err) \
atomic_set(&(ras_errors).counter[err], 0); \
} while (0)
#define ADF_RAS_ERR_CTR_INC(ras_errors, ERR) \
atomic_inc(&(ras_errors).counter[ERR])
#endif /* ADF_RAS_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* charon board Device Tree Source
*
* Copyright (C) 2007 Semihalf
* Marian Balakowicz <[email protected]>
*
* Copyright (C) 2010 DENX Software Engineering GmbH
* Heiko Schocher <[email protected]>
*/
/dts-v1/;
/ {
model = "anon,charon";
compatible = "anon,charon";
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&mpc5200_pic>;
cpus {
#address-cells = <1>;
#size-cells = <0>;
PowerPC,5200@0 {
device_type = "cpu";
reg = <0>;
d-cache-line-size = <32>;
i-cache-line-size = <32>;
d-cache-size = <0x4000>; // L1, 16K
i-cache-size = <0x4000>; // L1, 16K
timebase-frequency = <0>; // from bootloader
bus-frequency = <0>; // from bootloader
clock-frequency = <0>; // from bootloader
};
};
memory@0 {
device_type = "memory";
reg = <0x00000000 0x08000000>; // 128MB
};
soc5200@f0000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,mpc5200-immr";
ranges = <0 0xf0000000 0x0000c000>;
reg = <0xf0000000 0x00000100>;
bus-frequency = <0>; // from bootloader
system-frequency = <0>; // from bootloader
cdm@200 {
compatible = "fsl,mpc5200-cdm";
reg = <0x200 0x38>;
};
mpc5200_pic: interrupt-controller@500 {
// 5200 interrupts are encoded into two levels;
interrupt-controller;
#interrupt-cells = <3>;
compatible = "fsl,mpc5200-pic";
reg = <0x500 0x80>;
};
timer@600 { // General Purpose Timer
compatible = "fsl,mpc5200-gpt";
reg = <0x600 0x10>;
interrupts = <1 9 0>;
fsl,has-wdt;
};
can@900 {
compatible = "fsl,mpc5200-mscan";
interrupts = <2 17 0>;
reg = <0x900 0x80>;
};
can@980 {
compatible = "fsl,mpc5200-mscan";
interrupts = <2 18 0>;
reg = <0x980 0x80>;
};
gpio_simple: gpio@b00 {
compatible = "fsl,mpc5200-gpio";
reg = <0xb00 0x40>;
interrupts = <1 7 0>;
gpio-controller;
#gpio-cells = <2>;
};
usb@1000 {
compatible = "fsl,mpc5200-ohci","ohci-be";
reg = <0x1000 0xff>;
interrupts = <2 6 0>;
};
dma-controller@1200 {
device_type = "dma-controller";
compatible = "fsl,mpc5200-bestcomm";
reg = <0x1200 0x80>;
interrupts = <3 0 0 3 1 0 3 2 0 3 3 0
3 4 0 3 5 0 3 6 0 3 7 0
3 8 0 3 9 0 3 10 0 3 11 0
3 12 0 3 13 0 3 14 0 3 15 0>;
};
xlb@1f00 {
compatible = "fsl,mpc5200-xlb";
reg = <0x1f00 0x100>;
};
serial@2000 { // PSC1
compatible = "fsl,mpc5200-psc-uart";
reg = <0x2000 0x100>;
interrupts = <2 1 0>;
};
serial@2400 { // PSC3
compatible = "fsl,mpc5200-psc-uart";
reg = <0x2400 0x100>;
interrupts = <2 3 0>;
};
ethernet@3000 {
compatible = "fsl,mpc5200-fec";
reg = <0x3000 0x400>;
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <2 5 0>;
fixed-link = <1 1 100 0 0>;
};
mdio@3000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,mpc5200-mdio";
reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts
interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co.
};
ata@3a00 {
compatible = "fsl,mpc5200-ata";
reg = <0x3a00 0x100>;
interrupts = <2 7 0>;
};
i2c@3d00 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,mpc5200-i2c","fsl-i2c";
reg = <0x3d00 0x40>;
interrupts = <2 15 0>;
};
i2c@3d40 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,mpc5200-i2c","fsl-i2c";
reg = <0x3d40 0x40>;
interrupts = <2 16 0>;
dtt@28 {
compatible = "national,lm80";
reg = <0x28>;
};
rtc@68 {
compatible = "dallas,ds1374";
reg = <0x68>;
};
};
sram@8000 {
compatible = "fsl,mpc5200-sram";
reg = <0x8000 0x4000>;
};
};
localbus {
compatible = "fsl,mpc5200-lpb","simple-bus";
#address-cells = <2>;
#size-cells = <1>;
ranges = < 0 0 0xfc000000 0x02000000
1 0 0xe0000000 0x04000000 // CS1 range, SM501
3 0 0xe8000000 0x00080000>;
flash@0,0 {
compatible = "cfi-flash";
reg = <0 0 0x02000000>;
bank-width = <4>;
device-width = <2>;
#size-cells = <1>;
#address-cells = <1>;
};
display@1,0 {
compatible = "smi,sm501";
reg = <1 0x00000000 0x00800000
1 0x03e00000 0x00200000>;
mode = "640x480-32@60";
interrupts = <1 1 3>;
little-endian;
};
mram0@3,0 {
compatible = "mtd-ram";
reg = <3 0x00000 0x80000>;
bank-width = <1>;
};
};
pci@f0000d00 {
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
device_type = "pci";
compatible = "fsl,mpc5200-pci";
reg = <0xf0000d00 0x100>;
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3
0xc000 0 0 2 &mpc5200_pic 0 0 3
0xc000 0 0 3 &mpc5200_pic 0 0 3
0xc000 0 0 4 &mpc5200_pic 0 0 3>;
clock-frequency = <0>; // From boot loader
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000>,
<0x02000000 0 0x90000000 0x90000000 0 0x10000000>,
<0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
};
};
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef _DMUB_TRACE_BUFFER_H_
#define _DMUB_TRACE_BUFFER_H_
#include "dmub_cmd.h"
#define LOAD_DMCU_FW 1
#define LOAD_PHY_FW 2
enum dmucb_trace_code {
DMCUB__UNKNOWN,
DMCUB__MAIN_BEGIN,
DMCUB__PHY_INIT_BEGIN,
DMCUB__PHY_FW_SRAM_LOAD_BEGIN,
DMCUB__PHY_FW_SRAM_LOAD_END,
DMCUB__PHY_INIT_POLL_DONE,
DMCUB__PHY_INIT_END,
DMCUB__DMCU_ERAM_LOAD_BEGIN,
DMCUB__DMCU_ERAM_LOAD_END,
DMCUB__DMCU_ISR_LOAD_BEGIN,
DMCUB__DMCU_ISR_LOAD_END,
DMCUB__MAIN_IDLE,
DMCUB__PERF_TRACE,
DMCUB__PG_DONE,
};
struct dmcub_trace_buf_entry {
enum dmucb_trace_code trace_code;
uint32_t tick_count;
uint32_t param0;
uint32_t param1;
};
#define TRACE_BUF_SIZE (1024) // 1 kB
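/* 8 bytes reserved for the entry_count and clk_freq header fields */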
#define PERF_TRACE_MAX_ENTRY ((TRACE_BUF_SIZE - 8)/sizeof(struct dmcub_trace_buf_entry))
struct dmcub_trace_buf {
uint32_t entry_count;
uint32_t clk_freq;
struct dmcub_trace_buf_entry entries[PERF_TRACE_MAX_ENTRY];
};
#endif /* _DMUB_TRACE_BUFFER_H_ */
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright 2021 Gateworks Corporation
*/
/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/leds/common.h>
#include <dt-bindings/net/ti-dp83867.h>
#include "imx8mn.dtsi"
/ {
model = "Gateworks Venice GW7902 i.MX8MN board";
compatible = "gw,imx8mn-gw7902", "fsl,imx8mn";
aliases {
rtc0 = &gsc_rtc;
rtc1 = &snvs_rtc;
usb0 = &usbotg1;
};
chosen {
stdout-path = &uart2;
};
memory@40000000 {
device_type = "memory";
reg = <0x0 0x40000000 0 0x80000000>;
};
can20m: can20m {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <20000000>;
clock-output-names = "can20m";
};
gpio-keys {
compatible = "gpio-keys";
key-user-pb {
label = "user_pb";
gpios = <&gpio 2 GPIO_ACTIVE_LOW>;
linux,code = <BTN_0>;
};
key-user-pb1x {
label = "user_pb1x";
linux,code = <BTN_1>;
interrupt-parent = <&gsc>;
interrupts = <0>;
};
key-erased {
label = "key_erased";
linux,code = <BTN_2>;
interrupt-parent = <&gsc>;
interrupts = <1>;
};
key-eeprom-wp {
label = "eeprom_wp";
linux,code = <BTN_3>;
interrupt-parent = <&gsc>;
interrupts = <2>;
};
key-tamper {
label = "tamper";
linux,code = <BTN_4>;
interrupt-parent = <&gsc>;
interrupts = <5>;
};
switch-hold {
label = "switch_hold";
linux,code = <BTN_5>;
interrupt-parent = <&gsc>;
interrupts = <7>;
};
};
led-controller {
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpio_leds>;
led-0 {
function = LED_FUNCTION_STATUS;
color = <LED_COLOR_ID_GREEN>;
label = "panel1";
gpios = <&gpio3 21 GPIO_ACTIVE_LOW>;
default-state = "off";
};
led-1 {
function = LED_FUNCTION_STATUS;
color = <LED_COLOR_ID_GREEN>;
label = "panel2";
gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
default-state = "off";
};
led-2 {
function = LED_FUNCTION_STATUS;
color = <LED_COLOR_ID_GREEN>;
label = "panel3";
gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
default-state = "off";
};
led-3 {
function = LED_FUNCTION_STATUS;
color = <LED_COLOR_ID_GREEN>;
label = "panel4";
gpios = <&gpio3 20 GPIO_ACTIVE_LOW>;
default-state = "off";
};
led-4 {
function = LED_FUNCTION_STATUS;
color = <LED_COLOR_ID_GREEN>;
label = "panel5";
gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
default-state = "off";
};
};
pps {
compatible = "pps-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pps>;
gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
status = "okay";
};
reg_3p3v: regulator-3p3v {
compatible = "regulator-fixed";
regulator-name = "3P3V";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
reg_usb1_vbus: regulator-usb1 {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_usb1>;
regulator-name = "usb_usb1_vbus";
gpio = <&gpio2 7 GPIO_ACTIVE_HIGH>;
enable-active-high;
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
};
reg_wifi: regulator-wifi {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_wl>;
regulator-name = "wifi";
gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
enable-active-high;
startup-delay-us = <100>;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
};
&A53_0 {
cpu-supply = <&buck2>;
};
&A53_1 {
cpu-supply = <&buck2>;
};
&A53_2 {
cpu-supply = <&buck2>;
};
&A53_3 {
cpu-supply = <&buck2>;
};
&ddrc {
operating-points-v2 = <&ddrc_opp_table>;
ddrc_opp_table: opp-table {
compatible = "operating-points-v2";
opp-25000000 {
opp-hz = /bits/ 64 <25000000>;
};
opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
opp-750000000 {
opp-hz = /bits/ 64 <750000000>;
};
};
};
&ecspi1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
status = "okay";
can@0 {
compatible = "microchip,mcp2515";
reg = <0>;
clocks = <&can20m>;
interrupt-parent = <&gpio2>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
spi-max-frequency = <10000000>;
};
};
&disp_blk_ctrl {
status = "disabled";
};
/* off-board header */
&ecspi2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi2>;
cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
status = "okay";
};
&fec1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec1>;
phy-mode = "rgmii-id";
phy-handle = <ðphy0>;
local-mac-address = [00 00 00 00 00 00];
status = "okay";
mdio {
#address-cells = <1>;
#size-cells = <0>;
ethphy0: ethernet-phy@0 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0>;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
tx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
rx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
};
};
};
&gpio1 {
gpio-line-names = "", "", "", "", "", "", "", "",
"m2_pwr_en", "", "", "", "", "m2_reset", "", "m2_wdis#",
"", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "";
};
&gpio2 {
gpio-line-names = "", "", "", "", "", "", "", "",
"uart2_en#", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "";
};
&gpio3 {
gpio-line-names = "", "m2_gdis#", "", "", "", "", "", "m2_off#",
"", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "";
};
&gpio4 {
gpio-line-names = "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "",
"", "", "", "", "", "app_gpio1", "vdd_4p0_en", "uart1_rs485",
"", "uart1_term", "uart1_half", "app_gpio2",
"mipi_gpio1", "", "", "";
};
&gpio5 {
gpio-line-names = "", "", "", "mipi_gpio4",
"mipi_gpio3", "mipi_gpio2", "", "",
"", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "";
};
&gpu {
status = "disabled";
};
&i2c1 {
clock-frequency = <100000>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1>;
pinctrl-1 = <&pinctrl_i2c1_gpio>;
scl-gpios = <&gpio5 14 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
gsc: gsc@20 {
compatible = "gw,gsc";
reg = <0x20>;
pinctrl-0 = <&pinctrl_gsc>;
interrupt-parent = <&gpio2>;
interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
adc {
compatible = "gw,gsc-adc";
#address-cells = <1>;
#size-cells = <0>;
channel@6 {
gw,mode = <0>;
reg = <0x06>;
label = "temp";
};
channel@8 {
gw,mode = <3>;
reg = <0x08>;
label = "vdd_bat";
};
channel@82 {
gw,mode = <2>;
reg = <0x82>;
label = "vin";
gw,voltage-divider-ohms = <22100 1000>;
gw,voltage-offset-microvolt = <700000>;
};
channel@84 {
gw,mode = <2>;
reg = <0x84>;
label = "vin_4p0";
gw,voltage-divider-ohms = <10000 10000>;
};
channel@86 {
gw,mode = <2>;
reg = <0x86>;
label = "vdd_3p3";
gw,voltage-divider-ohms = <10000 10000>;
};
channel@88 {
gw,mode = <2>;
reg = <0x88>;
label = "vdd_0p9";
};
channel@8c {
gw,mode = <2>;
reg = <0x8c>;
label = "vdd_soc";
};
channel@8e {
gw,mode = <2>;
reg = <0x8e>;
label = "vdd_arm";
};
channel@90 {
gw,mode = <2>;
reg = <0x90>;
label = "vdd_1p8";
};
channel@92 {
gw,mode = <2>;
reg = <0x92>;
label = "vdd_dram";
};
channel@98 {
gw,mode = <2>;
reg = <0x98>;
label = "vdd_1p0";
};
channel@9a {
gw,mode = <2>;
reg = <0x9a>;
label = "vdd_2p5";
gw,voltage-divider-ohms = <10000 10000>;
};
channel@9c {
gw,mode = <2>;
reg = <0x9c>;
label = "vdd_5p0";
gw,voltage-divider-ohms = <10000 10000>;
};
channel@a2 {
gw,mode = <2>;
reg = <0xa2>;
label = "vdd_gsc";
gw,voltage-divider-ohms = <10000 10000>;
};
};
};
gpio: gpio@23 {
compatible = "nxp,pca9555";
reg = <0x23>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&gsc>;
interrupts = <4>;
};
pmic@4b {
compatible = "rohm,bd71847";
reg = <0x4b>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pmic>;
interrupt-parent = <&gpio3>;
interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
rohm,reset-snvs-powered;
#clock-cells = <0>;
clocks = <&osc_32k>;
clock-output-names = "clk-32k-out";
regulators {
/* vdd_soc: 0.805-0.900V (typ=0.8V) */
BUCK1 {
regulator-name = "buck1";
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1300000>;
regulator-boot-on;
regulator-always-on;
regulator-ramp-delay = <1250>;
};
/* vdd_arm: 0.805-1.0V (typ=0.9V) */
buck2: BUCK2 {
regulator-name = "buck2";
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1300000>;
regulator-boot-on;
regulator-always-on;
regulator-ramp-delay = <1250>;
rohm,dvs-run-voltage = <1000000>;
rohm,dvs-idle-voltage = <900000>;
};
/* vdd_0p9: 0.805-1.0V (typ=0.9V) */
BUCK3 {
regulator-name = "buck3";
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1350000>;
regulator-boot-on;
regulator-always-on;
};
/* vdd_3p3 */
BUCK4 {
regulator-name = "buck4";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
};
/* vdd_1p8 */
BUCK5 {
regulator-name = "buck5";
regulator-min-microvolt = <1605000>;
regulator-max-microvolt = <1995000>;
regulator-boot-on;
regulator-always-on;
};
/* vdd_dram */
BUCK6 {
regulator-name = "buck6";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1400000>;
regulator-boot-on;
regulator-always-on;
};
/* nvcc_snvs_1p8 */
LDO1 {
regulator-name = "ldo1";
regulator-min-microvolt = <1600000>;
regulator-max-microvolt = <1900000>;
regulator-boot-on;
regulator-always-on;
};
/* vdd_snvs_0p8 */
LDO2 {
regulator-name = "ldo2";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <900000>;
regulator-boot-on;
regulator-always-on;
};
/* vdda_1p8 */
LDO3 {
regulator-name = "ldo3";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
};
LDO4 {
regulator-name = "ldo4";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
regulator-always-on;
};
LDO6 {
regulator-name = "ldo6";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
regulator-always-on;
};
};
};
eeprom@50 {
compatible = "atmel,24c02";
reg = <0x50>;
pagesize = <16>;
};
eeprom@51 {
compatible = "atmel,24c02";
reg = <0x51>;
pagesize = <16>;
};
eeprom@52 {
compatible = "atmel,24c02";
reg = <0x52>;
pagesize = <16>;
};
eeprom@53 {
compatible = "atmel,24c02";
reg = <0x53>;
pagesize = <16>;
};
gsc_rtc: rtc@68 {
compatible = "dallas,ds1672";
reg = <0x68>;
};
};
&i2c2 {
clock-frequency = <400000>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
pinctrl-1 = <&pinctrl_i2c2_gpio>;
scl-gpios = <&gpio5 16 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
accelerometer@19 {
compatible = "st,lis2de12";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_accel>;
reg = <0x19>;
st,drdy-int-pin = <1>;
interrupt-parent = <&gpio1>;
interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
};
};
/* off-board header */
&i2c3 {
clock-frequency = <400000>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c3>;
pinctrl-1 = <&pinctrl_i2c3_gpio>;
scl-gpios = <&gpio5 18 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio5 19 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
};
/* off-board header */
&i2c4 {
clock-frequency = <400000>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c4>;
pinctrl-1 = <&pinctrl_i2c4_gpio>;
scl-gpios = <&gpio5 20 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio5 21 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
};
&pgc_gpumix {
status = "disabled";
};
/* off-board header */
&sai3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai3>;
assigned-clocks = <&clk IMX8MN_CLK_SAI3>;
assigned-clock-parents = <&clk IMX8MN_AUDIO_PLL1_OUT>;
assigned-clock-rates = <24576000>;
status = "okay";
};
/* RS232/RS485/RS422 selectable */
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>, <&pinctrl_uart1_gpio>;
status = "okay";
};
/* RS232 console */
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
status = "okay";
};
/* bluetooth HCI */
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
rts-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
status = "okay";
bluetooth {
compatible = "brcm,bcm4330-bt";
shutdown-gpios = <&gpio2 12 GPIO_ACTIVE_HIGH>;
};
};
/* LTE Cat M1/NB1/EGPRS modem or GPS (loading option) */
&uart4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart4>;
status = "okay";
};
&usbotg1 {
dr_mode = "host";
vbus-supply = <®_usb1_vbus>;
disable-over-current;
status = "okay";
};
/* SDIO WiFi */
&usdhc2 {
pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc2>;
pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
bus-width = <4>;
non-removable;
vmmc-supply = <®_wifi>;
#address-cells = <1>;
#size-cells = <0>;
status = "okay";
wifi@0 {
compatible = "brcm,bcm43455-fmac", "brcm,bcm4329-fmac";
reg = <0>;
};
};
/* eMMC */
&usdhc3 {
pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc3>;
pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
bus-width = <8>;
non-removable;
status = "okay";
};
&wdog1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_wdog>;
fsl,ext-reset-output;
status = "okay";
};
&iomuxc {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hog>;
pinctrl_hog: hoggrp {
fsl,pins = <
MX8MN_IOMUXC_NAND_CE0_B_GPIO3_IO1 0x40000159 /* M2_GDIS# */
MX8MN_IOMUXC_GPIO1_IO08_GPIO1_IO8 0x40000041 /* M2_PWR_EN */
MX8MN_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x40000041 /* M2_RESET */
MX8MN_IOMUXC_NAND_DATA01_GPIO3_IO7 0x40000119 /* M2_OFF# */
MX8MN_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x40000159 /* M2_WDIS# */
MX8MN_IOMUXC_SAI2_RXFS_GPIO4_IO21 0x40000041 /* APP GPIO1 */
MX8MN_IOMUXC_SAI2_RXC_GPIO4_IO22 0x40000041 /* VDD_4P0_EN */
MX8MN_IOMUXC_SAI2_MCLK_GPIO4_IO27 0x40000041 /* APP GPIO2 */
MX8MN_IOMUXC_SD1_DATA6_GPIO2_IO8 0x40000041 /* UART2_EN# */
MX8MN_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x40000041 /* MIPI_GPIO1 */
MX8MN_IOMUXC_SPDIF_EXT_CLK_GPIO5_IO5 0x40000041 /* MIPI_GPIO2 */
MX8MN_IOMUXC_SPDIF_RX_GPIO5_IO4 0x40000041 /* MIPI_GPIO3/PWM2 */
MX8MN_IOMUXC_SPDIF_TX_GPIO5_IO3 0x40000041 /* MIPI_GPIO4/PWM3 */
>;
};
pinctrl_accel: accelgrp {
fsl,pins = <
MX8MN_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x159
>;
};
pinctrl_fec1: fec1grp {
fsl,pins = <
MX8MN_IOMUXC_ENET_MDC_ENET1_MDC 0x3
MX8MN_IOMUXC_ENET_MDIO_ENET1_MDIO 0x3
MX8MN_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f
MX8MN_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f
MX8MN_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f
MX8MN_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f
MX8MN_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91
MX8MN_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91
MX8MN_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91
MX8MN_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91
MX8MN_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
MX8MN_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
MX8MN_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
MX8MN_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
MX8MN_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x19 /* RST# */
MX8MN_IOMUXC_GPIO1_IO11_GPIO1_IO11 0x19 /* IRQ# */
>;
};
pinctrl_gsc: gscgrp {
fsl,pins = <
MX8MN_IOMUXC_SD1_DATA4_GPIO2_IO6 0x40
>;
};
pinctrl_i2c1: i2c1grp {
fsl,pins = <
MX8MN_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
MX8MN_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
>;
};
pinctrl_i2c1_gpio: i2c1gpiogrp {
fsl,pins = <
MX8MN_IOMUXC_I2C1_SCL_GPIO5_IO14 0x400001c3
MX8MN_IOMUXC_I2C1_SDA_GPIO5_IO15 0x400001c3
>;
};
pinctrl_i2c2: i2c2grp {
fsl,pins = <
MX8MN_IOMUXC_I2C2_SCL_I2C2_SCL 0x400001c3
MX8MN_IOMUXC_I2C2_SDA_I2C2_SDA 0x400001c3
>;
};
pinctrl_i2c2_gpio: i2c2gpiogrp {
fsl,pins = <
MX8MN_IOMUXC_I2C2_SCL_GPIO5_IO16 0x400001c3
MX8MN_IOMUXC_I2C2_SDA_GPIO5_IO17 0x400001c3
>;
};
pinctrl_i2c3: i2c3grp {
fsl,pins = <
MX8MN_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001c3
MX8MN_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001c3
>;
};
pinctrl_i2c3_gpio: i2c3gpiogrp {
fsl,pins = <
MX8MN_IOMUXC_I2C3_SCL_GPIO5_IO18 0x400001c3
MX8MN_IOMUXC_I2C3_SDA_GPIO5_IO19 0x400001c3
>;
};
pinctrl_i2c4: i2c4grp {
fsl,pins = <
MX8MN_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
MX8MN_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
>;
};
pinctrl_i2c4_gpio: i2c4gpiogrp {
fsl,pins = <
MX8MN_IOMUXC_I2C4_SCL_GPIO5_IO20 0x400001c3
MX8MN_IOMUXC_I2C4_SDA_GPIO5_IO21 0x400001c3
>;
};
pinctrl_gpio_leds: gpioledgrp {
fsl,pins = <
MX8MN_IOMUXC_SAI5_RXD0_GPIO3_IO21 0x19
MX8MN_IOMUXC_SAI5_RXD2_GPIO3_IO23 0x19
MX8MN_IOMUXC_SAI5_RXD1_GPIO3_IO22 0x19
MX8MN_IOMUXC_SAI5_RXC_GPIO3_IO20 0x19
MX8MN_IOMUXC_SAI5_MCLK_GPIO3_IO25 0x19
>;
};
pinctrl_pmic: pmicgrp {
fsl,pins = <
MX8MN_IOMUXC_NAND_DATA02_GPIO3_IO8 0x41
>;
};
pinctrl_pps: ppsgrp {
fsl,pins = <
MX8MN_IOMUXC_SAI5_RXD3_GPIO3_IO24 0x141 /* PPS */
>;
};
pinctrl_reg_wl: regwlgrp {
fsl,pins = <
MX8MN_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x41 /* WLAN_WLON */
>;
};
pinctrl_reg_usb1: regusb1grp {
fsl,pins = <
MX8MN_IOMUXC_SD1_DATA5_GPIO2_IO7 0x41
>;
};
pinctrl_sai3: sai3grp {
fsl,pins = <
MX8MN_IOMUXC_SAI3_MCLK_SAI3_MCLK 0xd6
MX8MN_IOMUXC_SAI3_RXD_SAI3_RX_DATA0 0xd6
MX8MN_IOMUXC_SAI3_TXC_SAI3_TX_BCLK 0xd6
MX8MN_IOMUXC_SAI3_TXD_SAI3_TX_DATA0 0xd6
MX8MN_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0xd6
>;
};
pinctrl_spi1: spi1grp {
fsl,pins = <
MX8MN_IOMUXC_ECSPI1_SCLK_ECSPI1_SCLK 0x82
MX8MN_IOMUXC_ECSPI1_MOSI_ECSPI1_MOSI 0x82
MX8MN_IOMUXC_ECSPI1_MISO_ECSPI1_MISO 0x82
MX8MN_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x40
MX8MN_IOMUXC_SD1_DATA1_GPIO2_IO3 0x140 /* CAN_IRQ# */
>;
};
pinctrl_spi2: spi2grp {
fsl,pins = <
MX8MN_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82
MX8MN_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82
MX8MN_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82
MX8MN_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x40 /* SS0 */
>;
};
pinctrl_uart1: uart1grp {
fsl,pins = <
MX8MN_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
MX8MN_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140
>;
};
pinctrl_uart1_gpio: uart1gpiogrp {
fsl,pins = <
MX8MN_IOMUXC_SAI2_TXD0_GPIO4_IO26 0x40000110 /* HALF */
MX8MN_IOMUXC_SAI2_TXC_GPIO4_IO25 0x40000110 /* TERM */
MX8MN_IOMUXC_SAI2_RXD0_GPIO4_IO23 0x40000110 /* RS485 */
>;
};
pinctrl_uart2: uart2grp {
fsl,pins = <
MX8MN_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140
MX8MN_IOMUXC_UART2_TXD_UART2_DCE_TX 0x140
>;
};
pinctrl_uart3_gpio: uart3_gpiogrp {
fsl,pins = <
MX8MN_IOMUXC_SD2_CD_B_GPIO2_IO12 0x41 /* BT_EN# */
>;
};
pinctrl_uart3: uart3grp {
fsl,pins = <
MX8MN_IOMUXC_UART3_RXD_UART3_DCE_RX 0x140
MX8MN_IOMUXC_UART3_TXD_UART3_DCE_TX 0x140
MX8MN_IOMUXC_SD1_CLK_GPIO2_IO0 0x140 /* CTS */
MX8MN_IOMUXC_SD1_CMD_GPIO2_IO1 0x140 /* RTS */
>;
};
pinctrl_uart4: uart4grp {
fsl,pins = <
MX8MN_IOMUXC_UART4_RXD_UART4_DCE_RX 0x140
MX8MN_IOMUXC_UART4_TXD_UART4_DCE_TX 0x140
MX8MN_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x141 /* GNSS_GASP */
>;
};
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <
MX8MN_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
MX8MN_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
MX8MN_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
MX8MN_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
MX8MN_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
MX8MN_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
>;
};
pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
fsl,pins = <
MX8MN_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
MX8MN_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
MX8MN_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
MX8MN_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
MX8MN_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
MX8MN_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
>;
};
pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
fsl,pins = <
MX8MN_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
MX8MN_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
MX8MN_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
MX8MN_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
MX8MN_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
MX8MN_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
>;
};
pinctrl_usdhc3: usdhc3grp {
fsl,pins = <
MX8MN_IOMUXC_NAND_WE_B_USDHC3_CLK 0x190
MX8MN_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d0
MX8MN_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d0
MX8MN_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d0
MX8MN_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d0
MX8MN_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d0
MX8MN_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d0
MX8MN_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d0
MX8MN_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d0
MX8MN_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d0
MX8MN_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x190
>;
};
pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp {
fsl,pins = <
MX8MN_IOMUXC_NAND_WE_B_USDHC3_CLK 0x194
MX8MN_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d4
MX8MN_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d4
MX8MN_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d4
MX8MN_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d4
MX8MN_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d4
MX8MN_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d4
MX8MN_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d4
MX8MN_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d4
MX8MN_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d4
MX8MN_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x194
>;
};
pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp {
fsl,pins = <
MX8MN_IOMUXC_NAND_WE_B_USDHC3_CLK 0x196
MX8MN_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d6
MX8MN_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d6
MX8MN_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d6
MX8MN_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d6
MX8MN_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d6
MX8MN_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d6
MX8MN_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d6
MX8MN_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d6
MX8MN_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d6
MX8MN_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x196
>;
};
pinctrl_wdog: wdoggrp {
fsl,pins = <
MX8MN_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0xc6
>;
};
};
// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
#include "protocol.h"
struct test_case {
char *key;
char *msg;
char *result;
};
/* we can't reuse the RFC 4231 test vectors, as we have constraints on
 * the input and key sizes.
 */
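/* the key and msg strings are consumed as raw ASCII bytes, not hex-decoded */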
static struct test_case tests[] = {
{
.key = "0b0b0b0b0b0b0b0b",
.msg = "48692054",
.result = "8385e24fb4235ac37556b6b886db106284a1da671699f46db1f235ec622dcafa",
},
{
.key = "aaaaaaaaaaaaaaaa",
.msg = "dddddddd",
.result = "2c5e219164ff1dca1c4a92318d847bb6b9d44492984e1eb71aff9022f71046e9",
},
{
.key = "0102030405060708",
.msg = "cdcdcdcd",
.result = "e73b9ba9969969cefb04aa0d6df18ec2fcc075b6f23b4d8c4da736a5dbbc6e7d",
},
};
static void mptcp_crypto_test_basic(struct kunit *test)
{
char hmac[32], hmac_hex[65];
u32 nonce1, nonce2;
u64 key1, key2;
u8 msg[8];
int i, j;
for (i = 0; i < ARRAY_SIZE(tests); ++i) {
/* the mptcp hmac code converts the keys to big-endian before computing the hmac */
key1 = be64_to_cpu(*((__be64 *)&tests[i].key[0]));
key2 = be64_to_cpu(*((__be64 *)&tests[i].key[8]));
nonce1 = be32_to_cpu(*((__be32 *)&tests[i].msg[0]));
nonce2 = be32_to_cpu(*((__be32 *)&tests[i].msg[4]));
put_unaligned_be32(nonce1, &msg[0]);
put_unaligned_be32(nonce2, &msg[4]);
mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
for (j = 0; j < 32; ++j)
sprintf(&hmac_hex[j << 1], "%02x", hmac[j] & 0xff);
hmac_hex[64] = 0;
KUNIT_EXPECT_STREQ(test, &hmac_hex[0], tests[i].result);
}
}
static struct kunit_case mptcp_crypto_test_cases[] = {
KUNIT_CASE(mptcp_crypto_test_basic),
{}
};
static struct kunit_suite mptcp_crypto_suite = {
.name = "mptcp-crypto",
.test_cases = mptcp_crypto_test_cases,
};
kunit_test_suite(mptcp_crypto_suite);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("KUnit tests for MPTCP Crypto");
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
#if !defined(__LINUX_SPINLOCK_TYPES_H)
# error "Do not include directly, include spinlock_types.h"
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define RW_DEP_MAP_INIT(lockname) \
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
}
#else
# define RW_DEP_MAP_INIT(lockname)
#endif
#ifndef CONFIG_PREEMPT_RT
/*
* generic rwlock type definitions and initializers
*
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*/
typedef struct {
arch_rwlock_t raw_lock;
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed
#ifdef CONFIG_DEBUG_SPINLOCK
#define __RW_LOCK_UNLOCKED(lockname) \
(rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
.magic = RWLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
.owner_cpu = -1, \
RW_DEP_MAP_INIT(lockname) }
#else
#define __RW_LOCK_UNLOCKED(lockname) \
(rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
RW_DEP_MAP_INIT(lockname) }
#endif
#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
#else /* !CONFIG_PREEMPT_RT */
#include <linux/rwbase_rt.h>
typedef struct {
struct rwbase_rt rwbase;
atomic_t readers;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} rwlock_t;
#define __RWLOCK_RT_INITIALIZER(name) \
{ \
.rwbase = __RWBASE_INITIALIZER(name), \
RW_DEP_MAP_INIT(name) \
}
#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
#define DEFINE_RWLOCK(name) \
rwlock_t name = __RW_LOCK_UNLOCKED(name)
#endif /* CONFIG_PREEMPT_RT */
#endif /* __LINUX_RWLOCK_TYPES_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus SVC code
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*/
#ifndef __SVC_H
#define __SVC_H
#include <linux/types.h>
#include <linux/device.h>
struct gb_svc_l2_timer_cfg;
#define GB_SVC_CPORT_FLAG_E2EFC BIT(0)
#define GB_SVC_CPORT_FLAG_CSD_N BIT(1)
#define GB_SVC_CPORT_FLAG_CSV_N BIT(2)
enum gb_svc_state {
GB_SVC_STATE_RESET,
GB_SVC_STATE_PROTOCOL_VERSION,
GB_SVC_STATE_SVC_HELLO,
};
enum gb_svc_watchdog_bite {
GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0,
GB_SVC_WATCHDOG_BITE_PANIC_KERNEL,
};
struct gb_svc_watchdog;
struct svc_debugfs_pwrmon_rail {
u8 id;
struct gb_svc *svc;
};
struct gb_svc {
struct device dev;
struct gb_host_device *hd;
struct gb_connection *connection;
enum gb_svc_state state;
struct ida device_id_map;
struct workqueue_struct *wq;
u16 endo_id;
u8 ap_intf_id;
u8 protocol_major;
u8 protocol_minor;
struct gb_svc_watchdog *watchdog;
enum gb_svc_watchdog_bite action;
struct dentry *debugfs_dentry;
struct svc_debugfs_pwrmon_rail *pwrmon_rails;
};
#define to_gb_svc(d) container_of(d, struct gb_svc, dev)
struct gb_svc *gb_svc_create(struct gb_host_device *hd);
int gb_svc_add(struct gb_svc *svc);
void gb_svc_del(struct gb_svc *svc);
void gb_svc_put(struct gb_svc *svc);
int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
u8 measurement_type, u32 *value);
int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id);
int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
u8 intf2_id, u8 dev2_id);
void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id);
int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
u8 intf2_id, u16 cport2_id, u8 cport_flags);
void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
u8 intf2_id, u16 cport2_id);
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id);
int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable);
int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable);
int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable);
int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type);
int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id);
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
u32 *value);
int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
u32 value);
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
u8 tx_amplitude, u8 tx_hs_equalizer,
u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
u8 flags, u32 quirks,
struct gb_svc_l2_timer_cfg *local,
struct gb_svc_l2_timer_cfg *remote);
int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id);
int gb_svc_ping(struct gb_svc *svc);
int gb_svc_watchdog_create(struct gb_svc *svc);
void gb_svc_watchdog_destroy(struct gb_svc *svc);
bool gb_svc_watchdog_enabled(struct gb_svc *svc);
int gb_svc_watchdog_enable(struct gb_svc *svc);
int gb_svc_watchdog_disable(struct gb_svc *svc);
#endif /* __SVC_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
* This file is released under the GPL.
*/
#include "dm.h"
#include "dm-bio-prison-v1.h"
#include "dm-bio-prison-v2.h"
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
/*----------------------------------------------------------------*/
#define MIN_CELLS 1024
struct prison_region {
spinlock_t lock;
struct rb_root cell;
} ____cacheline_aligned_in_smp;
struct dm_bio_prison {
mempool_t cell_pool;
unsigned int num_locks;
struct prison_region regions[] __counted_by(num_locks);
};
static struct kmem_cache *_cell_cache;
/*----------------------------------------------------------------*/
/*
 * MIN_CELLS sizes the cell pool: it is the number of cells expected to be
 * in use _concurrently_, not the number of distinct keys.
 */
struct dm_bio_prison *dm_bio_prison_create(void)
{
int ret;
unsigned int i, num_locks;
struct dm_bio_prison *prison;
num_locks = dm_num_hash_locks();
prison = kzalloc(struct_size(prison, regions, num_locks), GFP_KERNEL);
if (!prison)
return NULL;
prison->num_locks = num_locks;
for (i = 0; i < prison->num_locks; i++) {
spin_lock_init(&prison->regions[i].lock);
prison->regions[i].cell = RB_ROOT;
}
ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
if (ret) {
kfree(prison);
return NULL;
}
return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);
void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
mempool_exit(&prison->cell_pool);
kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell)
{
mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);
static void __setup_new_cell(struct dm_cell_key *key,
struct bio *holder,
struct dm_bio_prison_cell *cell)
{
memcpy(&cell->key, key, sizeof(cell->key));
cell->holder = holder;
bio_list_init(&cell->bios);
}
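/*
 * Total ordering on keys: compare by virtual, then dev; block ranges that
 * overlap on the same device compare equal, which is what lets a single
 * cell cover a range of blocks.
 */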
static int cmp_keys(struct dm_cell_key *lhs,
struct dm_cell_key *rhs)
{
if (lhs->virtual < rhs->virtual)
return -1;
if (lhs->virtual > rhs->virtual)
return 1;
if (lhs->dev < rhs->dev)
return -1;
if (lhs->dev > rhs->dev)
return 1;
if (lhs->block_end <= rhs->block_begin)
return -1;
if (lhs->block_begin >= rhs->block_end)
return 1;
return 0;
}
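/*
 * Spread cells over the lock regions by hashing the upper bits of
 * block_begin; dm_cell_key_has_valid_range() guarantees a key never
 * crosses a BIO_PRISON_MAX_RANGE boundary, so one lock always suffices.
 */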
static inline unsigned int lock_nr(struct dm_cell_key *key, unsigned int num_locks)
{
return dm_hash_locks_index((key->block_begin >> BIO_PRISON_MAX_RANGE_SHIFT),
num_locks);
}
bool dm_cell_key_has_valid_range(struct dm_cell_key *key)
{
if (WARN_ON_ONCE(key->block_end - key->block_begin > BIO_PRISON_MAX_RANGE))
return false;
if (WARN_ON_ONCE((key->block_begin >> BIO_PRISON_MAX_RANGE_SHIFT) !=
(key->block_end - 1) >> BIO_PRISON_MAX_RANGE_SHIFT))
return false;
return true;
}
EXPORT_SYMBOL(dm_cell_key_has_valid_range);
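/*
 * rb-tree insert keyed by cmp_keys(): if an overlapping cell already
 * exists, the bio (if any) joins its waiting list and 1 is returned;
 * otherwise the preallocated cell is inserted and 0 is returned.
 */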
static int __bio_detain(struct rb_root *root,
struct dm_cell_key *key,
struct bio *inmate,
struct dm_bio_prison_cell *cell_prealloc,
struct dm_bio_prison_cell **cell_result)
{
int r;
struct rb_node **new = &root->rb_node, *parent = NULL;
while (*new) {
struct dm_bio_prison_cell *cell =
rb_entry(*new, struct dm_bio_prison_cell, node);
r = cmp_keys(key, &cell->key);
parent = *new;
if (r < 0)
new = &((*new)->rb_left);
else if (r > 0)
new = &((*new)->rb_right);
else {
if (inmate)
bio_list_add(&cell->bios, inmate);
*cell_result = cell;
return 1;
}
}
__setup_new_cell(key, inmate, cell_prealloc);
*cell_result = cell_prealloc;
rb_link_node(&cell_prealloc->node, parent, new);
rb_insert_color(&cell_prealloc->node, root);
return 0;
}
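/*
 * Returns 1 if the key was already detained (the bio, if any, has been
 * added to the existing cell's inmate list), or 0 if the preallocated
 * cell was inserted with the bio as its holder.
 */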
static int bio_detain(struct dm_bio_prison *prison,
struct dm_cell_key *key,
struct bio *inmate,
struct dm_bio_prison_cell *cell_prealloc,
struct dm_bio_prison_cell **cell_result)
{
int r;
unsigned l = lock_nr(key, prison->num_locks);
spin_lock_irq(&prison->regions[l].lock);
r = __bio_detain(&prison->regions[l].cell, key, inmate, cell_prealloc, cell_result);
spin_unlock_irq(&prison->regions[l].lock);
return r;
}
int dm_bio_detain(struct dm_bio_prison *prison,
struct dm_cell_key *key,
struct bio *inmate,
struct dm_bio_prison_cell *cell_prealloc,
struct dm_bio_prison_cell **cell_result)
{
return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);
/*
* @inmates must have been initialised prior to this call
*/
static void __cell_release(struct rb_root *root,
struct dm_bio_prison_cell *cell,
struct bio_list *inmates)
{
rb_erase(&cell->node, root);
if (inmates) {
if (cell->holder)
bio_list_add(inmates, cell->holder);
bio_list_merge(inmates, &cell->bios);
}
}
void dm_cell_release(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell,
struct bio_list *bios)
{
unsigned l = lock_nr(&cell->key, prison->num_locks);
spin_lock_irq(&prison->regions[l].lock);
__cell_release(&prison->regions[l].cell, cell, bios);
spin_unlock_irq(&prison->regions[l].lock);
}
EXPORT_SYMBOL_GPL(dm_cell_release);
/*
* Sometimes we don't want the holder, just the additional bios.
*/
static void __cell_release_no_holder(struct rb_root *root,
struct dm_bio_prison_cell *cell,
struct bio_list *inmates)
{
rb_erase(&cell->node, root);
bio_list_merge(inmates, &cell->bios);
}
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell,
struct bio_list *inmates)
{
unsigned l = lock_nr(&cell->key, prison->num_locks);
unsigned long flags;
spin_lock_irqsave(&prison->regions[l].lock, flags);
__cell_release_no_holder(&prison->regions[l].cell, cell, inmates);
spin_unlock_irqrestore(&prison->regions[l].lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
void dm_cell_error(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell, blk_status_t error)
{
struct bio_list bios;
struct bio *bio;
bio_list_init(&bios);
dm_cell_release(prison, cell, &bios);
while ((bio = bio_list_pop(&bios))) {
bio->bi_status = error;
bio_endio(bio);
}
}
EXPORT_SYMBOL_GPL(dm_cell_error);
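/*
 * Pass the cell to @visit_fn and remove it from the prison, all under
 * the region lock.  The held bios are not released here; the caller is
 * expected to collect them from within @visit_fn.
 */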
void dm_cell_visit_release(struct dm_bio_prison *prison,
void (*visit_fn)(void *, struct dm_bio_prison_cell *),
void *context,
struct dm_bio_prison_cell *cell)
{
unsigned l = lock_nr(&cell->key, prison->num_locks);
spin_lock_irq(&prison->regions[l].lock);
visit_fn(context, cell);
rb_erase(&cell->node, &prison->regions[l].cell);
spin_unlock_irq(&prison->regions[l].lock);
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);
/*----------------------------------------------------------------*/
#define DEFERRED_SET_SIZE 64
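/*
 * A deferred set is a small ring of reference-counted entries.
 * dm_deferred_entry_inc() pins the current entry; work added while
 * anything is pinned is parked on the current entry's list.  As counts
 * drop to zero the sweeper walks forward, handing back the parked work.
 */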
struct dm_deferred_entry {
struct dm_deferred_set *ds;
unsigned int count;
struct list_head work_items;
};
struct dm_deferred_set {
spinlock_t lock;
unsigned int current_entry;
unsigned int sweeper;
struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};
struct dm_deferred_set *dm_deferred_set_create(void)
{
int i;
struct dm_deferred_set *ds;
ds = kmalloc(sizeof(*ds), GFP_KERNEL);
if (!ds)
return NULL;
spin_lock_init(&ds->lock);
ds->current_entry = 0;
ds->sweeper = 0;
for (i = 0; i < DEFERRED_SET_SIZE; i++) {
ds->entries[i].ds = ds;
ds->entries[i].count = 0;
INIT_LIST_HEAD(&ds->entries[i].work_items);
}
return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);
void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);
struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
unsigned long flags;
struct dm_deferred_entry *entry;
spin_lock_irqsave(&ds->lock, flags);
entry = ds->entries + ds->current_entry;
entry->count++;
spin_unlock_irqrestore(&ds->lock, flags);
return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);
static unsigned int ds_next(unsigned int index)
{
return (index + 1) % DEFERRED_SET_SIZE;
}
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
while ((ds->sweeper != ds->current_entry) &&
!ds->entries[ds->sweeper].count) {
list_splice_init(&ds->entries[ds->sweeper].work_items, head);
ds->sweeper = ds_next(ds->sweeper);
}
if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
unsigned long flags;
spin_lock_irqsave(&entry->ds->lock, flags);
BUG_ON(!entry->count);
--entry->count;
__sweep(entry->ds, head);
spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
/*
 * Returns 1 if the work was deferred, or 0 if there are no pending
 * items, in which case the job can proceed immediately.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
int r = 1;
unsigned int next_entry;
spin_lock_irq(&ds->lock);
if ((ds->sweeper == ds->current_entry) &&
!ds->entries[ds->current_entry].count)
r = 0;
else {
list_add(work, &ds->entries[ds->current_entry].work_items);
next_entry = ds_next(ds->current_entry);
if (!ds->entries[next_entry].count)
ds->current_entry = next_entry;
}
spin_unlock_irq(&ds->lock);
return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);
/*----------------------------------------------------------------*/
static int __init dm_bio_prison_init_v1(void)
{
_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
if (!_cell_cache)
return -ENOMEM;
return 0;
}
static void dm_bio_prison_exit_v1(void)
{
kmem_cache_destroy(_cell_cache);
_cell_cache = NULL;
}
static int (*_inits[])(void) __initdata = {
dm_bio_prison_init_v1,
dm_bio_prison_init_v2,
};
static void (*_exits[])(void) = {
dm_bio_prison_exit_v1,
dm_bio_prison_exit_v2,
};
static int __init dm_bio_prison_init(void)
{
const int count = ARRAY_SIZE(_inits);
int r, i;
for (i = 0; i < count; i++) {
r = _inits[i]();
if (r)
goto bad;
}
return 0;
bad:
while (i--)
_exits[i]();
return r;
}
static void __exit dm_bio_prison_exit(void)
{
int i = ARRAY_SIZE(_exits);
while (i--)
_exits[i]();
}
/*
* module hooks
*/
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);
MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <[email protected]>");
MODULE_LICENSE("GPL");
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2010, Lars-Peter Clausen <[email protected]>
*/
#ifndef __LINUX_POWER_GPIO_CHARGER_H__
#define __LINUX_POWER_GPIO_CHARGER_H__
#include <linux/power_supply.h>
#include <linux/types.h>
/**
* struct gpio_charger_platform_data - platform_data for gpio_charger devices
* @name: Name for the chargers power_supply device
* @type: Type of the charger
* @supplied_to: Array of battery names to which this chargers supplies power
* @num_supplicants: Number of entries in the supplied_to array
*/
struct gpio_charger_platform_data {
const char *name;
enum power_supply_type type;
char **supplied_to;
size_t num_supplicants;
};
#endif
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) International Business Machines Corp., 2000-2002
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*/
#ifndef _H_JFS_UNICODE
#define _H_JFS_UNICODE
#include <linux/slab.h>
#include <asm/byteorder.h>
#include "../nls/nls_ucs2_data.h"
#include "jfs_types.h"
extern int get_UCSname(struct component_name *, struct dentry *);
extern int jfs_strfromUCS_le(char *, const __le16 *, int, struct nls_table *);
#define free_UCSname(COMP) kfree((COMP)->name)
/*
* UniStrcpy: Copy a string
*/
static inline wchar_t *UniStrcpy(wchar_t * ucs1, const wchar_t * ucs2)
{
wchar_t *anchor = ucs1; /* save the start of result string */
while ((*ucs1++ = *ucs2++));
return anchor;
}
/*
 * UniStrncpy_le: Copy a length-limited little-endian string with pad
 */
static inline __le16 *UniStrncpy_le(__le16 * ucs1, const __le16 * ucs2,
size_t n)
{
__le16 *anchor = ucs1;
while (n-- && *ucs2) /* Copy the strings */
*ucs1++ = *ucs2++;
n++;
while (n--) /* Pad with nulls */
*ucs1++ = 0;
return anchor;
}
/*
* UniStrncmp_le: Compare length limited string - native to little-endian
*/
static inline int UniStrncmp_le(const wchar_t * ucs1, const __le16 * ucs2,
size_t n)
{
if (!n)
return 0; /* Null strings are equal */
while ((*ucs1 == __le16_to_cpu(*ucs2)) && *ucs1 && --n) {
ucs1++;
ucs2++;
}
return (int) *ucs1 - (int) __le16_to_cpu(*ucs2);
}
/*
* UniStrncpy_to_le: Copy length limited string with pad to little-endian
*/
static inline __le16 *UniStrncpy_to_le(__le16 * ucs1, const wchar_t * ucs2,
size_t n)
{
__le16 *anchor = ucs1;
while (n-- && *ucs2) /* Copy the strings */
*ucs1++ = cpu_to_le16(*ucs2++);
n++;
while (n--) /* Pad with nulls */
*ucs1++ = 0;
return anchor;
}
/*
* UniStrncpy_from_le: Copy length limited string with pad from little-endian
*/
static inline wchar_t *UniStrncpy_from_le(wchar_t * ucs1, const __le16 * ucs2,
size_t n)
{
wchar_t *anchor = ucs1;
while (n-- && *ucs2) /* Copy the strings */
*ucs1++ = __le16_to_cpu(*ucs2++);
n++;
while (n--) /* Pad with nulls */
*ucs1++ = 0;
return anchor;
}
/*
* UniToupper: Convert a unicode character to upper case
*/
static inline wchar_t UniToupper(wchar_t uc)
{
const struct UniCaseRange *rp;
if (uc < sizeof(NlsUniUpperTable)) { /* Latin characters */
return uc + NlsUniUpperTable[uc]; /* Use base tables */
} else {
rp = NlsUniUpperRange; /* Use range tables */
while (rp->start) {
if (uc < rp->start) /* Before start of range */
return uc; /* Uppercase = input */
if (uc <= rp->end) /* In range */
return uc + rp->table[uc - rp->start];
rp++; /* Try next range */
}
}
return uc; /* Past last range */
}
/*
* UniStrupr: Upper case a unicode string
*/
static inline wchar_t *UniStrupr(wchar_t * upin)
{
wchar_t *up;
up = upin;
while (*up) { /* For all characters */
*up = UniToupper(*up);
up++;
}
return upin; /* Return input pointer */
}
#endif /* !_H_JFS_UNICODE */
|
// SPDX-License-Identifier: GPL-2.0-only
/*
 * cs53l30.c -- CS53L30 ALSA SoC audio driver
*
* Copyright 2015 Cirrus Logic, Inc.
*
* Authors: Paul Handrigan <[email protected]>,
* Tim Howe <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/tlv.h>
#include "cs53l30.h"
#include "cirrus_legacy.h"
#define CS53L30_NUM_SUPPLIES 2
static const char *const cs53l30_supply_names[CS53L30_NUM_SUPPLIES] = {
"VA",
"VP",
};
struct cs53l30_private {
struct regulator_bulk_data supplies[CS53L30_NUM_SUPPLIES];
struct regmap *regmap;
struct gpio_desc *reset_gpio;
struct gpio_desc *mute_gpio;
struct clk *mclk;
bool use_sdout2;
u32 mclk_rate;
};
static const struct reg_default cs53l30_reg_defaults[] = {
{ CS53L30_PWRCTL, CS53L30_PWRCTL_DEFAULT },
{ CS53L30_MCLKCTL, CS53L30_MCLKCTL_DEFAULT },
{ CS53L30_INT_SR_CTL, CS53L30_INT_SR_CTL_DEFAULT },
{ CS53L30_MICBIAS_CTL, CS53L30_MICBIAS_CTL_DEFAULT },
{ CS53L30_ASPCFG_CTL, CS53L30_ASPCFG_CTL_DEFAULT },
{ CS53L30_ASP_CTL1, CS53L30_ASP_CTL1_DEFAULT },
{ CS53L30_ASP_TDMTX_CTL1, CS53L30_ASP_TDMTX_CTLx_DEFAULT },
{ CS53L30_ASP_TDMTX_CTL2, CS53L30_ASP_TDMTX_CTLx_DEFAULT },
{ CS53L30_ASP_TDMTX_CTL3, CS53L30_ASP_TDMTX_CTLx_DEFAULT },
{ CS53L30_ASP_TDMTX_CTL4, CS53L30_ASP_TDMTX_CTLx_DEFAULT },
{ CS53L30_ASP_TDMTX_EN1, CS53L30_ASP_TDMTX_ENx_DEFAULT },
{ CS53L30_ASP_TDMTX_EN2, CS53L30_ASP_TDMTX_ENx_DEFAULT },
{ CS53L30_ASP_TDMTX_EN3, CS53L30_ASP_TDMTX_ENx_DEFAULT },
{ CS53L30_ASP_TDMTX_EN4, CS53L30_ASP_TDMTX_ENx_DEFAULT },
{ CS53L30_ASP_TDMTX_EN5, CS53L30_ASP_TDMTX_ENx_DEFAULT },
{ CS53L30_ASP_TDMTX_EN6, CS53L30_ASP_TDMTX_ENx_DEFAULT },
{ CS53L30_ASP_CTL2, CS53L30_ASP_CTL2_DEFAULT },
{ CS53L30_SFT_RAMP, CS53L30_SFT_RMP_DEFAULT },
{ CS53L30_LRCK_CTL1, CS53L30_LRCK_CTLx_DEFAULT },
{ CS53L30_LRCK_CTL2, CS53L30_LRCK_CTLx_DEFAULT },
{ CS53L30_MUTEP_CTL1, CS53L30_MUTEP_CTL1_DEFAULT },
{ CS53L30_MUTEP_CTL2, CS53L30_MUTEP_CTL2_DEFAULT },
{ CS53L30_INBIAS_CTL1, CS53L30_INBIAS_CTL1_DEFAULT },
{ CS53L30_INBIAS_CTL2, CS53L30_INBIAS_CTL2_DEFAULT },
{ CS53L30_DMIC1_STR_CTL, CS53L30_DMIC1_STR_CTL_DEFAULT },
{ CS53L30_DMIC2_STR_CTL, CS53L30_DMIC2_STR_CTL_DEFAULT },
{ CS53L30_ADCDMIC1_CTL1, CS53L30_ADCDMICx_CTL1_DEFAULT },
{ CS53L30_ADCDMIC1_CTL2, CS53L30_ADCDMIC1_CTL2_DEFAULT },
{ CS53L30_ADC1_CTL3, CS53L30_ADCx_CTL3_DEFAULT },
{ CS53L30_ADC1_NG_CTL, CS53L30_ADCx_NG_CTL_DEFAULT },
{ CS53L30_ADC1A_AFE_CTL, CS53L30_ADCxy_AFE_CTL_DEFAULT },
{ CS53L30_ADC1B_AFE_CTL, CS53L30_ADCxy_AFE_CTL_DEFAULT },
{ CS53L30_ADC1A_DIG_VOL, CS53L30_ADCxy_DIG_VOL_DEFAULT },
{ CS53L30_ADC1B_DIG_VOL, CS53L30_ADCxy_DIG_VOL_DEFAULT },
{ CS53L30_ADCDMIC2_CTL1, CS53L30_ADCDMICx_CTL1_DEFAULT },
{ CS53L30_ADCDMIC2_CTL2, CS53L30_ADCDMIC1_CTL2_DEFAULT },
{ CS53L30_ADC2_CTL3, CS53L30_ADCx_CTL3_DEFAULT },
{ CS53L30_ADC2_NG_CTL, CS53L30_ADCx_NG_CTL_DEFAULT },
{ CS53L30_ADC2A_AFE_CTL, CS53L30_ADCxy_AFE_CTL_DEFAULT },
{ CS53L30_ADC2B_AFE_CTL, CS53L30_ADCxy_AFE_CTL_DEFAULT },
{ CS53L30_ADC2A_DIG_VOL, CS53L30_ADCxy_DIG_VOL_DEFAULT },
{ CS53L30_ADC2B_DIG_VOL, CS53L30_ADCxy_DIG_VOL_DEFAULT },
{ CS53L30_INT_MASK, CS53L30_DEVICE_INT_MASK },
};
static bool cs53l30_volatile_register(struct device *dev, unsigned int reg)
{
if (reg == CS53L30_IS)
return true;
else
return false;
}
static bool cs53l30_writeable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case CS53L30_DEVID_AB:
case CS53L30_DEVID_CD:
case CS53L30_DEVID_E:
case CS53L30_REVID:
case CS53L30_IS:
return false;
default:
return true;
}
}
static bool cs53l30_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case CS53L30_DEVID_AB:
case CS53L30_DEVID_CD:
case CS53L30_DEVID_E:
case CS53L30_REVID:
case CS53L30_PWRCTL:
case CS53L30_MCLKCTL:
case CS53L30_INT_SR_CTL:
case CS53L30_MICBIAS_CTL:
case CS53L30_ASPCFG_CTL:
case CS53L30_ASP_CTL1:
case CS53L30_ASP_TDMTX_CTL1:
case CS53L30_ASP_TDMTX_CTL2:
case CS53L30_ASP_TDMTX_CTL3:
case CS53L30_ASP_TDMTX_CTL4:
case CS53L30_ASP_TDMTX_EN1:
case CS53L30_ASP_TDMTX_EN2:
case CS53L30_ASP_TDMTX_EN3:
case CS53L30_ASP_TDMTX_EN4:
case CS53L30_ASP_TDMTX_EN5:
case CS53L30_ASP_TDMTX_EN6:
case CS53L30_ASP_CTL2:
case CS53L30_SFT_RAMP:
case CS53L30_LRCK_CTL1:
case CS53L30_LRCK_CTL2:
case CS53L30_MUTEP_CTL1:
case CS53L30_MUTEP_CTL2:
case CS53L30_INBIAS_CTL1:
case CS53L30_INBIAS_CTL2:
case CS53L30_DMIC1_STR_CTL:
case CS53L30_DMIC2_STR_CTL:
case CS53L30_ADCDMIC1_CTL1:
case CS53L30_ADCDMIC1_CTL2:
case CS53L30_ADC1_CTL3:
case CS53L30_ADC1_NG_CTL:
case CS53L30_ADC1A_AFE_CTL:
case CS53L30_ADC1B_AFE_CTL:
case CS53L30_ADC1A_DIG_VOL:
case CS53L30_ADC1B_DIG_VOL:
case CS53L30_ADCDMIC2_CTL1:
case CS53L30_ADCDMIC2_CTL2:
case CS53L30_ADC2_CTL3:
case CS53L30_ADC2_NG_CTL:
case CS53L30_ADC2A_AFE_CTL:
case CS53L30_ADC2B_AFE_CTL:
case CS53L30_ADC2A_DIG_VOL:
case CS53L30_ADC2B_DIG_VOL:
case CS53L30_INT_MASK:
return true;
default:
return false;
}
}
static DECLARE_TLV_DB_SCALE(adc_boost_tlv, 0, 2000, 0);
static DECLARE_TLV_DB_SCALE(adc_ng_boost_tlv, 0, 3000, 0);
static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
static DECLARE_TLV_DB_SCALE(dig_tlv, -9600, 100, 1);
static DECLARE_TLV_DB_SCALE(pga_preamp_tlv, 0, 10000, 0);
static const char * const input1_sel_text[] = {
"DMIC1 On AB In",
"DMIC1 On A In",
"DMIC1 On B In",
"ADC1 On AB In",
"ADC1 On A In",
"ADC1 On B In",
"DMIC1 Off ADC1 Off",
};
static unsigned int const input1_sel_values[] = {
CS53L30_CH_TYPE,
CS53L30_ADCxB_PDN | CS53L30_CH_TYPE,
CS53L30_ADCxA_PDN | CS53L30_CH_TYPE,
CS53L30_DMICx_PDN,
CS53L30_ADCxB_PDN | CS53L30_DMICx_PDN,
CS53L30_ADCxA_PDN | CS53L30_DMICx_PDN,
CS53L30_ADCxA_PDN | CS53L30_ADCxB_PDN | CS53L30_DMICx_PDN,
};
static const char * const input2_sel_text[] = {
"DMIC2 On AB In",
"DMIC2 On A In",
"DMIC2 On B In",
"ADC2 On AB In",
"ADC2 On A In",
"ADC2 On B In",
"DMIC2 Off ADC2 Off",
};
static unsigned int const input2_sel_values[] = {
0x0,
CS53L30_ADCxB_PDN,
CS53L30_ADCxA_PDN,
CS53L30_DMICx_PDN,
CS53L30_ADCxB_PDN | CS53L30_DMICx_PDN,
CS53L30_ADCxA_PDN | CS53L30_DMICx_PDN,
CS53L30_ADCxA_PDN | CS53L30_ADCxB_PDN | CS53L30_DMICx_PDN,
};
static const char * const input1_route_sel_text[] = {
"ADC1_SEL", "DMIC1_SEL",
};
static const struct soc_enum input1_route_sel_enum =
SOC_ENUM_SINGLE(CS53L30_ADCDMIC1_CTL1, CS53L30_CH_TYPE_SHIFT,
ARRAY_SIZE(input1_route_sel_text),
input1_route_sel_text);
static SOC_VALUE_ENUM_SINGLE_DECL(input1_sel_enum, CS53L30_ADCDMIC1_CTL1, 0,
CS53L30_ADCDMICx_PDN_MASK, input1_sel_text,
input1_sel_values);
static const struct snd_kcontrol_new input1_route_sel_mux =
SOC_DAPM_ENUM("Input 1 Route", input1_route_sel_enum);
static const char * const input2_route_sel_text[] = {
"ADC2_SEL", "DMIC2_SEL",
};
/* Note: CS53L30_ADCDMIC1_CTL1 CH_TYPE controls inputs 1 and 2 */
static const struct soc_enum input2_route_sel_enum =
SOC_ENUM_SINGLE(CS53L30_ADCDMIC1_CTL1, 0,
ARRAY_SIZE(input2_route_sel_text),
input2_route_sel_text);
static SOC_VALUE_ENUM_SINGLE_DECL(input2_sel_enum, CS53L30_ADCDMIC2_CTL1, 0,
CS53L30_ADCDMICx_PDN_MASK, input2_sel_text,
input2_sel_values);
static const struct snd_kcontrol_new input2_route_sel_mux =
SOC_DAPM_ENUM("Input 2 Route", input2_route_sel_enum);
/*
* TB = 6144*(MCLK(int) scaling factor)/MCLK(internal)
* TB - Time base
* NOTE: If MCLK_INT_SCALE = 0, then TB=1
*/
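/*
 * For example, assuming MCLK(internal) is expressed in kHz: with a
 * 6.144 MHz internal MCLK and a scaling factor of 1, TB = 6144 * 1 /
 * 6144 = 1, so the delays below are their nominal 50-200 ms.
 */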
static const char * const cs53l30_ng_delay_text[] = {
"TB*50ms", "TB*100ms", "TB*150ms", "TB*200ms",
};
static const struct soc_enum adc1_ng_delay_enum =
SOC_ENUM_SINGLE(CS53L30_ADC1_NG_CTL, CS53L30_ADCx_NG_DELAY_SHIFT,
ARRAY_SIZE(cs53l30_ng_delay_text),
cs53l30_ng_delay_text);
static const struct soc_enum adc2_ng_delay_enum =
SOC_ENUM_SINGLE(CS53L30_ADC2_NG_CTL, CS53L30_ADCx_NG_DELAY_SHIFT,
ARRAY_SIZE(cs53l30_ng_delay_text),
cs53l30_ng_delay_text);
/* The noise gate threshold selected will depend on NG Boost */
static const char * const cs53l30_ng_thres_text[] = {
"-64dB/-34dB", "-66dB/-36dB", "-70dB/-40dB", "-73dB/-43dB",
"-76dB/-46dB", "-82dB/-52dB", "-58dB", "-64dB",
};
static const struct soc_enum adc1_ng_thres_enum =
SOC_ENUM_SINGLE(CS53L30_ADC1_NG_CTL, CS53L30_ADCx_NG_THRESH_SHIFT,
ARRAY_SIZE(cs53l30_ng_thres_text),
cs53l30_ng_thres_text);
static const struct soc_enum adc2_ng_thres_enum =
SOC_ENUM_SINGLE(CS53L30_ADC2_NG_CTL, CS53L30_ADCx_NG_THRESH_SHIFT,
ARRAY_SIZE(cs53l30_ng_thres_text),
cs53l30_ng_thres_text);
/* Corner frequencies are with an Fs of 48kHz. */
static const char * const hpf_corner_freq_text[] = {
"1.86Hz", "120Hz", "235Hz", "466Hz",
};
static const struct soc_enum adc1_hpf_enum =
SOC_ENUM_SINGLE(CS53L30_ADC1_CTL3, CS53L30_ADCx_HPF_CF_SHIFT,
ARRAY_SIZE(hpf_corner_freq_text), hpf_corner_freq_text);
static const struct soc_enum adc2_hpf_enum =
SOC_ENUM_SINGLE(CS53L30_ADC2_CTL3, CS53L30_ADCx_HPF_CF_SHIFT,
ARRAY_SIZE(hpf_corner_freq_text), hpf_corner_freq_text);
static const struct snd_kcontrol_new cs53l30_snd_controls[] = {
SOC_SINGLE("Digital Soft-Ramp Switch", CS53L30_SFT_RAMP,
CS53L30_DIGSFT_SHIFT, 1, 0),
SOC_SINGLE("ADC1 Noise Gate Ganging Switch", CS53L30_ADC1_CTL3,
CS53L30_ADCx_NG_ALL_SHIFT, 1, 0),
SOC_SINGLE("ADC2 Noise Gate Ganging Switch", CS53L30_ADC2_CTL3,
CS53L30_ADCx_NG_ALL_SHIFT, 1, 0),
SOC_SINGLE("ADC1A Noise Gate Enable Switch", CS53L30_ADC1_NG_CTL,
CS53L30_ADCxA_NG_SHIFT, 1, 0),
SOC_SINGLE("ADC1B Noise Gate Enable Switch", CS53L30_ADC1_NG_CTL,
CS53L30_ADCxB_NG_SHIFT, 1, 0),
SOC_SINGLE("ADC2A Noise Gate Enable Switch", CS53L30_ADC2_NG_CTL,
CS53L30_ADCxA_NG_SHIFT, 1, 0),
SOC_SINGLE("ADC2B Noise Gate Enable Switch", CS53L30_ADC2_NG_CTL,
CS53L30_ADCxB_NG_SHIFT, 1, 0),
SOC_SINGLE("ADC1 Notch Filter Switch", CS53L30_ADCDMIC1_CTL2,
CS53L30_ADCx_NOTCH_DIS_SHIFT, 1, 1),
SOC_SINGLE("ADC2 Notch Filter Switch", CS53L30_ADCDMIC2_CTL2,
CS53L30_ADCx_NOTCH_DIS_SHIFT, 1, 1),
SOC_SINGLE("ADC1A Invert Switch", CS53L30_ADCDMIC1_CTL2,
CS53L30_ADCxA_INV_SHIFT, 1, 0),
SOC_SINGLE("ADC1B Invert Switch", CS53L30_ADCDMIC1_CTL2,
CS53L30_ADCxB_INV_SHIFT, 1, 0),
SOC_SINGLE("ADC2A Invert Switch", CS53L30_ADCDMIC2_CTL2,
CS53L30_ADCxA_INV_SHIFT, 1, 0),
SOC_SINGLE("ADC2B Invert Switch", CS53L30_ADCDMIC2_CTL2,
CS53L30_ADCxB_INV_SHIFT, 1, 0),
SOC_SINGLE_TLV("ADC1A Digital Boost Volume", CS53L30_ADCDMIC1_CTL2,
CS53L30_ADCxA_DIG_BOOST_SHIFT, 1, 0, adc_boost_tlv),
SOC_SINGLE_TLV("ADC1B Digital Boost Volume", CS53L30_ADCDMIC1_CTL2,
CS53L30_ADCxB_DIG_BOOST_SHIFT, 1, 0, adc_boost_tlv),
SOC_SINGLE_TLV("ADC2A Digital Boost Volume", CS53L30_ADCDMIC2_CTL2,
CS53L30_ADCxA_DIG_BOOST_SHIFT, 1, 0, adc_boost_tlv),
SOC_SINGLE_TLV("ADC2B Digital Boost Volume", CS53L30_ADCDMIC2_CTL2,
CS53L30_ADCxB_DIG_BOOST_SHIFT, 1, 0, adc_boost_tlv),
SOC_SINGLE_TLV("ADC1 NG Boost Volume", CS53L30_ADC1_NG_CTL,
CS53L30_ADCx_NG_BOOST_SHIFT, 1, 0, adc_ng_boost_tlv),
SOC_SINGLE_TLV("ADC2 NG Boost Volume", CS53L30_ADC2_NG_CTL,
CS53L30_ADCx_NG_BOOST_SHIFT, 1, 0, adc_ng_boost_tlv),
SOC_DOUBLE_R_TLV("ADC1 Preamplifier Volume", CS53L30_ADC1A_AFE_CTL,
CS53L30_ADC1B_AFE_CTL, CS53L30_ADCxy_PREAMP_SHIFT,
2, 0, pga_preamp_tlv),
SOC_DOUBLE_R_TLV("ADC2 Preamplifier Volume", CS53L30_ADC2A_AFE_CTL,
CS53L30_ADC2B_AFE_CTL, CS53L30_ADCxy_PREAMP_SHIFT,
2, 0, pga_preamp_tlv),
SOC_ENUM("Input 1 Channel Select", input1_sel_enum),
SOC_ENUM("Input 2 Channel Select", input2_sel_enum),
SOC_ENUM("ADC1 HPF Select", adc1_hpf_enum),
SOC_ENUM("ADC2 HPF Select", adc2_hpf_enum),
SOC_ENUM("ADC1 NG Threshold", adc1_ng_thres_enum),
SOC_ENUM("ADC2 NG Threshold", adc2_ng_thres_enum),
SOC_ENUM("ADC1 NG Delay", adc1_ng_delay_enum),
SOC_ENUM("ADC2 NG Delay", adc2_ng_delay_enum),
SOC_SINGLE_SX_TLV("ADC1A PGA Volume",
CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
SOC_SINGLE_SX_TLV("ADC1B PGA Volume",
CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
SOC_SINGLE_SX_TLV("ADC2A PGA Volume",
CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
SOC_SINGLE_SX_TLV("ADC2B PGA Volume",
CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
SOC_SINGLE_SX_TLV("ADC1A Digital Volume",
CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
SOC_SINGLE_SX_TLV("ADC1B Digital Volume",
CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
SOC_SINGLE_SX_TLV("ADC2A Digital Volume",
CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
SOC_SINGLE_SX_TLV("ADC2B Digital Volume",
CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
};
static const struct snd_soc_dapm_widget cs53l30_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("IN1_DMIC1"),
SND_SOC_DAPM_INPUT("IN2"),
SND_SOC_DAPM_INPUT("IN3_DMIC2"),
SND_SOC_DAPM_INPUT("IN4"),
SND_SOC_DAPM_SUPPLY("MIC1 Bias", CS53L30_MICBIAS_CTL,
CS53L30_MIC1_BIAS_PDN_SHIFT, 1, NULL, 0),
SND_SOC_DAPM_SUPPLY("MIC2 Bias", CS53L30_MICBIAS_CTL,
CS53L30_MIC2_BIAS_PDN_SHIFT, 1, NULL, 0),
SND_SOC_DAPM_SUPPLY("MIC3 Bias", CS53L30_MICBIAS_CTL,
CS53L30_MIC3_BIAS_PDN_SHIFT, 1, NULL, 0),
SND_SOC_DAPM_SUPPLY("MIC4 Bias", CS53L30_MICBIAS_CTL,
CS53L30_MIC4_BIAS_PDN_SHIFT, 1, NULL, 0),
SND_SOC_DAPM_AIF_OUT("ASP_SDOUT1", NULL, 0, CS53L30_ASP_CTL1,
CS53L30_ASP_SDOUTx_PDN_SHIFT, 1),
SND_SOC_DAPM_AIF_OUT("ASP_SDOUT2", NULL, 0, CS53L30_ASP_CTL2,
CS53L30_ASP_SDOUTx_PDN_SHIFT, 1),
SND_SOC_DAPM_MUX("Input Mux 1", SND_SOC_NOPM, 0, 0,
&input1_route_sel_mux),
SND_SOC_DAPM_MUX("Input Mux 2", SND_SOC_NOPM, 0, 0,
&input2_route_sel_mux),
SND_SOC_DAPM_ADC("ADC1A", NULL, CS53L30_ADCDMIC1_CTL1,
CS53L30_ADCxA_PDN_SHIFT, 1),
SND_SOC_DAPM_ADC("ADC1B", NULL, CS53L30_ADCDMIC1_CTL1,
CS53L30_ADCxB_PDN_SHIFT, 1),
SND_SOC_DAPM_ADC("ADC2A", NULL, CS53L30_ADCDMIC2_CTL1,
CS53L30_ADCxA_PDN_SHIFT, 1),
SND_SOC_DAPM_ADC("ADC2B", NULL, CS53L30_ADCDMIC2_CTL1,
CS53L30_ADCxB_PDN_SHIFT, 1),
SND_SOC_DAPM_ADC("DMIC1", NULL, CS53L30_ADCDMIC1_CTL1,
CS53L30_DMICx_PDN_SHIFT, 1),
SND_SOC_DAPM_ADC("DMIC2", NULL, CS53L30_ADCDMIC2_CTL1,
CS53L30_DMICx_PDN_SHIFT, 1),
};
static const struct snd_soc_dapm_route cs53l30_dapm_routes[] = {
/* ADC Input Paths */
{"ADC1A", NULL, "IN1_DMIC1"},
{"Input Mux 1", "ADC1_SEL", "ADC1A"},
{"ADC1B", NULL, "IN2"},
{"ADC2A", NULL, "IN3_DMIC2"},
{"Input Mux 2", "ADC2_SEL", "ADC2A"},
{"ADC2B", NULL, "IN4"},
/* MIC Bias Paths */
{"ADC1A", NULL, "MIC1 Bias"},
{"ADC1B", NULL, "MIC2 Bias"},
{"ADC2A", NULL, "MIC3 Bias"},
{"ADC2B", NULL, "MIC4 Bias"},
/* DMIC Paths */
{"DMIC1", NULL, "IN1_DMIC1"},
{"Input Mux 1", "DMIC1_SEL", "DMIC1"},
{"DMIC2", NULL, "IN3_DMIC2"},
{"Input Mux 2", "DMIC2_SEL", "DMIC2"},
};
static const struct snd_soc_dapm_route cs53l30_dapm_routes_sdout1[] = {
/* Output Paths when using SDOUT1 only */
{"ASP_SDOUT1", NULL, "ADC1A" },
{"ASP_SDOUT1", NULL, "Input Mux 1"},
{"ASP_SDOUT1", NULL, "ADC1B"},
{"ASP_SDOUT1", NULL, "ADC2A"},
{"ASP_SDOUT1", NULL, "Input Mux 2"},
{"ASP_SDOUT1", NULL, "ADC2B"},
{"Capture", NULL, "ASP_SDOUT1"},
};
static const struct snd_soc_dapm_route cs53l30_dapm_routes_sdout2[] = {
/* Output Paths when using both SDOUT1 and SDOUT2 */
{"ASP_SDOUT1", NULL, "ADC1A" },
{"ASP_SDOUT1", NULL, "Input Mux 1"},
{"ASP_SDOUT1", NULL, "ADC1B"},
{"ASP_SDOUT2", NULL, "ADC2A"},
{"ASP_SDOUT2", NULL, "Input Mux 2"},
{"ASP_SDOUT2", NULL, "ADC2B"},
{"Capture", NULL, "ASP_SDOUT1"},
{"Capture", NULL, "ASP_SDOUT2"},
};
struct cs53l30_mclk_div {
u32 mclk_rate;
u32 srate;
u8 asp_rate;
u8 internal_fs_ratio;
u8 mclk_int_scale;
};
static const struct cs53l30_mclk_div cs53l30_mclk_coeffs[] = {
/* NOTE: Enable MCLK_INT_SCALE to save power. */
/* MCLK, Sample Rate, asp_rate, internal_fs_ratio, mclk_int_scale */
{5644800, 11025, 0x4, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{5644800, 22050, 0x8, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{5644800, 44100, 0xC, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6000000, 8000, 0x1, 0, CS53L30_MCLK_INT_SCALE},
{6000000, 11025, 0x2, 0, CS53L30_MCLK_INT_SCALE},
{6000000, 12000, 0x4, 0, CS53L30_MCLK_INT_SCALE},
{6000000, 16000, 0x5, 0, CS53L30_MCLK_INT_SCALE},
{6000000, 22050, 0x6, 0, CS53L30_MCLK_INT_SCALE},
{6000000, 24000, 0x8, 0, CS53L30_MCLK_INT_SCALE},
{6000000, 32000, 0x9, 0, CS53L30_MCLK_INT_SCALE},
{6000000, 44100, 0xA, 0, CS53L30_MCLK_INT_SCALE},
{6000000, 48000, 0xC, 0, CS53L30_MCLK_INT_SCALE},
{6144000, 8000, 0x1, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6144000, 11025, 0x2, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6144000, 12000, 0x4, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6144000, 16000, 0x5, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6144000, 22050, 0x6, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6144000, 24000, 0x8, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6144000, 32000, 0x9, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6144000, 44100, 0xA, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6144000, 48000, 0xC, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6400000, 8000, 0x1, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6400000, 11025, 0x2, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6400000, 12000, 0x4, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6400000, 16000, 0x5, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6400000, 22050, 0x6, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6400000, 24000, 0x8, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6400000, 32000, 0x9, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6400000, 44100, 0xA, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
{6400000, 48000, 0xC, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
};
struct cs53l30_mclkx_div {
u32 mclkx;
u8 ratio;
u8 mclkdiv;
};
static const struct cs53l30_mclkx_div cs53l30_mclkx_coeffs[] = {
{5644800, 1, CS53L30_MCLK_DIV_BY_1},
{6000000, 1, CS53L30_MCLK_DIV_BY_1},
{6144000, 1, CS53L30_MCLK_DIV_BY_1},
{11289600, 2, CS53L30_MCLK_DIV_BY_2},
{12288000, 2, CS53L30_MCLK_DIV_BY_2},
{12000000, 2, CS53L30_MCLK_DIV_BY_2},
{19200000, 3, CS53L30_MCLK_DIV_BY_3},
};
static int cs53l30_get_mclkx_coeff(int mclkx)
{
int i;
for (i = 0; i < ARRAY_SIZE(cs53l30_mclkx_coeffs); i++) {
if (cs53l30_mclkx_coeffs[i].mclkx == mclkx)
return i;
}
return -EINVAL;
}
static int cs53l30_get_mclk_coeff(int mclk_rate, int srate)
{
int i;
for (i = 0; i < ARRAY_SIZE(cs53l30_mclk_coeffs); i++) {
if (cs53l30_mclk_coeffs[i].mclk_rate == mclk_rate &&
cs53l30_mclk_coeffs[i].srate == srate)
return i;
}
return -EINVAL;
}
static int cs53l30_set_sysclk(struct snd_soc_dai *dai,
int clk_id, unsigned int freq, int dir)
{
struct cs53l30_private *priv = snd_soc_component_get_drvdata(dai->component);
int mclkx_coeff;
u32 mclk_rate;
/* MCLKX -> MCLK */
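	/*
	 * For example, per the table above, a 12.288 MHz MCLKX input is
	 * divided by 2 to yield a 6.144 MHz internal MCLK.
	 */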
mclkx_coeff = cs53l30_get_mclkx_coeff(freq);
if (mclkx_coeff < 0)
return mclkx_coeff;
mclk_rate = cs53l30_mclkx_coeffs[mclkx_coeff].mclkx /
cs53l30_mclkx_coeffs[mclkx_coeff].ratio;
regmap_update_bits(priv->regmap, CS53L30_MCLKCTL,
CS53L30_MCLK_DIV_MASK,
cs53l30_mclkx_coeffs[mclkx_coeff].mclkdiv);
priv->mclk_rate = mclk_rate;
return 0;
}
static int cs53l30_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct cs53l30_private *priv = snd_soc_component_get_drvdata(dai->component);
u8 aspcfg = 0, aspctl1 = 0;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
aspcfg |= CS53L30_ASP_MS;
break;
case SND_SOC_DAIFMT_CBS_CFS:
break;
default:
return -EINVAL;
}
/* DAI mode */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
/* Set TDM_PDN to turn off TDM mode -- Reset default */
aspctl1 |= CS53L30_ASP_TDM_PDN;
break;
case SND_SOC_DAIFMT_DSP_A:
/*
* Clear TDM_PDN to turn on TDM mode; Use ASP_SCLK_INV = 0
* with SHIFT_LEFT = 1 combination as Figure 4-13 shows in
* the CS53L30 datasheet
*/
aspctl1 |= CS53L30_SHIFT_LEFT;
break;
default:
return -EINVAL;
}
/* Check to see if the SCLK is inverted */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_IB_NF:
case SND_SOC_DAIFMT_IB_IF:
aspcfg ^= CS53L30_ASP_SCLK_INV;
break;
default:
break;
}
regmap_update_bits(priv->regmap, CS53L30_ASPCFG_CTL,
CS53L30_ASP_MS | CS53L30_ASP_SCLK_INV, aspcfg);
regmap_update_bits(priv->regmap, CS53L30_ASP_CTL1,
CS53L30_ASP_TDM_PDN | CS53L30_SHIFT_LEFT, aspctl1);
return 0;
}
static int cs53l30_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct cs53l30_private *priv = snd_soc_component_get_drvdata(dai->component);
int srate = params_rate(params);
int mclk_coeff;
/* MCLK -> srate */
mclk_coeff = cs53l30_get_mclk_coeff(priv->mclk_rate, srate);
if (mclk_coeff < 0)
return -EINVAL;
regmap_update_bits(priv->regmap, CS53L30_INT_SR_CTL,
CS53L30_INTRNL_FS_RATIO_MASK,
cs53l30_mclk_coeffs[mclk_coeff].internal_fs_ratio);
regmap_update_bits(priv->regmap, CS53L30_MCLKCTL,
CS53L30_MCLK_INT_SCALE_MASK,
cs53l30_mclk_coeffs[mclk_coeff].mclk_int_scale);
regmap_update_bits(priv->regmap, CS53L30_ASPCFG_CTL,
CS53L30_ASP_RATE_MASK,
cs53l30_mclk_coeffs[mclk_coeff].asp_rate);
return 0;
}
static int cs53l30_set_bias_level(struct snd_soc_component *component,
enum snd_soc_bias_level level)
{
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
struct cs53l30_private *priv = snd_soc_component_get_drvdata(component);
unsigned int reg;
int i, inter_max_check, ret;
switch (level) {
case SND_SOC_BIAS_ON:
break;
case SND_SOC_BIAS_PREPARE:
if (dapm->bias_level == SND_SOC_BIAS_STANDBY)
regmap_update_bits(priv->regmap, CS53L30_PWRCTL,
CS53L30_PDN_LP_MASK, 0);
break;
case SND_SOC_BIAS_STANDBY:
if (dapm->bias_level == SND_SOC_BIAS_OFF) {
ret = clk_prepare_enable(priv->mclk);
if (ret) {
dev_err(component->dev,
"failed to enable MCLK: %d\n", ret);
return ret;
}
regmap_update_bits(priv->regmap, CS53L30_MCLKCTL,
CS53L30_MCLK_DIS_MASK, 0);
regmap_update_bits(priv->regmap, CS53L30_PWRCTL,
CS53L30_PDN_ULP_MASK, 0);
msleep(50);
} else {
regmap_update_bits(priv->regmap, CS53L30_PWRCTL,
CS53L30_PDN_ULP_MASK,
CS53L30_PDN_ULP);
}
break;
case SND_SOC_BIAS_OFF:
regmap_update_bits(priv->regmap, CS53L30_INT_MASK,
CS53L30_PDN_DONE, 0);
/*
* If digital softramp is set, the amount of time required
* for power down increases and depends on the digital
* volume setting.
*/
/* Set the max possible time if digsft is set */
		regmap_read(priv->regmap, CS53L30_SFT_RAMP, &reg);
if (reg & CS53L30_DIGSFT_MASK)
inter_max_check = CS53L30_PDN_POLL_MAX;
else
inter_max_check = 10;
regmap_update_bits(priv->regmap, CS53L30_PWRCTL,
CS53L30_PDN_ULP_MASK,
CS53L30_PDN_ULP);
		/* PDN_DONE will take a min of 20 ms to be set. */
msleep(20);
/* Clr status */
		regmap_read(priv->regmap, CS53L30_IS, &reg);
for (i = 0; i < inter_max_check; i++) {
if (inter_max_check < 10) {
usleep_range(1000, 1100);
				regmap_read(priv->regmap, CS53L30_IS, &reg);
if (reg & CS53L30_PDN_DONE)
break;
} else {
usleep_range(10000, 10100);
				regmap_read(priv->regmap, CS53L30_IS, &reg);
if (reg & CS53L30_PDN_DONE)
break;
}
}
/* PDN_DONE is set. We now can disable the MCLK */
regmap_update_bits(priv->regmap, CS53L30_INT_MASK,
CS53L30_PDN_DONE, CS53L30_PDN_DONE);
regmap_update_bits(priv->regmap, CS53L30_MCLKCTL,
CS53L30_MCLK_DIS_MASK,
CS53L30_MCLK_DIS);
clk_disable_unprepare(priv->mclk);
break;
}
return 0;
}
static int cs53l30_set_tristate(struct snd_soc_dai *dai, int tristate)
{
struct cs53l30_private *priv = snd_soc_component_get_drvdata(dai->component);
u8 val = tristate ? CS53L30_ASP_3ST : 0;
return regmap_update_bits(priv->regmap, CS53L30_ASP_CTL1,
CS53L30_ASP_3ST_MASK, val);
}
/*
* Note: CS53L30 counts the slot number per byte while ASoC counts the slot
* number per slot_width. So there is a difference between the slots of ASoC
* and the slots of CS53L30.
*/
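/*
 * For example, with slot_width = 16 (2 bytes), ASoC slot 1 becomes
 * CS53L30 slot 2, and enabling it sets CS53L30 slots 2 and 3.
 */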
static int cs53l30_set_dai_tdm_slot(struct snd_soc_dai *dai,
unsigned int tx_mask, unsigned int rx_mask,
int slots, int slot_width)
{
struct cs53l30_private *priv = snd_soc_component_get_drvdata(dai->component);
unsigned int loc[CS53L30_TDM_SLOT_MAX] = {48, 48, 48, 48};
unsigned int slot_next, slot_step;
u64 tx_enable = 0;
int i;
if (!rx_mask) {
dev_err(dai->dev, "rx masks must not be 0\n");
return -EINVAL;
}
	/* slot_width is not expected to be greater than 64 bits */
if (slots <= 0 || slot_width <= 0 || slot_width > 64) {
dev_err(dai->dev, "invalid slot number or slot width\n");
return -EINVAL;
}
if (slot_width & 0x7) {
dev_err(dai->dev, "slot width must count in byte\n");
return -EINVAL;
}
/* How many bytes in each ASoC slot */
slot_step = slot_width >> 3;
for (i = 0; rx_mask && i < CS53L30_TDM_SLOT_MAX; i++) {
/* Find the first slot from LSB */
slot_next = __ffs(rx_mask);
/* Save the slot location by converting to CS53L30 slot */
loc[i] = slot_next * slot_step;
/* Create the mask of CS53L30 slot */
tx_enable |= (u64)((u64)(1 << slot_step) - 1) << (u64)loc[i];
/* Clear this slot from rx_mask */
rx_mask &= ~(1 << slot_next);
}
/* Error out to avoid slot shift */
if (rx_mask && i == CS53L30_TDM_SLOT_MAX) {
dev_err(dai->dev, "rx_mask exceeds max slot number: %d\n",
CS53L30_TDM_SLOT_MAX);
return -EINVAL;
}
/* Validate the last active CS53L30 slot */
slot_next = loc[i - 1] + slot_step - 1;
if (slot_next > 47) {
dev_err(dai->dev, "slot selection out of bounds: %u\n",
slot_next);
return -EINVAL;
}
for (i = 0; i < CS53L30_TDM_SLOT_MAX && loc[i] != 48; i++) {
regmap_update_bits(priv->regmap, CS53L30_ASP_TDMTX_CTL(i),
CS53L30_ASP_CHx_TX_LOC_MASK, loc[i]);
dev_dbg(dai->dev, "loc[%d]=%x\n", i, loc[i]);
}
for (i = 0; i < CS53L30_ASP_TDMTX_ENx_MAX && tx_enable; i++) {
regmap_write(priv->regmap, CS53L30_ASP_TDMTX_ENx(i),
tx_enable & 0xff);
tx_enable >>= 8;
dev_dbg(dai->dev, "en_reg=%x, tx_enable=%llx\n",
CS53L30_ASP_TDMTX_ENx(i), tx_enable & 0xff);
}
return 0;
}
static int cs53l30_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
{
struct cs53l30_private *priv = snd_soc_component_get_drvdata(dai->component);
gpiod_set_value_cansleep(priv->mute_gpio, mute);
return 0;
}
#define CS53L30_RATES (SNDRV_PCM_RATE_8000_48000 | \
SNDRV_PCM_RATE_12000 | \
SNDRV_PCM_RATE_24000)
#define CS53L30_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE)
static const struct snd_soc_dai_ops cs53l30_ops = {
.hw_params = cs53l30_pcm_hw_params,
.set_fmt = cs53l30_set_dai_fmt,
.set_sysclk = cs53l30_set_sysclk,
.set_tristate = cs53l30_set_tristate,
.set_tdm_slot = cs53l30_set_dai_tdm_slot,
.mute_stream = cs53l30_mute_stream,
};
static struct snd_soc_dai_driver cs53l30_dai = {
.name = "cs53l30",
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 4,
.rates = CS53L30_RATES,
.formats = CS53L30_FORMATS,
},
.ops = &cs53l30_ops,
.symmetric_rate = 1,
};
static int cs53l30_component_probe(struct snd_soc_component *component)
{
struct cs53l30_private *priv = snd_soc_component_get_drvdata(component);
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
if (priv->use_sdout2)
snd_soc_dapm_add_routes(dapm, cs53l30_dapm_routes_sdout2,
ARRAY_SIZE(cs53l30_dapm_routes_sdout2));
else
snd_soc_dapm_add_routes(dapm, cs53l30_dapm_routes_sdout1,
ARRAY_SIZE(cs53l30_dapm_routes_sdout1));
return 0;
}
static const struct snd_soc_component_driver cs53l30_driver = {
.probe = cs53l30_component_probe,
.set_bias_level = cs53l30_set_bias_level,
.controls = cs53l30_snd_controls,
.num_controls = ARRAY_SIZE(cs53l30_snd_controls),
.dapm_widgets = cs53l30_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(cs53l30_dapm_widgets),
.dapm_routes = cs53l30_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(cs53l30_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
};
static const struct regmap_config cs53l30_regmap = {
.reg_bits = 8,
.val_bits = 8,
.max_register = CS53L30_MAX_REGISTER,
.reg_defaults = cs53l30_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(cs53l30_reg_defaults),
.volatile_reg = cs53l30_volatile_register,
.writeable_reg = cs53l30_writeable_register,
.readable_reg = cs53l30_readable_register,
.cache_type = REGCACHE_MAPLE,
.use_single_read = true,
.use_single_write = true,
};
static int cs53l30_i2c_probe(struct i2c_client *client)
{
const struct device_node *np = client->dev.of_node;
struct device *dev = &client->dev;
struct cs53l30_private *cs53l30;
unsigned int reg;
int ret = 0, i, devid;
u8 val;
cs53l30 = devm_kzalloc(dev, sizeof(*cs53l30), GFP_KERNEL);
if (!cs53l30)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(cs53l30->supplies); i++)
cs53l30->supplies[i].supply = cs53l30_supply_names[i];
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(cs53l30->supplies),
cs53l30->supplies);
if (ret) {
dev_err(dev, "failed to get supplies: %d\n", ret);
return ret;
}
ret = regulator_bulk_enable(ARRAY_SIZE(cs53l30->supplies),
cs53l30->supplies);
if (ret) {
dev_err(dev, "failed to enable supplies: %d\n", ret);
return ret;
}
/* Reset the Device */
cs53l30->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(cs53l30->reset_gpio)) {
ret = PTR_ERR(cs53l30->reset_gpio);
goto error_supplies;
}
gpiod_set_value_cansleep(cs53l30->reset_gpio, 1);
i2c_set_clientdata(client, cs53l30);
cs53l30->mclk_rate = 0;
cs53l30->regmap = devm_regmap_init_i2c(client, &cs53l30_regmap);
if (IS_ERR(cs53l30->regmap)) {
ret = PTR_ERR(cs53l30->regmap);
dev_err(dev, "regmap_init() failed: %d\n", ret);
goto error;
}
/* Initialize codec */
devid = cirrus_read_device_id(cs53l30->regmap, CS53L30_DEVID_AB);
if (devid < 0) {
ret = devid;
dev_err(dev, "Failed to read device ID: %d\n", ret);
goto error;
}
if (devid != CS53L30_DEVID) {
ret = -ENODEV;
dev_err(dev, "Device ID (%X). Expected %X\n",
devid, CS53L30_DEVID);
goto error;
}
	ret = regmap_read(cs53l30->regmap, CS53L30_REVID, &reg);
if (ret < 0) {
dev_err(dev, "failed to get Revision ID: %d\n", ret);
goto error;
}
/* Check if MCLK provided */
cs53l30->mclk = devm_clk_get_optional(dev, "mclk");
if (IS_ERR(cs53l30->mclk)) {
ret = PTR_ERR(cs53l30->mclk);
goto error;
}
/* Fetch the MUTE control */
cs53l30->mute_gpio = devm_gpiod_get_optional(dev, "mute",
GPIOD_OUT_HIGH);
if (IS_ERR(cs53l30->mute_gpio)) {
ret = PTR_ERR(cs53l30->mute_gpio);
goto error;
}
if (cs53l30->mute_gpio) {
/* Enable MUTE controls via MUTE pin */
regmap_write(cs53l30->regmap, CS53L30_MUTEP_CTL1,
CS53L30_MUTEP_CTL1_MUTEALL);
/* Flip the polarity of MUTE pin */
if (gpiod_is_active_low(cs53l30->mute_gpio))
regmap_update_bits(cs53l30->regmap, CS53L30_MUTEP_CTL2,
CS53L30_MUTE_PIN_POLARITY, 0);
}
if (!of_property_read_u8(np, "cirrus,micbias-lvl", &val))
regmap_update_bits(cs53l30->regmap, CS53L30_MICBIAS_CTL,
CS53L30_MIC_BIAS_CTRL_MASK, val);
if (of_property_read_bool(np, "cirrus,use-sdout2"))
cs53l30->use_sdout2 = true;
dev_info(dev, "Cirrus Logic CS53L30, Revision: %02X\n", reg & 0xFF);
ret = devm_snd_soc_register_component(dev, &cs53l30_driver, &cs53l30_dai, 1);
if (ret) {
dev_err(dev, "failed to register component: %d\n", ret);
goto error;
}
return 0;
error:
gpiod_set_value_cansleep(cs53l30->reset_gpio, 0);
error_supplies:
regulator_bulk_disable(ARRAY_SIZE(cs53l30->supplies),
cs53l30->supplies);
return ret;
}
static void cs53l30_i2c_remove(struct i2c_client *client)
{
struct cs53l30_private *cs53l30 = i2c_get_clientdata(client);
/* Hold down reset */
gpiod_set_value_cansleep(cs53l30->reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(cs53l30->supplies),
cs53l30->supplies);
}
#ifdef CONFIG_PM
static int cs53l30_runtime_suspend(struct device *dev)
{
struct cs53l30_private *cs53l30 = dev_get_drvdata(dev);
regcache_cache_only(cs53l30->regmap, true);
/* Hold down reset */
gpiod_set_value_cansleep(cs53l30->reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(cs53l30->supplies),
cs53l30->supplies);
return 0;
}
static int cs53l30_runtime_resume(struct device *dev)
{
struct cs53l30_private *cs53l30 = dev_get_drvdata(dev);
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(cs53l30->supplies),
cs53l30->supplies);
if (ret) {
dev_err(dev, "failed to enable supplies: %d\n", ret);
return ret;
}
gpiod_set_value_cansleep(cs53l30->reset_gpio, 1);
regcache_cache_only(cs53l30->regmap, false);
ret = regcache_sync(cs53l30->regmap);
if (ret) {
dev_err(dev, "failed to synchronize regcache: %d\n", ret);
return ret;
}
return 0;
}
#endif
static const struct dev_pm_ops cs53l30_runtime_pm = {
SET_RUNTIME_PM_OPS(cs53l30_runtime_suspend, cs53l30_runtime_resume,
NULL)
};
static const struct of_device_id cs53l30_of_match[] = {
{ .compatible = "cirrus,cs53l30", },
{},
};
MODULE_DEVICE_TABLE(of, cs53l30_of_match);
static const struct i2c_device_id cs53l30_id[] = {
{ "cs53l30" },
{}
};
MODULE_DEVICE_TABLE(i2c, cs53l30_id);
static struct i2c_driver cs53l30_i2c_driver = {
.driver = {
.name = "cs53l30",
.of_match_table = cs53l30_of_match,
.pm = &cs53l30_runtime_pm,
},
.id_table = cs53l30_id,
.probe = cs53l30_i2c_probe,
.remove = cs53l30_i2c_remove,
};
module_i2c_driver(cs53l30_i2c_driver);
MODULE_DESCRIPTION("ASoC CS53L30 driver");
MODULE_AUTHOR("Paul Handrigan, Cirrus Logic Inc, <[email protected]>");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* YeAH TCP
*
* For further details look at:
* https://web.archive.org/web/20080316215752/http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
*
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>
#include "tcp_vegas.h"
#define TCP_YEAH_ALPHA 80 /* number of packets queued at the bottleneck */
#define TCP_YEAH_GAMMA 1 /* fraction of queue to be removed per rtt */
#define TCP_YEAH_DELTA 3 /* log minimum fraction of cwnd to be removed on loss */
#define TCP_YEAH_EPSILON 1 /* log maximum fraction to be removed on early decongestion */
#define TCP_YEAH_PHY 8 /* maximum delta from base */
#define TCP_YEAH_RHO 16 /* minimum number of consecutive rtt to consider competition on loss */
#define TCP_YEAH_ZETA 50 /* minimum number of state switches to reset reno_count */
#define TCP_SCALABLE_AI_CNT 100U
/* YeAH variables */
struct yeah {
struct vegas vegas; /* must be first */
/* YeAH */
	u32 lastQ;		/* most recent estimate of the bottleneck queue */
	u32 doing_reno_now;	/* consecutive RTTs spent in Reno mode */
	u32 reno_count;		/* estimated cwnd of a competing Reno flow */
	u32 fast_count;		/* consecutive RTTs without queue build-up */
};
static void tcp_yeah_init(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct yeah *yeah = inet_csk_ca(sk);
tcp_vegas_init(sk);
yeah->doing_reno_now = 0;
yeah->lastQ = 0;
yeah->reno_count = 2;
	/* Ensure the MD (multiplicative decrease) arithmetic cannot
	 * overflow.  This is somewhat pedantic, since I don't think we
	 * will see a cwnd this large. :) */
tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
}
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
struct yeah *yeah = inet_csk_ca(sk);
if (!tcp_is_cwnd_limited(sk))
return;
if (tcp_in_slow_start(tp)) {
acked = tcp_slow_start(tp, acked);
if (!acked)
goto do_vegas;
}
if (!yeah->doing_reno_now) {
/* Scalable */
tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT),
acked);
} else {
/* Reno */
tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
}
/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
*
* These are so named because they represent the approximate values
* of snd_una and snd_nxt at the beginning of the current RTT. More
* precisely, they represent the amount of data sent during the RTT.
* At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
* we will calculate that (v_beg_snd_nxt - v_vegas.beg_snd_una) outstanding
* bytes of data have been ACKed during the course of the RTT, giving
* an "actual" rate of:
*
* (v_beg_snd_nxt - v_vegas.beg_snd_una) / (rtt duration)
*
* Unfortunately, v_vegas.beg_snd_una is not exactly equal to snd_una,
* because delayed ACKs can cover more than one segment, so they
	 * don't line up nicely with the boundaries of RTTs.
*
* Another unfortunate fact of life is that delayed ACKs delay the
* advance of the left edge of our send window, so that the number
* of bytes we send in an RTT is often less than our cwnd will allow.
* So we keep track of our cwnd separately, in v_beg_snd_cwnd.
*/
do_vegas:
if (after(ack, yeah->vegas.beg_snd_nxt)) {
/* We do the Vegas calculations only if we got enough RTT
* samples that we can be reasonably sure that we got
* at least one RTT sample that wasn't from a delayed ACK.
* If we only had 2 samples total,
* then that means we're getting only 1 ACK per RTT, which
* means they're almost certainly delayed ACKs.
* If we have 3 samples, we should be OK.
*/
if (yeah->vegas.cntRTT > 2) {
u32 rtt, queue;
u64 bw;
/* We have enough RTT samples, so, using the Vegas
* algorithm, we determine if we should increase or
* decrease cwnd, and by how much.
*/
/* Pluck out the RTT we are using for the Vegas
* calculations. This is the min RTT seen during the
* last RTT. Taking the min filters out the effects
* of delayed ACKs, at the cost of noticing congestion
* a bit later.
*/
rtt = yeah->vegas.minRTT;
/* Compute excess number of packets above bandwidth
* Avoid doing full 64 bit divide.
*/
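			/*
			 * i.e. queue = cwnd * (rtt - baseRTT) / rtt: the
			 * Vegas estimate of packets sitting in the
			 * bottleneck queue.
			 */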
bw = tcp_snd_cwnd(tp);
bw *= rtt - yeah->vegas.baseRTT;
do_div(bw, rtt);
queue = bw;
if (queue > TCP_YEAH_ALPHA ||
rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
if (queue > TCP_YEAH_ALPHA &&
tcp_snd_cwnd(tp) > yeah->reno_count) {
				u32 reduction = min(queue / TCP_YEAH_GAMMA,
tcp_snd_cwnd(tp) >> TCP_YEAH_EPSILON);
tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - reduction);
tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp),
yeah->reno_count));
tp->snd_ssthresh = tcp_snd_cwnd(tp);
}
if (yeah->reno_count <= 2)
yeah->reno_count = max(tcp_snd_cwnd(tp)>>1, 2U);
else
yeah->reno_count++;
yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
0xffffffU);
} else {
yeah->fast_count++;
if (yeah->fast_count > TCP_YEAH_ZETA) {
yeah->reno_count = 2;
yeah->fast_count = 0;
}
yeah->doing_reno_now = 0;
}
yeah->lastQ = queue;
}
/* Save the extent of the current window so we can use this
* at the end of the next RTT.
*/
yeah->vegas.beg_snd_una = yeah->vegas.beg_snd_nxt;
yeah->vegas.beg_snd_nxt = tp->snd_nxt;
yeah->vegas.beg_snd_cwnd = tcp_snd_cwnd(tp);
/* Wipe the slate clean for the next RTT. */
yeah->vegas.cntRTT = 0;
yeah->vegas.minRTT = 0x7fffffff;
}
}
static u32 tcp_yeah_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct yeah *yeah = inet_csk_ca(sk);
u32 reduction;
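	/*
	 * With fewer than TCP_YEAH_RHO consecutive congested RTTs, do a
	 * precise decongestion: drain the measured backlog, clamped
	 * between cwnd >> TCP_YEAH_DELTA and half of cwnd.  Otherwise
	 * assume competition with loss-based flows and halve like Reno.
	 */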
if (yeah->doing_reno_now < TCP_YEAH_RHO) {
reduction = yeah->lastQ;
reduction = min(reduction, max(tcp_snd_cwnd(tp)>>1, 2U));
reduction = max(reduction, tcp_snd_cwnd(tp) >> TCP_YEAH_DELTA);
} else
reduction = max(tcp_snd_cwnd(tp)>>1, 2U);
yeah->fast_count = 0;
yeah->reno_count = max(yeah->reno_count>>1, 2U);
return max_t(int, tcp_snd_cwnd(tp) - reduction, 2);
}
static struct tcp_congestion_ops tcp_yeah __read_mostly = {
.init = tcp_yeah_init,
.ssthresh = tcp_yeah_ssthresh,
.undo_cwnd = tcp_reno_undo_cwnd,
.cong_avoid = tcp_yeah_cong_avoid,
.set_state = tcp_vegas_state,
.cwnd_event = tcp_vegas_cwnd_event,
.get_info = tcp_vegas_get_info,
.pkts_acked = tcp_vegas_pkts_acked,
.owner = THIS_MODULE,
.name = "yeah",
};
static int __init tcp_yeah_register(void)
{
BUILD_BUG_ON(sizeof(struct yeah) > ICSK_CA_PRIV_SIZE);
tcp_register_congestion_control(&tcp_yeah);
return 0;
}
static void __exit tcp_yeah_unregister(void)
{
tcp_unregister_congestion_control(&tcp_yeah);
}
module_init(tcp_yeah_register);
module_exit(tcp_yeah_unregister);
MODULE_AUTHOR("Angelo P. Castellani");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("YeAH TCP");
|
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <linux/slab.h> /* fault-inject.h is not standalone! */
#include <linux/fault-inject.h>
#include <linux/sched/mm.h>
#include <drm/drm_cache.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gtt.h"
bool i915_ggtt_require_binder(struct drm_i915_private *i915)
{
/* Wa_13010847436 & Wa_14019519902 */
return !i915_direct_stolen_access(i915) &&
MEDIA_VER_FULL(i915) == IP_VER(13, 0);
}
static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
return IS_BROXTON(i915) && i915_vtd_active(i915);
}
bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
{
return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{
struct drm_i915_gem_object *obj;
/*
* To avoid severe over-allocation when dealing with min_page_size
* restrictions, we override that behaviour here by allowing an object
* size and page layout which can be smaller. In practice this should be
* totally fine, since GTT paging structures are not typically inserted
* into the GTT.
*
* Note that we also hit this path for the scratch page, and for this
* case it might need to be 64K, but that should work fine here since we
* used the passed in size for the page size, which should ensure it
* also has the same alignment.
*/
obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
vm->lmem_pt_obj_flags);
/*
* Ensure all paging structures for this vm share the same dma-resv
* object underneath, with the idea that one object_lock() will lock
* them all at once.
*/
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
if (vm->fpriv)
i915_drm_client_add_object(vm->fpriv->client, obj);
}
return obj;
}
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
struct drm_i915_gem_object *obj;
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
obj = i915_gem_object_create_internal(vm->i915, sz);
/*
* Ensure all paging structures for this vm share the same dma-resv
* object underneath, with the idea that one object_lock() will lock
* them all at once.
*/
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
if (vm->fpriv)
i915_drm_client_add_object(vm->fpriv->client, obj);
}
return obj;
}
int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
enum i915_map_type type;
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
/*
* FIXME: It is suspected that some Address Translation Service (ATS)
* issue on IOMMU is causing CAT errors to occur on some MTL workloads.
* Applying a write barrier to the ppgtt set entry functions appeared
* to have no effect, so we must temporarily use I915_MAP_WC here on
* MTL until a proper ATS solution is found.
*/
if (IS_METEORLAKE(vm->i915))
type = I915_MAP_WC;
vaddr = i915_gem_object_pin_map_unlocked(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
}
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
enum i915_map_type type;
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
/*
* FIXME: It is suspected that some Address Translation Service (ATS)
* issue on IOMMU is causing CAT errors to occur on some MTL workloads.
* Applying a write barrier to the ppgtt set entry functions appeared
* to have no effect, so we must temporarily use I915_MAP_WC here on
* MTL until a proper ATS solution is found.
*/
if (IS_METEORLAKE(vm->i915))
type = I915_MAP_WC;
vaddr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
}
static void clear_vm_list(struct list_head *list)
{
struct i915_vma *vma, *vn;
list_for_each_entry_safe(vma, vn, list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
if (!i915_gem_object_get_rcu(obj)) {
/*
* Object is dying, but has not yet cleared its
* vma list.
* Unbind the dying vma to ensure our list
* is completely drained. We leave the destruction to
* the object destructor to avoid the vma
* disappearing under it.
*/
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
WARN_ON(__i915_vma_unbind(vma));
/* Remove from the unbound list */
list_del_init(&vma->vm_link);
/*
* Delay the vm and vm mutex freeing until the
* object is done with destruction.
*/
i915_vm_resv_get(vma->vm);
vma->vm_ddestroy = true;
} else {
i915_vma_destroy_locked(vma);
i915_gem_object_put(obj);
}
}
}
static void __i915_vm_close(struct i915_address_space *vm)
{
mutex_lock(&vm->mutex);
clear_vm_list(&vm->bound_list);
clear_vm_list(&vm->unbound_list);
/* Check for must-fix unanticipated side-effects */
GEM_BUG_ON(!list_empty(&vm->bound_list));
GEM_BUG_ON(!list_empty(&vm->unbound_list));
mutex_unlock(&vm->mutex);
}
/* lock the vm into the current ww, if we lock one, we lock all */
int i915_vm_lock_objects(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww)
{
if (vm->scratch[0]->base.resv == &vm->_resv) {
return i915_gem_object_lock(vm->scratch[0], ww);
} else {
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
/* We borrowed the scratch page from ggtt, take the top level object */
return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
}
}
void i915_address_space_fini(struct i915_address_space *vm)
{
drm_mm_takedown(&vm->mm);
}
/**
* i915_vm_resv_release - Final struct i915_address_space destructor
* @kref: Pointer to the &i915_address_space.resv_ref member.
*
* This function is called when the last lock sharer no longer shares the
* &i915_address_space._resv lock, and also if we raced when
* destroying a vma by the vma destruction
*/
void i915_vm_resv_release(struct kref *kref)
{
struct i915_address_space *vm =
container_of(kref, typeof(*vm), resv_ref);
dma_resv_fini(&vm->_resv);
mutex_destroy(&vm->mutex);
kfree(vm);
}
static void __i915_vm_release(struct work_struct *work)
{
struct i915_address_space *vm =
container_of(work, struct i915_address_space, release_work);
__i915_vm_close(vm);
/* Synchronize async unbinds. */
i915_vma_resource_bind_dep_sync_all(vm);
vm->cleanup(vm);
i915_address_space_fini(vm);
i915_vm_resv_put(vm);
}
void i915_vm_release(struct kref *kref)
{
struct i915_address_space *vm =
container_of(kref, struct i915_address_space, ref);
GEM_BUG_ON(i915_is_ggtt(vm));
trace_i915_ppgtt_release(vm);
queue_work(vm->i915->wq, &vm->release_work);
}
void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
kref_init(&vm->ref);
/*
* Special case for GGTT that has already done an early
* kref_init here.
*/
if (!kref_read(&vm->resv_ref))
kref_init(&vm->resv_ref);
vm->pending_unbind = RB_ROOT_CACHED;
INIT_WORK(&vm->release_work, __i915_vm_release);
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
* Do a dummy acquire now under fs_reclaim so that any allocation
* attempt holding the lock is immediately reported by lockdep.
*/
mutex_init(&vm->mutex);
lockdep_set_subclass(&vm->mutex, subclass);
if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
} else {
/*
* CHV + BXT VTD workaround use stop_machine(),
* which is allowed to allocate memory. This means &vm->mutex
* is the outer lock, and in theory we can allocate memory inside
* it through stop_machine().
*
* Add the annotation for this, we use trylock in shrinker.
*/
mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
might_alloc(GFP_KERNEL);
mutex_release(&vm->mutex.dep_map, _THIS_IP_);
}
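/*
 * Illustrative note (not from the original code): the dummy
 * mutex_acquire()/might_alloc()/mutex_release() sequence above records
 * "an allocation may occur while vm->mutex is held" in lockdep's
 * dependency graph up front, so a later inversion against the
 * allocator/reclaim path is reported immediately instead of only when
 * it actually races at runtime.
 */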
dma_resv_init(&vm->_resv);
GEM_BUG_ON(!vm->total);
drm_mm_init(&vm->mm, 0, vm->total);
memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT,
ARRAY_SIZE(vm->min_alignment));
if (HAS_64K_PAGES(vm->i915)) {
vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_64K;
vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_64K;
}
vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
INIT_LIST_HEAD(&vm->bound_list);
INIT_LIST_HEAD(&vm->unbound_list);
}
void *__px_vaddr(struct drm_i915_gem_object *p)
{
enum i915_map_type type;
GEM_BUG_ON(!i915_gem_object_has_pages(p));
return page_unpack_bits(p->mm.mapping, &type);
}
dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
GEM_BUG_ON(!i915_gem_object_has_pages(p));
return sg_dma_address(p->mm.pages->sgl);
}
struct page *__px_page(struct drm_i915_gem_object *p)
{
GEM_BUG_ON(!i915_gem_object_has_pages(p));
return sg_page(p->mm.pages->sgl);
}
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
void *vaddr = __px_vaddr(p);
memset64(vaddr, val, count);
drm_clflush_virt_range(vaddr, PAGE_SIZE);
}
static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
void *vaddr = __px_vaddr(scratch);
u8 val;
val = 0;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
val = POISON_FREE;
memset(vaddr, val, scratch->base.size);
drm_clflush_virt_range(vaddr, scratch->base.size);
}
int setup_scratch_page(struct i915_address_space *vm)
{
unsigned long size;
/*
* In order to utilize 64K pages for an object with a size < 2M, we will
* need to support a 64K scratch page, given that every 16th entry for a
* page-table operating in 64K mode must point to a properly aligned 64K
* region, including any PTEs which happen to point to scratch.
*
* This is only relevant for the 48b PPGTT where we support
* huge-gtt-pages, see also i915_vma_insert(). However, as we share the
* scratch (read-only) between all vm, we create one 64k scratch page
* for all.
*/
size = I915_GTT_PAGE_SIZE_4K;
if (i915_vm_is_4lvl(vm) &&
HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K) &&
!HAS_64K_PAGES(vm->i915))
size = I915_GTT_PAGE_SIZE_64K;
do {
struct drm_i915_gem_object *obj;
obj = vm->alloc_scratch_dma(vm, size);
if (IS_ERR(obj))
goto skip;
if (map_pt_dma(vm, obj))
goto skip_obj;
/* We need a single contiguous page for our scratch */
if (obj->mm.page_sizes.sg < size)
goto skip_obj;
/* And it needs to be correspondingly aligned */
if (__px_dma(obj) & (size - 1))
goto skip_obj;
/*
* Use a non-zero scratch page for debugging.
*
* We want a value that should be reasonably obvious
* to spot in the error state, while also causing a GPU hang
* if executed. We prefer using a clear page in production, so
* should it ever be accidentally used, the effect should be
* fairly benign.
*/
poison_scratch_page(obj);
vm->scratch[0] = obj;
vm->scratch_order = get_order(size);
return 0;
skip_obj:
i915_gem_object_put(obj);
skip:
if (size == I915_GTT_PAGE_SIZE_4K)
return -ENOMEM;
size = I915_GTT_PAGE_SIZE_4K;
} while (1);
}
void free_scratch(struct i915_address_space *vm)
{
int i;
if (!vm->scratch[0])
return;
for (i = 0; i <= vm->top; i++)
i915_gem_object_put(vm->scratch[i]);
}
void gtt_write_workarounds(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
/*
* This function is for gtt related workarounds. This function is
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
if (IS_BROADWELL(i915))
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(i915))
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
else if (IS_GEN9_LP(i915))
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
/*
* To support 64K PTEs we need to first enable the use of the
* Intermediate-Page-Size(IPS) bit of the PDE field via some magical
* mmio, otherwise the page-walker will simply ignore the IPS bit. This
* shouldn't be needed after GEN10.
*
* 64K pages were first introduced from BDW+, although technically they
* only *work* from gen9+. For pre-BDW we instead have the option for
* 32K pages, but we don't currently have any support for it in our
* driver.
*/
if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
GRAPHICS_VER(i915) <= 10)
intel_uncore_rmw(uncore,
GEN8_GAMW_ECO_DEV_RW_IA,
0,
GAMW_ECO_ENABLE_64K_IPS_FIELD);
if (IS_GRAPHICS_VER(i915, 8, 11)) {
bool can_use_gtt_cache = true;
/*
* According to the BSpec if we use 2M/1G pages then we also
* need to disable the GTT cache. At least on BDW we can see
* visual corruption when using 2M pages without disabling the
* GTT cache.
*/
if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
can_use_gtt_cache = false;
/* WaGttCachingOffByDefault */
intel_uncore_write(uncore,
HSW_GTT_CACHE_EN,
can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
gt_WARN_ON_ONCE(gt, can_use_gtt_cache &&
intel_uncore_read(uncore,
HSW_GTT_CACHE_EN) == 0);
}
}
static void xelpmp_setup_private_ppat(struct intel_uncore *uncore)
{
intel_uncore_write(uncore, XELPMP_PAT_INDEX(0),
MTL_PPAT_L4_0_WB);
intel_uncore_write(uncore, XELPMP_PAT_INDEX(1),
MTL_PPAT_L4_1_WT);
intel_uncore_write(uncore, XELPMP_PAT_INDEX(2),
MTL_PPAT_L4_3_UC);
intel_uncore_write(uncore, XELPMP_PAT_INDEX(3),
MTL_PPAT_L4_0_WB | MTL_2_COH_1W);
intel_uncore_write(uncore, XELPMP_PAT_INDEX(4),
MTL_PPAT_L4_0_WB | MTL_3_COH_2W);
/*
* Remaining PAT entries are left at the hardware-default
* fully-cached setting
*/
}
static void xelpg_setup_private_ppat(struct intel_gt *gt)
{
intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(0),
MTL_PPAT_L4_0_WB);
intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(1),
MTL_PPAT_L4_1_WT);
intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(2),
MTL_PPAT_L4_3_UC);
intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(3),
MTL_PPAT_L4_0_WB | MTL_2_COH_1W);
intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(4),
MTL_PPAT_L4_0_WB | MTL_3_COH_2W);
/*
* Remaining PAT entries are left at the hardware-default
* fully-cached setting
*/
}
static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
/* TGL doesn't support LLC or AGE settings */
intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}
static void xehp_setup_private_ppat(struct intel_gt *gt)
{
enum forcewake_domains fw;
unsigned long flags;
fw = intel_uncore_forcewake_for_reg(gt->uncore, _MMIO(XEHP_PAT_INDEX(0).reg),
FW_REG_WRITE);
intel_uncore_forcewake_get(gt->uncore, fw);
intel_gt_mcr_lock(gt, &flags);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(0), GEN8_PPAT_WB);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(1), GEN8_PPAT_WC);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(2), GEN8_PPAT_WT);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(3), GEN8_PPAT_UC);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(4), GEN8_PPAT_WB);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(5), GEN8_PPAT_WB);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(6), GEN8_PPAT_WB);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(7), GEN8_PPAT_WB);
intel_gt_mcr_unlock(gt, flags);
intel_uncore_forcewake_put(gt->uncore, fw);
}
static void icl_setup_private_ppat(struct intel_uncore *uncore)
{
intel_uncore_write(uncore,
GEN10_PAT_INDEX(0),
GEN8_PPAT_WB | GEN8_PPAT_LLC);
intel_uncore_write(uncore,
GEN10_PAT_INDEX(1),
GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
intel_uncore_write(uncore,
GEN10_PAT_INDEX(2),
GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
intel_uncore_write(uncore,
GEN10_PAT_INDEX(3),
GEN8_PPAT_UC);
intel_uncore_write(uncore,
GEN10_PAT_INDEX(4),
GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
intel_uncore_write(uncore,
GEN10_PAT_INDEX(5),
GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
intel_uncore_write(uncore,
GEN10_PAT_INDEX(6),
GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
intel_uncore_write(uncore,
GEN10_PAT_INDEX(7),
GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
/*
* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases.
*/
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
u64 pat;
pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
/* for scanout with eLLC */
if (GRAPHICS_VER(i915) >= 9)
pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
else
pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
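/*
 * Illustrative note (assumption, not part of the driver): GEN8_PPAT()
 * packs each one-byte PAT attribute into its byte lane of a single
 * u64, roughly
 *
 *   #define GEN8_PPAT(i, x)  ((u64)(x) << ((i) * 8))
 *
 * so entries 0..3 land in GEN8_PRIVATE_PAT_LO and entries 4..7 in
 * GEN8_PRIVATE_PAT_HI when the value is split with
 * lower_32_bits()/upper_32_bits() as above.
 */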
static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
u64 pat;
/*
* Map WB on BDW to snooped on CHV.
*
* Only the snoop bit has meaning for CHV, the rest is
* ignored.
*
* The hardware will never snoop for certain types of accesses:
* - CPU GTT (GMADR->GGTT->no snoop->memory)
* - PPGTT page tables
* - some other special cycles
*
* As with BDW, we also need to consider the following for GT accesses:
* "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
* Which means we must set the snoop bit in PAT entry 0
* in order to keep the global status page working.
*/
pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
GEN8_PPAT(1, 0) |
GEN8_PPAT(2, 0) |
GEN8_PPAT(3, 0) |
GEN8_PPAT(4, CHV_PPAT_SNOOP) |
GEN8_PPAT(5, CHV_PPAT_SNOOP) |
GEN8_PPAT(6, CHV_PPAT_SNOOP) |
GEN8_PPAT(7, CHV_PPAT_SNOOP);
intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
void setup_private_pat(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
struct drm_i915_private *i915 = gt->i915;
GEM_BUG_ON(GRAPHICS_VER(i915) < 8);
if (gt->type == GT_MEDIA) {
xelpmp_setup_private_ppat(gt->uncore);
return;
}
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
xelpg_setup_private_ppat(gt);
else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
xehp_setup_private_ppat(gt);
else if (GRAPHICS_VER(i915) >= 12)
tgl_setup_private_ppat(uncore);
else if (GRAPHICS_VER(i915) >= 11)
icl_setup_private_ppat(uncore);
else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
chv_setup_private_ppat(uncore);
else
bdw_setup_private_ppat(uncore);
}
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
if (IS_ERR(obj))
return ERR_CAST(obj);
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;
}
return vma;
}
struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
{
struct i915_vma *vma;
int err;
vma = __vm_create_scratch_for_read(vm, size);
if (IS_ERR(vma))
return vma;
err = i915_vma_pin(vma, 0, 0,
i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
if (err) {
i915_vma_put(vma);
return ERR_PTR(err);
}
return vma;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif
/*
* Copyright (C) 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _vcn_2_5_OFFSET_HEADER
#define _vcn_2_5_OFFSET_HEADER
// addressBlock: uvd0_mmsch_dec
// base address: 0x1e000
#define mmMMSCH_VF_VMID 0x000b
#define mmMMSCH_VF_VMID_BASE_IDX 0
#define mmMMSCH_VF_CTX_ADDR_LO 0x000c
#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0
#define mmMMSCH_VF_CTX_ADDR_HI 0x000d
#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0
#define mmMMSCH_VF_CTX_SIZE 0x000e
#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0
#define mmMMSCH_VF_MAILBOX_HOST 0x0012
#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0
#define mmMMSCH_VF_MAILBOX_RESP 0x0013
#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0
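/*
 * Usage sketch (assumption, not part of this header): amdgpu resolves
 * an offset/BASE_IDX pair through its per-IP base-address table,
 * roughly
 *
 *   addr = adev->reg_offset[VCN_HWIP][inst][mmMMSCH_VF_VMID_BASE_IDX]
 *          + mmMMSCH_VF_VMID;
 *
 * which is what the SOC15_REG_OFFSET()/RREG32_SOC15() helpers expand
 * to; the "base address" comments in this file give the resulting
 * offsets for instance 0.
 */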
// addressBlock: uvd0_jpegnpdec
// base address: 0x1e200
#define mmUVD_JPEG_CNTL 0x0080
#define mmUVD_JPEG_CNTL_BASE_IDX 0
#define mmUVD_JPEG_RB_BASE 0x0081
#define mmUVD_JPEG_RB_BASE_BASE_IDX 0
#define mmUVD_JPEG_RB_WPTR 0x0082
#define mmUVD_JPEG_RB_WPTR_BASE_IDX 0
#define mmUVD_JPEG_RB_RPTR 0x0083
#define mmUVD_JPEG_RB_RPTR_BASE_IDX 0
#define mmUVD_JPEG_RB_SIZE 0x0084
#define mmUVD_JPEG_RB_SIZE_BASE_IDX 0
#define mmUVD_JPEG_DEC_SCRATCH0 0x0089
#define mmUVD_JPEG_DEC_SCRATCH0_BASE_IDX 0
#define mmUVD_JPEG_INT_EN 0x008a
#define mmUVD_JPEG_INT_EN_BASE_IDX 0
#define mmUVD_JPEG_INT_STAT 0x008b
#define mmUVD_JPEG_INT_STAT_BASE_IDX 0
#define mmUVD_JPEG_PITCH 0x009f
#define mmUVD_JPEG_PITCH_BASE_IDX 0
#define mmUVD_JPEG_UV_PITCH 0x00a0
#define mmUVD_JPEG_UV_PITCH_BASE_IDX 0
#define mmJPEG_DEC_Y_GFX8_TILING_SURFACE 0x00a1
#define mmJPEG_DEC_Y_GFX8_TILING_SURFACE_BASE_IDX 0
#define mmJPEG_DEC_UV_GFX8_TILING_SURFACE 0x00a2
#define mmJPEG_DEC_UV_GFX8_TILING_SURFACE_BASE_IDX 0
#define mmJPEG_DEC_GFX8_ADDR_CONFIG 0x00a3
#define mmJPEG_DEC_GFX8_ADDR_CONFIG_BASE_IDX 0
#define mmJPEG_DEC_Y_GFX10_TILING_SURFACE 0x00a4
#define mmJPEG_DEC_Y_GFX10_TILING_SURFACE_BASE_IDX 0
#define mmJPEG_DEC_UV_GFX10_TILING_SURFACE 0x00a5
#define mmJPEG_DEC_UV_GFX10_TILING_SURFACE_BASE_IDX 0
#define mmJPEG_DEC_GFX10_ADDR_CONFIG 0x00a6
#define mmJPEG_DEC_GFX10_ADDR_CONFIG_BASE_IDX 0
#define mmJPEG_DEC_ADDR_MODE 0x00a7
#define mmJPEG_DEC_ADDR_MODE_BASE_IDX 0
#define mmUVD_JPEG_GPCOM_CMD 0x00a9
#define mmUVD_JPEG_GPCOM_CMD_BASE_IDX 0
#define mmUVD_JPEG_GPCOM_DATA0 0x00aa
#define mmUVD_JPEG_GPCOM_DATA0_BASE_IDX 0
#define mmUVD_JPEG_GPCOM_DATA1 0x00ab
#define mmUVD_JPEG_GPCOM_DATA1_BASE_IDX 0
#define mmUVD_JPEG_SCRATCH1 0x00ae
#define mmUVD_JPEG_SCRATCH1_BASE_IDX 0
#define mmUVD_JPEG_DEC_SOFT_RST 0x00af
#define mmUVD_JPEG_DEC_SOFT_RST_BASE_IDX 0
// addressBlock: uvd0_uvd_jpeg_enc_dec
// base address: 0x1e300
#define mmUVD_JPEG_ENC_INT_EN 0x00c1
#define mmUVD_JPEG_ENC_INT_EN_BASE_IDX 0
#define mmUVD_JPEG_ENC_INT_STATUS 0x00c2
#define mmUVD_JPEG_ENC_INT_STATUS_BASE_IDX 0
#define mmUVD_JPEG_ENC_ENGINE_CNTL 0x00c5
#define mmUVD_JPEG_ENC_ENGINE_CNTL_BASE_IDX 0
#define mmUVD_JPEG_ENC_SCRATCH1 0x00ce
#define mmUVD_JPEG_ENC_SCRATCH1_BASE_IDX 0
// addressBlock: uvd0_uvd_jpeg_enc_sclk_dec
// base address: 0x1e380
#define mmUVD_JPEG_ENC_STATUS 0x00e5
#define mmUVD_JPEG_ENC_STATUS_BASE_IDX 0
#define mmUVD_JPEG_ENC_PITCH 0x00e6
#define mmUVD_JPEG_ENC_PITCH_BASE_IDX 0
#define mmUVD_JPEG_ENC_LUMA_BASE 0x00e7
#define mmUVD_JPEG_ENC_LUMA_BASE_BASE_IDX 0
#define mmUVD_JPEG_ENC_CHROMAU_BASE 0x00e8
#define mmUVD_JPEG_ENC_CHROMAU_BASE_BASE_IDX 0
#define mmUVD_JPEG_ENC_CHROMAV_BASE 0x00e9
#define mmUVD_JPEG_ENC_CHROMAV_BASE_BASE_IDX 0
#define mmJPEG_ENC_Y_GFX10_TILING_SURFACE 0x00ea
#define mmJPEG_ENC_Y_GFX10_TILING_SURFACE_BASE_IDX 0
#define mmJPEG_ENC_UV_GFX10_TILING_SURFACE 0x00eb
#define mmJPEG_ENC_UV_GFX10_TILING_SURFACE_BASE_IDX 0
#define mmJPEG_ENC_GFX10_ADDR_CONFIG 0x00ec
#define mmJPEG_ENC_GFX10_ADDR_CONFIG_BASE_IDX 0
#define mmJPEG_ENC_ADDR_MODE 0x00ed
#define mmJPEG_ENC_ADDR_MODE_BASE_IDX 0
#define mmUVD_JPEG_ENC_GPCOM_CMD 0x00ee
#define mmUVD_JPEG_ENC_GPCOM_CMD_BASE_IDX 0
#define mmUVD_JPEG_ENC_GPCOM_DATA0 0x00ef
#define mmUVD_JPEG_ENC_GPCOM_DATA0_BASE_IDX 0
#define mmUVD_JPEG_ENC_GPCOM_DATA1 0x00f0
#define mmUVD_JPEG_ENC_GPCOM_DATA1_BASE_IDX 0
#define mmUVD_JPEG_ENC_CGC_CNTL 0x00f5
#define mmUVD_JPEG_ENC_CGC_CNTL_BASE_IDX 0
#define mmUVD_JPEG_ENC_SCRATCH0 0x00f6
#define mmUVD_JPEG_ENC_SCRATCH0_BASE_IDX 0
#define mmUVD_JPEG_ENC_SOFT_RST 0x00f7
#define mmUVD_JPEG_ENC_SOFT_RST_BASE_IDX 0
// addressBlock: uvd0_uvd_jrbc_dec
// base address: 0x1e400
#define mmUVD_JRBC_RB_WPTR 0x0100
#define mmUVD_JRBC_RB_WPTR_BASE_IDX 0
#define mmUVD_JRBC_RB_CNTL 0x0101
#define mmUVD_JRBC_RB_CNTL_BASE_IDX 0
#define mmUVD_JRBC_IB_SIZE 0x0102
#define mmUVD_JRBC_IB_SIZE_BASE_IDX 0
#define mmUVD_JRBC_URGENT_CNTL 0x0103
#define mmUVD_JRBC_URGENT_CNTL_BASE_IDX 0
#define mmUVD_JRBC_RB_REF_DATA 0x0104
#define mmUVD_JRBC_RB_REF_DATA_BASE_IDX 0
#define mmUVD_JRBC_RB_COND_RD_TIMER 0x0105
#define mmUVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0
#define mmUVD_JRBC_SOFT_RESET 0x0108
#define mmUVD_JRBC_SOFT_RESET_BASE_IDX 0
#define mmUVD_JRBC_STATUS 0x0109
#define mmUVD_JRBC_STATUS_BASE_IDX 0
#define mmUVD_JRBC_RB_RPTR 0x010a
#define mmUVD_JRBC_RB_RPTR_BASE_IDX 0
#define mmUVD_JRBC_RB_BUF_STATUS 0x010b
#define mmUVD_JRBC_RB_BUF_STATUS_BASE_IDX 0
#define mmUVD_JRBC_IB_BUF_STATUS 0x010c
#define mmUVD_JRBC_IB_BUF_STATUS_BASE_IDX 0
#define mmUVD_JRBC_IB_SIZE_UPDATE 0x010d
#define mmUVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0
#define mmUVD_JRBC_IB_COND_RD_TIMER 0x010e
#define mmUVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0
#define mmUVD_JRBC_IB_REF_DATA 0x010f
#define mmUVD_JRBC_IB_REF_DATA_BASE_IDX 0
#define mmUVD_JPEG_PREEMPT_CMD 0x0110
#define mmUVD_JPEG_PREEMPT_CMD_BASE_IDX 0
#define mmUVD_JPEG_PREEMPT_FENCE_DATA0 0x0111
#define mmUVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0
#define mmUVD_JPEG_PREEMPT_FENCE_DATA1 0x0112
#define mmUVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0
#define mmUVD_JRBC_RB_SIZE 0x0113
#define mmUVD_JRBC_RB_SIZE_BASE_IDX 0
#define mmUVD_JRBC_SCRATCH0 0x0114
#define mmUVD_JRBC_SCRATCH0_BASE_IDX 0
// addressBlock: uvd0_uvd_jrbc_enc_dec
// base address: 0x1e480
#define mmUVD_JRBC_ENC_RB_WPTR 0x0120
#define mmUVD_JRBC_ENC_RB_WPTR_BASE_IDX 0
#define mmUVD_JRBC_ENC_RB_CNTL 0x0121
#define mmUVD_JRBC_ENC_RB_CNTL_BASE_IDX 0
#define mmUVD_JRBC_ENC_IB_SIZE 0x0122
#define mmUVD_JRBC_ENC_IB_SIZE_BASE_IDX 0
#define mmUVD_JRBC_ENC_URGENT_CNTL 0x0123
#define mmUVD_JRBC_ENC_URGENT_CNTL_BASE_IDX 0
#define mmUVD_JRBC_ENC_RB_REF_DATA 0x0124
#define mmUVD_JRBC_ENC_RB_REF_DATA_BASE_IDX 0
#define mmUVD_JRBC_ENC_RB_COND_RD_TIMER 0x0125
#define mmUVD_JRBC_ENC_RB_COND_RD_TIMER_BASE_IDX 0
#define mmUVD_JRBC_ENC_SOFT_RESET 0x0128
#define mmUVD_JRBC_ENC_SOFT_RESET_BASE_IDX 0
#define mmUVD_JRBC_ENC_STATUS 0x0129
#define mmUVD_JRBC_ENC_STATUS_BASE_IDX 0
#define mmUVD_JRBC_ENC_RB_RPTR 0x012a
#define mmUVD_JRBC_ENC_RB_RPTR_BASE_IDX 0
#define mmUVD_JRBC_ENC_RB_BUF_STATUS 0x012b
#define mmUVD_JRBC_ENC_RB_BUF_STATUS_BASE_IDX 0
#define mmUVD_JRBC_ENC_IB_BUF_STATUS 0x012c
#define mmUVD_JRBC_ENC_IB_BUF_STATUS_BASE_IDX 0
#define mmUVD_JRBC_ENC_IB_SIZE_UPDATE 0x012d
#define mmUVD_JRBC_ENC_IB_SIZE_UPDATE_BASE_IDX 0
#define mmUVD_JRBC_ENC_IB_COND_RD_TIMER 0x012e
#define mmUVD_JRBC_ENC_IB_COND_RD_TIMER_BASE_IDX 0
#define mmUVD_JRBC_ENC_IB_REF_DATA 0x012f
#define mmUVD_JRBC_ENC_IB_REF_DATA_BASE_IDX 0
#define mmUVD_JPEG_ENC_PREEMPT_CMD 0x0130
#define mmUVD_JPEG_ENC_PREEMPT_CMD_BASE_IDX 0
#define mmUVD_JPEG_ENC_PREEMPT_FENCE_DATA0 0x0131
#define mmUVD_JPEG_ENC_PREEMPT_FENCE_DATA0_BASE_IDX 0
#define mmUVD_JPEG_ENC_PREEMPT_FENCE_DATA1 0x0132
#define mmUVD_JPEG_ENC_PREEMPT_FENCE_DATA1_BASE_IDX 0
#define mmUVD_JRBC_ENC_RB_SIZE 0x0133
#define mmUVD_JRBC_ENC_RB_SIZE_BASE_IDX 0
#define mmUVD_JRBC_ENC_SCRATCH0 0x0134
#define mmUVD_JRBC_ENC_SCRATCH0_BASE_IDX 0
// addressBlock: uvd0_uvd_jmi_dec
// base address: 0x1e500
#define mmUVD_JMI_CTRL 0x0145
#define mmUVD_JMI_CTRL_BASE_IDX 0
#define mmUVD_LMI_JRBC_CTRL 0x0146
#define mmUVD_LMI_JRBC_CTRL_BASE_IDX 0
#define mmUVD_LMI_JPEG_CTRL 0x0147
#define mmUVD_LMI_JPEG_CTRL_BASE_IDX 0
#define mmUVD_JMI_EJRBC_CTRL 0x0148
#define mmUVD_JMI_EJRBC_CTRL_BASE_IDX 0
#define mmUVD_LMI_EJPEG_CTRL 0x0149
#define mmUVD_LMI_EJPEG_CTRL_BASE_IDX 0
#define mmUVD_LMI_JRBC_IB_VMID 0x014f
#define mmUVD_LMI_JRBC_IB_VMID_BASE_IDX 0
#define mmUVD_LMI_JRBC_RB_VMID 0x0150
#define mmUVD_LMI_JRBC_RB_VMID_BASE_IDX 0
#define mmUVD_LMI_JPEG_VMID 0x0151
#define mmUVD_LMI_JPEG_VMID_BASE_IDX 0
#define mmUVD_JMI_ENC_JRBC_IB_VMID 0x0152
#define mmUVD_JMI_ENC_JRBC_IB_VMID_BASE_IDX 0
#define mmUVD_JMI_ENC_JRBC_RB_VMID 0x0153
#define mmUVD_JMI_ENC_JRBC_RB_VMID_BASE_IDX 0
#define mmUVD_JMI_ENC_JPEG_VMID 0x0154
#define mmUVD_JMI_ENC_JPEG_VMID_BASE_IDX 0
#define mmUVD_JMI_PERFMON_CTRL 0x015c
#define mmUVD_JMI_PERFMON_CTRL_BASE_IDX 0
#define mmUVD_JMI_PERFMON_COUNT_LO 0x015d
#define mmUVD_JMI_PERFMON_COUNT_LO_BASE_IDX 0
#define mmUVD_JMI_PERFMON_COUNT_HI 0x015e
#define mmUVD_JMI_PERFMON_COUNT_HI_BASE_IDX 0
#define mmUVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0160
#define mmUVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0161
#define mmUVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0162
#define mmUVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0163
#define mmUVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0164
#define mmUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0165
#define mmUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0166
#define mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x0167
#define mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0168
#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0169
#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x016a
#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x016b
#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW 0x016c
#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH 0x016d
#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x016e
#define mmUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x016f
#define mmUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_LOW 0x0170
#define mmUVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_HIGH 0x0171
#define mmUVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x017a
#define mmUVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x017b
#define mmUVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_EJRBC_RB_64BIT_BAR_LOW 0x017c
#define mmUVD_LMI_EJRBC_RB_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_EJRBC_RB_64BIT_BAR_HIGH 0x017d
#define mmUVD_LMI_EJRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_EJRBC_IB_64BIT_BAR_LOW 0x017e
#define mmUVD_LMI_EJRBC_IB_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_EJRBC_IB_64BIT_BAR_HIGH 0x017f
#define mmUVD_LMI_EJRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_LOW 0x0180
#define mmUVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x0181
#define mmUVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_LOW 0x0182
#define mmUVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_HIGH 0x0183
#define mmUVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0184
#define mmUVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0185
#define mmUVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_LOW 0x0186
#define mmUVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_HIGH 0x0187
#define mmUVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_JPEG_PREEMPT_VMID 0x0188
#define mmUVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0
#define mmUVD_LMI_ENC_JPEG_PREEMPT_VMID 0x0189
#define mmUVD_LMI_ENC_JPEG_PREEMPT_VMID_BASE_IDX 0
#define mmUVD_LMI_JPEG2_VMID 0x018a
#define mmUVD_LMI_JPEG2_VMID_BASE_IDX 0
#define mmUVD_LMI_JPEG2_READ_64BIT_BAR_LOW 0x018b
#define mmUVD_LMI_JPEG2_READ_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_JPEG2_READ_64BIT_BAR_HIGH 0x018c
#define mmUVD_LMI_JPEG2_READ_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_JPEG2_WRITE_64BIT_BAR_LOW 0x018d
#define mmUVD_LMI_JPEG2_WRITE_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_LMI_JPEG2_WRITE_64BIT_BAR_HIGH 0x018e
#define mmUVD_LMI_JPEG2_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_LMI_JPEG_CTRL2 0x018f
#define mmUVD_LMI_JPEG_CTRL2_BASE_IDX 0
#define mmUVD_JMI_DEC_SWAP_CNTL 0x0190
#define mmUVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0
#define mmUVD_JMI_ENC_SWAP_CNTL 0x0191
#define mmUVD_JMI_ENC_SWAP_CNTL_BASE_IDX 0
#define mmUVD_JMI_CNTL 0x0192
#define mmUVD_JMI_CNTL_BASE_IDX 0
#define mmUVD_JMI_HUFF_FENCE_64BIT_BAR_LOW 0x019a
#define mmUVD_JMI_HUFF_FENCE_64BIT_BAR_LOW_BASE_IDX 0
#define mmUVD_JMI_HUFF_FENCE_64BIT_BAR_HIGH 0x019b
#define mmUVD_JMI_HUFF_FENCE_64BIT_BAR_HIGH_BASE_IDX 0
#define mmUVD_JMI_DEC_SWAP_CNTL2 0x019c
#define mmUVD_JMI_DEC_SWAP_CNTL2_BASE_IDX 0
// addressBlock: uvd0_uvd_jpeg_common_dec
// base address: 0x1e700
#define mmJPEG_SOFT_RESET_STATUS 0x01c0
#define mmJPEG_SOFT_RESET_STATUS_BASE_IDX 0
#define mmJPEG_SYS_INT_EN 0x01c1
#define mmJPEG_SYS_INT_EN_BASE_IDX 0
#define mmJPEG_SYS_INT_STATUS 0x01c2
#define mmJPEG_SYS_INT_STATUS_BASE_IDX 0
#define mmJPEG_SYS_INT_ACK 0x01c3
#define mmJPEG_SYS_INT_ACK_BASE_IDX 0
#define mmJPEG_MASTINT_EN 0x01c8
#define mmJPEG_MASTINT_EN_BASE_IDX 0
#define mmJPEG_IH_CTRL 0x01c9
#define mmJPEG_IH_CTRL_BASE_IDX 0
#define mmJRBBM_ARB_CTRL 0x01cb
#define mmJRBBM_ARB_CTRL_BASE_IDX 0
// addressBlock: uvd0_uvd_jpeg_common_sclk_dec
// base address: 0x1e780
#define mmJPEG_CGC_GATE 0x01e0
#define mmJPEG_CGC_GATE_BASE_IDX 0
#define mmJPEG_CGC_CTRL 0x01e1
#define mmJPEG_CGC_CTRL_BASE_IDX 0
#define mmJPEG_CGC_STATUS 0x01e2
#define mmJPEG_CGC_STATUS_BASE_IDX 0
#define mmJPEG_COMN_CGC_MEM_CTRL 0x01e3
#define mmJPEG_COMN_CGC_MEM_CTRL_BASE_IDX 0
#define mmJPEG_DEC_CGC_MEM_CTRL 0x01e4
#define mmJPEG_DEC_CGC_MEM_CTRL_BASE_IDX 0
#define mmJPEG2_DEC_CGC_MEM_CTRL 0x01e5
#define mmJPEG2_DEC_CGC_MEM_CTRL_BASE_IDX 0
#define mmJPEG_ENC_CGC_MEM_CTRL 0x01e6
#define mmJPEG_ENC_CGC_MEM_CTRL_BASE_IDX 0
#define mmJPEG_SOFT_RESET2 0x01e7
#define mmJPEG_SOFT_RESET2_BASE_IDX 0
#define mmJPEG_PERF_BANK_CONF 0x01e8
#define mmJPEG_PERF_BANK_CONF_BASE_IDX 0
#define mmJPEG_PERF_BANK_EVENT_SEL 0x01e9
#define mmJPEG_PERF_BANK_EVENT_SEL_BASE_IDX 0
#define mmJPEG_PERF_BANK_COUNT0 0x01ea
#define mmJPEG_PERF_BANK_COUNT0_BASE_IDX 0
#define mmJPEG_PERF_BANK_COUNT1 0x01eb
#define mmJPEG_PERF_BANK_COUNT1_BASE_IDX 0
#define mmJPEG_PERF_BANK_COUNT2 0x01ec
#define mmJPEG_PERF_BANK_COUNT2_BASE_IDX 0
#define mmJPEG_PERF_BANK_COUNT3 0x01ed
#define mmJPEG_PERF_BANK_COUNT3_BASE_IDX 0
// addressBlock: uvd0_uvd_pg_dec
// base address: 0x1f800
#define mmUVD_PGFSM_CONFIG 0x0000
#define mmUVD_PGFSM_CONFIG_BASE_IDX 1
#define mmUVD_PGFSM_STATUS 0x0001
#define mmUVD_PGFSM_STATUS_BASE_IDX 1
#define mmUVD_POWER_STATUS 0x0004
#define mmUVD_POWER_STATUS_BASE_IDX 1
#define mmUVD_PG_IND_INDEX 0x0005
#define mmUVD_PG_IND_INDEX_BASE_IDX 1
#define mmUVD_PG_IND_DATA 0x0006
#define mmUVD_PG_IND_DATA_BASE_IDX 1
#define mmCC_UVD_HARVESTING 0x0007
#define mmCC_UVD_HARVESTING_BASE_IDX 1
#define mmUVD_JPEG_POWER_STATUS 0x000a
#define mmUVD_JPEG_POWER_STATUS_BASE_IDX 1
#define mmUVD_DPG_LMA_CTL 0x0011
#define mmUVD_DPG_LMA_CTL_BASE_IDX 1
#define mmUVD_DPG_LMA_DATA 0x0012
#define mmUVD_DPG_LMA_DATA_BASE_IDX 1
#define mmUVD_DPG_LMA_MASK 0x0013
#define mmUVD_DPG_LMA_MASK_BASE_IDX 1
#define mmUVD_DPG_PAUSE 0x0014
#define mmUVD_DPG_PAUSE_BASE_IDX 1
#define mmUVD_SCRATCH1 0x0015
#define mmUVD_SCRATCH1_BASE_IDX 1
#define mmUVD_SCRATCH2 0x0016
#define mmUVD_SCRATCH2_BASE_IDX 1
#define mmUVD_SCRATCH3 0x0017
#define mmUVD_SCRATCH3_BASE_IDX 1
#define mmUVD_SCRATCH4 0x0018
#define mmUVD_SCRATCH4_BASE_IDX 1
#define mmUVD_SCRATCH5 0x0019
#define mmUVD_SCRATCH5_BASE_IDX 1
#define mmUVD_SCRATCH6 0x001a
#define mmUVD_SCRATCH6_BASE_IDX 1
#define mmUVD_SCRATCH7 0x001b
#define mmUVD_SCRATCH7_BASE_IDX 1
#define mmUVD_SCRATCH8 0x001c
#define mmUVD_SCRATCH8_BASE_IDX 1
#define mmUVD_SCRATCH9 0x001d
#define mmUVD_SCRATCH9_BASE_IDX 1
#define mmUVD_SCRATCH10 0x001e
#define mmUVD_SCRATCH10_BASE_IDX 1
#define mmUVD_SCRATCH11 0x001f
#define mmUVD_SCRATCH11_BASE_IDX 1
#define mmUVD_SCRATCH12 0x0020
#define mmUVD_SCRATCH12_BASE_IDX 1
#define mmUVD_SCRATCH13 0x0021
#define mmUVD_SCRATCH13_BASE_IDX 1
#define mmUVD_SCRATCH14 0x0022
#define mmUVD_SCRATCH14_BASE_IDX 1
#define mmUVD_FREE_COUNTER_REG 0x0024
#define mmUVD_FREE_COUNTER_REG_BASE_IDX 1
#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x0025
#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x0026
#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_DPG_VCPU_CACHE_OFFSET0 0x0027
#define mmUVD_DPG_VCPU_CACHE_OFFSET0_BASE_IDX 1
#define mmUVD_DPG_LMI_VCPU_CACHE_VMID 0x0028
#define mmUVD_DPG_LMI_VCPU_CACHE_VMID_BASE_IDX 1
#define mmUVD_PF_STATUS 0x0039
#define mmUVD_PF_STATUS_BASE_IDX 1
#define mmUVD_DPG_CLK_EN_VCPU_REPORT 0x003c
#define mmUVD_DPG_CLK_EN_VCPU_REPORT_BASE_IDX 1
#define mmUVD_GFX8_ADDR_CONFIG 0x0049
#define mmUVD_GFX8_ADDR_CONFIG_BASE_IDX 1
#define mmUVD_GFX10_ADDR_CONFIG 0x004a
#define mmUVD_GFX10_ADDR_CONFIG_BASE_IDX 1
#define mmUVD_GPCNT2_CNTL 0x004b
#define mmUVD_GPCNT2_CNTL_BASE_IDX 1
#define mmUVD_GPCNT2_TARGET_LOWER 0x004c
#define mmUVD_GPCNT2_TARGET_LOWER_BASE_IDX 1
#define mmUVD_GPCNT2_STATUS_LOWER 0x004d
#define mmUVD_GPCNT2_STATUS_LOWER_BASE_IDX 1
#define mmUVD_GPCNT2_TARGET_UPPER 0x004e
#define mmUVD_GPCNT2_TARGET_UPPER_BASE_IDX 1
#define mmUVD_GPCNT2_STATUS_UPPER 0x004f
#define mmUVD_GPCNT2_STATUS_UPPER_BASE_IDX 1
#define mmUVD_GPCNT3_CNTL 0x0050
#define mmUVD_GPCNT3_CNTL_BASE_IDX 1
#define mmUVD_GPCNT3_TARGET_LOWER 0x0051
#define mmUVD_GPCNT3_TARGET_LOWER_BASE_IDX 1
#define mmUVD_GPCNT3_STATUS_LOWER 0x0052
#define mmUVD_GPCNT3_STATUS_LOWER_BASE_IDX 1
#define mmUVD_GPCNT3_TARGET_UPPER 0x0053
#define mmUVD_GPCNT3_TARGET_UPPER_BASE_IDX 1
#define mmUVD_GPCNT3_STATUS_UPPER 0x0054
#define mmUVD_GPCNT3_STATUS_UPPER_BASE_IDX 1
// addressBlock: uvd0_uvddec
// base address: 0x1fa00
#define mmUVD_STATUS 0x0080
#define mmUVD_STATUS_BASE_IDX 1
#define mmUVD_ENC_PIPE_BUSY 0x0081
#define mmUVD_ENC_PIPE_BUSY_BASE_IDX 1
#define mmUVD_SOFT_RESET 0x0084
#define mmUVD_SOFT_RESET_BASE_IDX 1
#define mmUVD_SOFT_RESET2 0x0085
#define mmUVD_SOFT_RESET2_BASE_IDX 1
#define mmUVD_MMSCH_SOFT_RESET 0x0086
#define mmUVD_MMSCH_SOFT_RESET_BASE_IDX 1
#define mmUVD_CGC_GATE 0x0088
#define mmUVD_CGC_GATE_BASE_IDX 1
#define mmUVD_CGC_STATUS 0x0089
#define mmUVD_CGC_STATUS_BASE_IDX 1
#define mmUVD_CGC_CTRL 0x008a
#define mmUVD_CGC_CTRL_BASE_IDX 1
#define mmUVD_CGC_UDEC_STATUS 0x008b
#define mmUVD_CGC_UDEC_STATUS_BASE_IDX 1
#define mmUVD_SUVD_CGC_GATE 0x008c
#define mmUVD_SUVD_CGC_GATE_BASE_IDX 1
#define mmUVD_SUVD_CGC_STATUS 0x008d
#define mmUVD_SUVD_CGC_STATUS_BASE_IDX 1
#define mmUVD_SUVD_CGC_CTRL 0x008e
#define mmUVD_SUVD_CGC_CTRL_BASE_IDX 1
#define mmUVD_GPCOM_VCPU_CMD 0x008f
#define mmUVD_GPCOM_VCPU_CMD_BASE_IDX 1
#define mmUVD_GPCOM_VCPU_DATA0 0x0090
#define mmUVD_GPCOM_VCPU_DATA0_BASE_IDX 1
#define mmUVD_GPCOM_VCPU_DATA1 0x0091
#define mmUVD_GPCOM_VCPU_DATA1_BASE_IDX 1
#define mmUVD_GPCOM_SYS_CMD 0x0092
#define mmUVD_GPCOM_SYS_CMD_BASE_IDX 1
#define mmUVD_GPCOM_SYS_DATA0 0x0093
#define mmUVD_GPCOM_SYS_DATA0_BASE_IDX 1
#define mmUVD_GPCOM_SYS_DATA1 0x0094
#define mmUVD_GPCOM_SYS_DATA1_BASE_IDX 1
#define mmUVD_VCPU_INT_EN 0x0095
#define mmUVD_VCPU_INT_EN_BASE_IDX 1
#define mmUVD_VCPU_INT_ACK 0x0097
#define mmUVD_VCPU_INT_ACK_BASE_IDX 1
#define mmUVD_VCPU_INT_ROUTE 0x0098
#define mmUVD_VCPU_INT_ROUTE_BASE_IDX 1
#define mmUVD_ENC_VCPU_INT_EN 0x009e
#define mmUVD_ENC_VCPU_INT_EN_BASE_IDX 1
#define mmUVD_ENC_VCPU_INT_ACK 0x00a0
#define mmUVD_ENC_VCPU_INT_ACK_BASE_IDX 1
#define mmUVD_MASTINT_EN 0x00a1
#define mmUVD_MASTINT_EN_BASE_IDX 1
#define mmUVD_SYS_INT_EN 0x00a2
#define mmUVD_SYS_INT_EN_BASE_IDX 1
#define mmUVD_SYS_INT_STATUS 0x00a3
#define mmUVD_SYS_INT_STATUS_BASE_IDX 1
#define mmUVD_SYS_INT_ACK 0x00a4
#define mmUVD_SYS_INT_ACK_BASE_IDX 1
#define mmUVD_JOB_DONE 0x00a5
#define mmUVD_JOB_DONE_BASE_IDX 1
#define mmUVD_CBUF_ID 0x00a6
#define mmUVD_CBUF_ID_BASE_IDX 1
#define mmUVD_CONTEXT_ID 0x00a7
#define mmUVD_CONTEXT_ID_BASE_IDX 1
#define mmUVD_CONTEXT_ID2 0x00a8
#define mmUVD_CONTEXT_ID2_BASE_IDX 1
#define mmUVD_NO_OP 0x00a9
#define mmUVD_NO_OP_BASE_IDX 1
#define mmUVD_RB_BASE_LO 0x00aa
#define mmUVD_RB_BASE_LO_BASE_IDX 1
#define mmUVD_RB_BASE_HI 0x00ab
#define mmUVD_RB_BASE_HI_BASE_IDX 1
#define mmUVD_RB_SIZE 0x00ac
#define mmUVD_RB_SIZE_BASE_IDX 1
#define mmUVD_RB_RPTR 0x00ad
#define mmUVD_RB_RPTR_BASE_IDX 1
#define mmUVD_RB_WPTR 0x00ae
#define mmUVD_RB_WPTR_BASE_IDX 1
#define mmUVD_RB_BASE_LO2 0x00af
#define mmUVD_RB_BASE_LO2_BASE_IDX 1
#define mmUVD_RB_BASE_HI2 0x00b0
#define mmUVD_RB_BASE_HI2_BASE_IDX 1
#define mmUVD_RB_SIZE2 0x00b1
#define mmUVD_RB_SIZE2_BASE_IDX 1
#define mmUVD_RB_RPTR2 0x00b2
#define mmUVD_RB_RPTR2_BASE_IDX 1
#define mmUVD_RB_WPTR2 0x00b3
#define mmUVD_RB_WPTR2_BASE_IDX 1
#define mmUVD_RB_BASE_LO3 0x00b4
#define mmUVD_RB_BASE_LO3_BASE_IDX 1
#define mmUVD_RB_BASE_HI3 0x00b5
#define mmUVD_RB_BASE_HI3_BASE_IDX 1
#define mmUVD_RB_SIZE3 0x00b6
#define mmUVD_RB_SIZE3_BASE_IDX 1
#define mmUVD_RB_RPTR3 0x00b7
#define mmUVD_RB_RPTR3_BASE_IDX 1
#define mmUVD_RB_WPTR3 0x00b8
#define mmUVD_RB_WPTR3_BASE_IDX 1
#define mmUVD_RB_BASE_LO4 0x00b9
#define mmUVD_RB_BASE_LO4_BASE_IDX 1
#define mmUVD_RB_BASE_HI4 0x00ba
#define mmUVD_RB_BASE_HI4_BASE_IDX 1
#define mmUVD_RB_SIZE4 0x00bb
#define mmUVD_RB_SIZE4_BASE_IDX 1
#define mmUVD_RB_RPTR4 0x00bc
#define mmUVD_RB_RPTR4_BASE_IDX 1
#define mmUVD_RB_WPTR4 0x00bd
#define mmUVD_RB_WPTR4_BASE_IDX 1
#define mmUVD_OUT_RB_BASE_LO 0x00be
#define mmUVD_OUT_RB_BASE_LO_BASE_IDX 1
#define mmUVD_OUT_RB_BASE_HI 0x00bf
#define mmUVD_OUT_RB_BASE_HI_BASE_IDX 1
#define mmUVD_OUT_RB_SIZE 0x00c0
#define mmUVD_OUT_RB_SIZE_BASE_IDX 1
#define mmUVD_OUT_RB_RPTR 0x00c1
#define mmUVD_OUT_RB_RPTR_BASE_IDX 1
#define mmUVD_OUT_RB_WPTR 0x00c2
#define mmUVD_OUT_RB_WPTR_BASE_IDX 1
#define mmUVD_RB_ARB_CTRL 0x00c6
#define mmUVD_RB_ARB_CTRL_BASE_IDX 1
#define mmUVD_CTX_INDEX 0x00c7
#define mmUVD_CTX_INDEX_BASE_IDX 1
#define mmUVD_CTX_DATA 0x00c8
#define mmUVD_CTX_DATA_BASE_IDX 1
#define mmUVD_CXW_WR 0x00c9
#define mmUVD_CXW_WR_BASE_IDX 1
#define mmUVD_CXW_WR_INT_ID 0x00ca
#define mmUVD_CXW_WR_INT_ID_BASE_IDX 1
#define mmUVD_CXW_WR_INT_CTX_ID 0x00cb
#define mmUVD_CXW_WR_INT_CTX_ID_BASE_IDX 1
#define mmUVD_CXW_INT_ID 0x00cc
#define mmUVD_CXW_INT_ID_BASE_IDX 1
#define mmUVD_TOP_CTRL 0x00cf
#define mmUVD_TOP_CTRL_BASE_IDX 1
#define mmUVD_YBASE 0x00d0
#define mmUVD_YBASE_BASE_IDX 1
#define mmUVD_UVBASE 0x00d1
#define mmUVD_UVBASE_BASE_IDX 1
#define mmUVD_PITCH 0x00d2
#define mmUVD_PITCH_BASE_IDX 1
#define mmUVD_WIDTH 0x00d3
#define mmUVD_WIDTH_BASE_IDX 1
#define mmUVD_HEIGHT 0x00d4
#define mmUVD_HEIGHT_BASE_IDX 1
#define mmUVD_PICCOUNT 0x00d5
#define mmUVD_PICCOUNT_BASE_IDX 1
#define mmUVD_SCRATCH_NP 0x00db
#define mmUVD_SCRATCH_NP_BASE_IDX 1
#define mmUVD_VERSION 0x00dd
#define mmUVD_VERSION_BASE_IDX 1
#define mmUVD_GP_SCRATCH0 0x00de
#define mmUVD_GP_SCRATCH0_BASE_IDX 1
#define mmUVD_GP_SCRATCH1 0x00df
#define mmUVD_GP_SCRATCH1_BASE_IDX 1
#define mmUVD_GP_SCRATCH2 0x00e0
#define mmUVD_GP_SCRATCH2_BASE_IDX 1
#define mmUVD_GP_SCRATCH3 0x00e1
#define mmUVD_GP_SCRATCH3_BASE_IDX 1
#define mmUVD_GP_SCRATCH4 0x00e2
#define mmUVD_GP_SCRATCH4_BASE_IDX 1
#define mmUVD_GP_SCRATCH5 0x00e3
#define mmUVD_GP_SCRATCH5_BASE_IDX 1
#define mmUVD_GP_SCRATCH6 0x00e4
#define mmUVD_GP_SCRATCH6_BASE_IDX 1
#define mmUVD_GP_SCRATCH7 0x00e5
#define mmUVD_GP_SCRATCH7_BASE_IDX 1
#define mmUVD_GP_SCRATCH8 0x00e6
#define mmUVD_GP_SCRATCH8_BASE_IDX 1
#define mmUVD_GP_SCRATCH9 0x00e7
#define mmUVD_GP_SCRATCH9_BASE_IDX 1
#define mmUVD_GP_SCRATCH10 0x00e8
#define mmUVD_GP_SCRATCH10_BASE_IDX 1
#define mmUVD_GP_SCRATCH11 0x00e9
#define mmUVD_GP_SCRATCH11_BASE_IDX 1
#define mmUVD_GP_SCRATCH12 0x00ea
#define mmUVD_GP_SCRATCH12_BASE_IDX 1
#define mmUVD_GP_SCRATCH13 0x00eb
#define mmUVD_GP_SCRATCH13_BASE_IDX 1
#define mmUVD_GP_SCRATCH14 0x00ec
#define mmUVD_GP_SCRATCH14_BASE_IDX 1
#define mmUVD_GP_SCRATCH15 0x00ed
#define mmUVD_GP_SCRATCH15_BASE_IDX 1
#define mmUVD_GP_SCRATCH16 0x00ee
#define mmUVD_GP_SCRATCH16_BASE_IDX 1
#define mmUVD_GP_SCRATCH17 0x00ef
#define mmUVD_GP_SCRATCH17_BASE_IDX 1
#define mmUVD_GP_SCRATCH18 0x00f0
#define mmUVD_GP_SCRATCH18_BASE_IDX 1
#define mmUVD_GP_SCRATCH19 0x00f1
#define mmUVD_GP_SCRATCH19_BASE_IDX 1
#define mmUVD_GP_SCRATCH20 0x00f2
#define mmUVD_GP_SCRATCH20_BASE_IDX 1
#define mmUVD_GP_SCRATCH21 0x00f3
#define mmUVD_GP_SCRATCH21_BASE_IDX 1
#define mmUVD_GP_SCRATCH22 0x00f4
#define mmUVD_GP_SCRATCH22_BASE_IDX 1
#define mmUVD_GP_SCRATCH23 0x00f5
#define mmUVD_GP_SCRATCH23_BASE_IDX 1
// addressBlock: uvd0_ecpudec
// base address: 0x1fd00
#define mmUVD_VCPU_CACHE_OFFSET0 0x0140
#define mmUVD_VCPU_CACHE_OFFSET0_BASE_IDX 1
#define mmUVD_VCPU_CACHE_SIZE0 0x0141
#define mmUVD_VCPU_CACHE_SIZE0_BASE_IDX 1
#define mmUVD_VCPU_CACHE_OFFSET1 0x0142
#define mmUVD_VCPU_CACHE_OFFSET1_BASE_IDX 1
#define mmUVD_VCPU_CACHE_SIZE1 0x0143
#define mmUVD_VCPU_CACHE_SIZE1_BASE_IDX 1
#define mmUVD_VCPU_CACHE_OFFSET2 0x0144
#define mmUVD_VCPU_CACHE_OFFSET2_BASE_IDX 1
#define mmUVD_VCPU_CACHE_SIZE2 0x0145
#define mmUVD_VCPU_CACHE_SIZE2_BASE_IDX 1
#define mmUVD_VCPU_CACHE_OFFSET3 0x0146
#define mmUVD_VCPU_CACHE_OFFSET3_BASE_IDX 1
#define mmUVD_VCPU_CACHE_SIZE3 0x0147
#define mmUVD_VCPU_CACHE_SIZE3_BASE_IDX 1
#define mmUVD_VCPU_CACHE_OFFSET4 0x0148
#define mmUVD_VCPU_CACHE_OFFSET4_BASE_IDX 1
#define mmUVD_VCPU_CACHE_SIZE4 0x0149
#define mmUVD_VCPU_CACHE_SIZE4_BASE_IDX 1
#define mmUVD_VCPU_CACHE_OFFSET5 0x014a
#define mmUVD_VCPU_CACHE_OFFSET5_BASE_IDX 1
#define mmUVD_VCPU_CACHE_SIZE5 0x014b
#define mmUVD_VCPU_CACHE_SIZE5_BASE_IDX 1
#define mmUVD_VCPU_CACHE_OFFSET6 0x014c
#define mmUVD_VCPU_CACHE_OFFSET6_BASE_IDX 1
#define mmUVD_VCPU_CACHE_SIZE6 0x014d
#define mmUVD_VCPU_CACHE_SIZE6_BASE_IDX 1
#define mmUVD_VCPU_CACHE_OFFSET7 0x014e
#define mmUVD_VCPU_CACHE_OFFSET7_BASE_IDX 1
#define mmUVD_VCPU_CACHE_SIZE7 0x014f
#define mmUVD_VCPU_CACHE_SIZE7_BASE_IDX 1
#define mmUVD_VCPU_CACHE_OFFSET8 0x0150
#define mmUVD_VCPU_CACHE_OFFSET8_BASE_IDX 1
#define mmUVD_VCPU_CACHE_SIZE8 0x0151
#define mmUVD_VCPU_CACHE_SIZE8_BASE_IDX 1
#define mmUVD_VCPU_NONCACHE_OFFSET0 0x0152
#define mmUVD_VCPU_NONCACHE_OFFSET0_BASE_IDX 1
#define mmUVD_VCPU_NONCACHE_SIZE0 0x0153
#define mmUVD_VCPU_NONCACHE_SIZE0_BASE_IDX 1
#define mmUVD_VCPU_NONCACHE_OFFSET1 0x0154
#define mmUVD_VCPU_NONCACHE_OFFSET1_BASE_IDX 1
#define mmUVD_VCPU_NONCACHE_SIZE1 0x0155
#define mmUVD_VCPU_NONCACHE_SIZE1_BASE_IDX 1
#define mmUVD_VCPU_CNTL 0x0156
#define mmUVD_VCPU_CNTL_BASE_IDX 1
#define mmUVD_VCPU_PRID 0x0157
#define mmUVD_VCPU_PRID_BASE_IDX 1
#define mmUVD_VCPU_TRCE 0x0158
#define mmUVD_VCPU_TRCE_BASE_IDX 1
#define mmUVD_VCPU_TRCE_RD 0x0159
#define mmUVD_VCPU_TRCE_RD_BASE_IDX 1
// addressBlock: uvd0_uvd_mpcdec
// base address: 0x20310
#define mmUVD_MP_SWAP_CNTL 0x02c4
#define mmUVD_MP_SWAP_CNTL_BASE_IDX 1
#define mmUVD_MP_SWAP_CNTL2 0x02c5
#define mmUVD_MP_SWAP_CNTL2_BASE_IDX 1
#define mmUVD_MPC_LUMA_SRCH 0x02c6
#define mmUVD_MPC_LUMA_SRCH_BASE_IDX 1
#define mmUVD_MPC_LUMA_HIT 0x02c7
#define mmUVD_MPC_LUMA_HIT_BASE_IDX 1
#define mmUVD_MPC_LUMA_HITPEND 0x02c8
#define mmUVD_MPC_LUMA_HITPEND_BASE_IDX 1
#define mmUVD_MPC_CHROMA_SRCH 0x02c9
#define mmUVD_MPC_CHROMA_SRCH_BASE_IDX 1
#define mmUVD_MPC_CHROMA_HIT 0x02ca
#define mmUVD_MPC_CHROMA_HIT_BASE_IDX 1
#define mmUVD_MPC_CHROMA_HITPEND 0x02cb
#define mmUVD_MPC_CHROMA_HITPEND_BASE_IDX 1
#define mmUVD_MPC_CNTL 0x02cc
#define mmUVD_MPC_CNTL_BASE_IDX 1
#define mmUVD_MPC_PITCH 0x02cd
#define mmUVD_MPC_PITCH_BASE_IDX 1
#define mmUVD_MPC_SET_MUXA0 0x02ce
#define mmUVD_MPC_SET_MUXA0_BASE_IDX 1
#define mmUVD_MPC_SET_MUXA1 0x02cf
#define mmUVD_MPC_SET_MUXA1_BASE_IDX 1
#define mmUVD_MPC_SET_MUXB0 0x02d0
#define mmUVD_MPC_SET_MUXB0_BASE_IDX 1
#define mmUVD_MPC_SET_MUXB1 0x02d1
#define mmUVD_MPC_SET_MUXB1_BASE_IDX 1
#define mmUVD_MPC_SET_MUX 0x02d2
#define mmUVD_MPC_SET_MUX_BASE_IDX 1
#define mmUVD_MPC_SET_ALU 0x02d3
#define mmUVD_MPC_SET_ALU_BASE_IDX 1
#define mmUVD_MPC_PERF0 0x02d4
#define mmUVD_MPC_PERF0_BASE_IDX 1
#define mmUVD_MPC_PERF1 0x02d5
#define mmUVD_MPC_PERF1_BASE_IDX 1
// addressBlock: uvd0_uvd_rbcdec
// base address: 0x20370
#define mmUVD_RBC_IB_SIZE 0x02dc
#define mmUVD_RBC_IB_SIZE_BASE_IDX 1
#define mmUVD_RBC_IB_SIZE_UPDATE 0x02dd
#define mmUVD_RBC_IB_SIZE_UPDATE_BASE_IDX 1
#define mmUVD_RBC_RB_CNTL 0x02de
#define mmUVD_RBC_RB_CNTL_BASE_IDX 1
#define mmUVD_RBC_RB_RPTR_ADDR 0x02df
#define mmUVD_RBC_RB_RPTR_ADDR_BASE_IDX 1
#define mmUVD_RBC_RB_RPTR 0x02e0
#define mmUVD_RBC_RB_RPTR_BASE_IDX 1
#define mmUVD_RBC_RB_WPTR 0x02e1
#define mmUVD_RBC_RB_WPTR_BASE_IDX 1
#define mmUVD_RBC_VCPU_ACCESS 0x02e2
#define mmUVD_RBC_VCPU_ACCESS_BASE_IDX 1
#define mmUVD_RBC_READ_REQ_URGENT_CNTL 0x02e5
#define mmUVD_RBC_READ_REQ_URGENT_CNTL_BASE_IDX 1
#define mmUVD_RBC_RB_WPTR_CNTL 0x02e6
#define mmUVD_RBC_RB_WPTR_CNTL_BASE_IDX 1
#define mmUVD_RBC_WPTR_STATUS 0x02e7
#define mmUVD_RBC_WPTR_STATUS_BASE_IDX 1
#define mmUVD_RBC_WPTR_POLL_CNTL 0x02e8
#define mmUVD_RBC_WPTR_POLL_CNTL_BASE_IDX 1
#define mmUVD_RBC_WPTR_POLL_ADDR 0x02e9
#define mmUVD_RBC_WPTR_POLL_ADDR_BASE_IDX 1
#define mmUVD_SEMA_CMD 0x02ea
#define mmUVD_SEMA_CMD_BASE_IDX 1
#define mmUVD_SEMA_ADDR_LOW 0x02eb
#define mmUVD_SEMA_ADDR_LOW_BASE_IDX 1
#define mmUVD_SEMA_ADDR_HIGH 0x02ec
#define mmUVD_SEMA_ADDR_HIGH_BASE_IDX 1
#define mmUVD_ENGINE_CNTL 0x02ed
#define mmUVD_ENGINE_CNTL_BASE_IDX 1
#define mmUVD_SEMA_TIMEOUT_STATUS 0x02ee
#define mmUVD_SEMA_TIMEOUT_STATUS_BASE_IDX 1
#define mmUVD_SEMA_CNTL 0x02ef
#define mmUVD_SEMA_CNTL_BASE_IDX 1
#define mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0x02f0
#define mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL_BASE_IDX 1
#define mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0x02f1
#define mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL_BASE_IDX 1
#define mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0x02f2
#define mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL_BASE_IDX 1
#define mmUVD_JOB_START 0x02f3
#define mmUVD_JOB_START_BASE_IDX 1
#define mmUVD_RBC_BUF_STATUS 0x02f4
#define mmUVD_RBC_BUF_STATUS_BASE_IDX 1
// addressBlock: uvd0_uvdgendec
// base address: 0x20470
#define mmUVD_LCM_CGC_CNTRL 0x033f
#define mmUVD_LCM_CGC_CNTRL_BASE_IDX 1
#define mmUVD_MIF_CURR_UV_ADDR_CONFIG 0x03a0
#define mmUVD_MIF_CURR_UV_ADDR_CONFIG_BASE_IDX 1
#define mmUVD_MIF_REF_UV_ADDR_CONFIG 0x03a1
#define mmUVD_MIF_REF_UV_ADDR_CONFIG_BASE_IDX 1
#define mmUVD_MIF_RECON1_UV_ADDR_CONFIG 0x03a2
#define mmUVD_MIF_RECON1_UV_ADDR_CONFIG_BASE_IDX 1
#define mmUVD_MIF_CURR_ADDR_CONFIG 0x03ae
#define mmUVD_MIF_CURR_ADDR_CONFIG_BASE_IDX 1
#define mmUVD_MIF_REF_ADDR_CONFIG 0x03af
#define mmUVD_MIF_REF_ADDR_CONFIG_BASE_IDX 1
#define mmUVD_MIF_RECON1_ADDR_CONFIG 0x03e1
#define mmUVD_MIF_RECON1_ADDR_CONFIG_BASE_IDX 1
// addressBlock: uvd0_lmi_adpdec
// base address: 0x20870
#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x0432
#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x0433
#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x0434
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH 0x0435
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW 0x0438
#define mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH 0x0439
#define mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_NC1_64BIT_BAR_LOW 0x043a
#define mmUVD_LMI_VCPU_NC1_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH 0x043b
#define mmUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x043c
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x043d
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW 0x0468
#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH 0x0469
#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW 0x046a
#define mmUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH 0x046b
#define mmUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW 0x046c
#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH 0x046d
#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW 0x046e
#define mmUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH 0x046f
#define mmUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW 0x0470
#define mmUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH 0x0471
#define mmUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW 0x0472
#define mmUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH 0x0473
#define mmUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW 0x0474
#define mmUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH 0x0475
#define mmUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW 0x0476
#define mmUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH 0x0477
#define mmUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_SPH_64BIT_BAR_HIGH 0x047c
#define mmUVD_LMI_SPH_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW 0x047d
#define mmUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH 0x047e
#define mmUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW 0x047f
#define mmUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH 0x0480
#define mmUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW 0x0481
#define mmUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH 0x0482
#define mmUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW 0x0483
#define mmUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH 0x0484
#define mmUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW 0x0485
#define mmUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH 0x0486
#define mmUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW 0x0487
#define mmUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH 0x0488
#define mmUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW 0x0489
#define mmUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH 0x048a
#define mmUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW 0x048b
#define mmUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH 0x048c
#define mmUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_MMSCH_NC_VMID 0x048d
#define mmUVD_LMI_MMSCH_NC_VMID_BASE_IDX 1
#define mmUVD_LMI_MMSCH_CTRL 0x048e
#define mmUVD_LMI_MMSCH_CTRL_BASE_IDX 1
#define mmUVD_LMI_ARB_CTRL2 0x049a
#define mmUVD_LMI_ARB_CTRL2_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE_VMIDS_MULTI 0x049f
#define mmUVD_LMI_VCPU_CACHE_VMIDS_MULTI_BASE_IDX 1
#define mmUVD_LMI_VCPU_NC_VMIDS_MULTI 0x04a0
#define mmUVD_LMI_VCPU_NC_VMIDS_MULTI_BASE_IDX 1
#define mmUVD_LMI_LAT_CTRL 0x04a1
#define mmUVD_LMI_LAT_CTRL_BASE_IDX 1
#define mmUVD_LMI_LAT_CNTR 0x04a2
#define mmUVD_LMI_LAT_CNTR_BASE_IDX 1
#define mmUVD_LMI_AVG_LAT_CNTR 0x04a3
#define mmUVD_LMI_AVG_LAT_CNTR_BASE_IDX 1
#define mmUVD_LMI_SPH 0x04a4
#define mmUVD_LMI_SPH_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE_VMID 0x04a5
#define mmUVD_LMI_VCPU_CACHE_VMID_BASE_IDX 1
#define mmUVD_LMI_CTRL2 0x04a6
#define mmUVD_LMI_CTRL2_BASE_IDX 1
#define mmUVD_LMI_URGENT_CTRL 0x04a7
#define mmUVD_LMI_URGENT_CTRL_BASE_IDX 1
#define mmUVD_LMI_CTRL 0x04a8
#define mmUVD_LMI_CTRL_BASE_IDX 1
#define mmUVD_LMI_STATUS 0x04a9
#define mmUVD_LMI_STATUS_BASE_IDX 1
#define mmUVD_LMI_PERFMON_CTRL 0x04ac
#define mmUVD_LMI_PERFMON_CTRL_BASE_IDX 1
#define mmUVD_LMI_PERFMON_COUNT_LO 0x04ad
#define mmUVD_LMI_PERFMON_COUNT_LO_BASE_IDX 1
#define mmUVD_LMI_PERFMON_COUNT_HI 0x04ae
#define mmUVD_LMI_PERFMON_COUNT_HI_BASE_IDX 1
#define mmUVD_LMI_RBC_RB_VMID 0x04b0
#define mmUVD_LMI_RBC_RB_VMID_BASE_IDX 1
#define mmUVD_LMI_RBC_IB_VMID 0x04b1
#define mmUVD_LMI_RBC_IB_VMID_BASE_IDX 1
#define mmUVD_LMI_MC_CREDITS 0x04b2
#define mmUVD_LMI_MC_CREDITS_BASE_IDX 1
// addressBlock: uvd0_uvdnpdec
// base address: 0x20bd0
#define mmMDM_DMA_CMD 0x06f4
#define mmMDM_DMA_CMD_BASE_IDX 1
#define mmMDM_DMA_STATUS 0x06f5
#define mmMDM_DMA_STATUS_BASE_IDX 1
#define mmMDM_DMA_CTL 0x06f6
#define mmMDM_DMA_CTL_BASE_IDX 1
#define mmMDM_ENC_PIPE_BUSY 0x06f7
#define mmMDM_ENC_PIPE_BUSY_BASE_IDX 1
#define mmMDM_WIG_PIPE_BUSY 0x06f9
#define mmMDM_WIG_PIPE_BUSY_BASE_IDX 1
/* VCN 2_6_0 regs */
#define mmUVD_RAS_VCPU_VCODEC_STATUS 0x0057
#define mmUVD_RAS_VCPU_VCODEC_STATUS_BASE_IDX 1
#define mmUVD_RAS_MMSCH_FATAL_ERROR 0x0058
#define mmUVD_RAS_MMSCH_FATAL_ERROR_BASE_IDX 1
#define mmVCN_RAS_CNTL 0x04b9
#define mmVCN_RAS_CNTL_BASE_IDX 1
/* JPEG 2_6_0 regs */
#define mmUVD_RAS_JPEG0_STATUS 0x0059
#define mmUVD_RAS_JPEG0_STATUS_BASE_IDX 1
#define mmUVD_RAS_JPEG1_STATUS 0x005a
#define mmUVD_RAS_JPEG1_STATUS_BASE_IDX 1
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-2012 Red Hat, Inc.
*
* This file is released under the GPL.
*/
#include "dm-thin-metadata.h"
#include "persistent-data/dm-btree.h"
#include "persistent-data/dm-space-map.h"
#include "persistent-data/dm-space-map-disk.h"
#include "persistent-data/dm-transaction-manager.h"
#include <linux/list.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
/*
*--------------------------------------------------------------------------
* As far as the metadata goes, there is:
*
* - A superblock in block zero, taking up fewer than 512 bytes for
* atomic writes.
*
* - A space map managing the metadata blocks.
*
* - A space map managing the data blocks.
*
* - A btree mapping our internal thin dev ids onto struct disk_device_details.
*
* - A hierarchical btree, with 2 levels which effectively maps (thin
* dev id, virtual block) -> block_time. Block time is a 64-bit
* field holding the time in the low 24 bits, and block in the top 40
* bits.
*
* BTrees consist solely of btree_nodes that each fill a block. Some are
* internal nodes, so their values are __le64 pointers to other
* nodes. Leaf nodes can store data of any reasonable size (i.e. much
* smaller than the block size). A node consists of a header,
* followed by an array of keys, followed by an array of values. We
* binary search on the keys, so they're held together to help the
* cpu cache.
*
* Space maps have 2 btrees:
*
* - One maps a uint64_t onto a struct index_entry, which points to a
*   bitmap block and holds some details, such as how many free entries
*   there are.
*
* - The bitmap blocks have a header (for the checksum). Then the rest
* of the block is pairs of bits. With the meaning being:
*
* 0 - ref count is 0
* 1 - ref count is 1
* 2 - ref count is 2
* 3 - ref count is higher than 2
*
* - If the count is higher than 2 then the ref count is entered in a
* second btree that directly maps the block_address to a uint32_t ref
* count.
*
* The space map metadata variant doesn't have a bitmaps btree. Instead
* it has one single block's worth of index_entries. This avoids
* recursive issues with the bitmap btree needing to allocate space in
* order to insert. With a small data block size such as 64k the
* metadata supports data devices that are hundreds of terabytes.
*
* The space maps allocate space linearly from front to back. Space that
* is freed in a transaction is never recycled within that transaction.
* To try and avoid fragmenting _free_ space the allocator always goes
* back and fills in gaps.
*
* All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
* from the block manager.
*--------------------------------------------------------------------------
*/
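/*
 * Illustrative sketch (not part of the metadata code): one way to read
 * the two-bit ref-count pairs described above, where 'bits' points at
 * the pair data following the bitmap header. The exact on-disk bit
 * ordering here is an assumption for illustration only (the real
 * accessors live in persistent-data/dm-space-map-common.c); a result
 * of 3 means the true count lives in the overflow btree.
 */
static inline unsigned int example_sm_pair(const unsigned char *bits,
					   unsigned int index)
{
	/* Four two-bit pairs per byte; mask off the selected pair. */
	return (bits[index / 4] >> ((index % 4) * 2)) & 3;
}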
#define DM_MSG_PREFIX "thin metadata"
#define THIN_SUPERBLOCK_MAGIC 27022010
#define THIN_SUPERBLOCK_LOCATION 0
#define THIN_VERSION 2
#define SECTOR_TO_BLOCK_SHIFT 3
/*
 * For a btree insert we may need:
 *   3 blocks for the btree insert +
 *   2 for the btree lookup used within the space map (total 5).
 * For a btree remove:
 *   2 for the shadow spine +
 *   4 for rebalancing 3 child nodes (total 6).
 */
#define THIN_MAX_CONCURRENT_LOCKS 6
/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128
/*
* Little endian on-disk superblock and device details.
*/
struct thin_disk_superblock {
__le32 csum; /* Checksum of superblock except for this field. */
__le32 flags;
__le64 blocknr; /* This block number, dm_block_t. */
__u8 uuid[16];
__le64 magic;
__le32 version;
__le32 time;
__le64 trans_id;
/*
* Root held by userspace transactions.
*/
__le64 held_root;
__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
/*
* 2-level btree mapping (dev_id, (dev block, time)) -> data block
*/
__le64 data_mapping_root;
/*
* Device detail root mapping dev_id -> device_details
*/
__le64 device_details_root;
__le32 data_block_size; /* In 512-byte sectors. */
__le32 metadata_block_size; /* In 512-byte sectors. */
__le64 metadata_nr_blocks;
__le32 compat_flags;
__le32 compat_ro_flags;
__le32 incompat_flags;
} __packed;
struct disk_device_details {
__le64 mapped_blocks;
__le64 transaction_id; /* When created. */
__le32 creation_time;
__le32 snapshotted_time;
} __packed;
struct dm_pool_metadata {
struct hlist_node hash;
struct block_device *bdev;
struct dm_block_manager *bm;
struct dm_space_map *metadata_sm;
struct dm_space_map *data_sm;
struct dm_transaction_manager *tm;
struct dm_transaction_manager *nb_tm;
/*
* Two-level btree.
* First level holds thin_dev_t.
* Second level holds mappings.
*/
struct dm_btree_info info;
/*
* Non-blocking version of the above.
*/
struct dm_btree_info nb_info;
/*
* Just the top level for deleting whole devices.
*/
struct dm_btree_info tl_info;
/*
* Just the bottom level for creating new devices.
*/
struct dm_btree_info bl_info;
/*
* Describes the device details btree.
*/
struct dm_btree_info details_info;
struct rw_semaphore root_lock;
uint32_t time;
dm_block_t root;
dm_block_t details_root;
struct list_head thin_devices;
uint64_t trans_id;
unsigned long flags;
sector_t data_block_size;
/*
* Pre-commit callback.
*
* This allows the thin provisioning target to run a callback before
* the metadata are committed.
*/
dm_pool_pre_commit_fn pre_commit_fn;
void *pre_commit_context;
/*
* We reserve a section of the metadata for commit overhead.
* All reported space does *not* include this.
*/
dm_block_t metadata_reserve;
/*
* Set if a transaction has to be aborted but the attempt to roll back
* to the previous (good) transaction failed. The only pool metadata
* operation possible in this state is the closing of the device.
*/
bool fail_io:1;
/*
* Set once a thin-pool has been accessed through one of the interfaces
* that imply the pool is in-service (e.g. thin devices created/deleted,
 * thin-pool messages, metadata snapshots, etc.).
*/
bool in_service:1;
/*
* Reading the space map roots can fail, so we read it into these
* buffers before the superblock is locked and updated.
*/
__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};
struct dm_thin_device {
struct list_head list;
struct dm_pool_metadata *pmd;
dm_thin_id id;
int open_count;
bool changed:1;
bool aborted_with_changes:1;
uint64_t mapped_blocks;
uint64_t transaction_id;
uint32_t creation_time;
uint32_t snapshotted_time;
};
/*
*--------------------------------------------------------------
* superblock validator
*--------------------------------------------------------------
*/
#define SUPERBLOCK_CSUM_XOR 160774
static void sb_prepare_for_write(const struct dm_block_validator *v,
struct dm_block *b,
size_t block_size)
{
struct thin_disk_superblock *disk_super = dm_block_data(b);
disk_super->blocknr = cpu_to_le64(dm_block_location(b));
disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
block_size - sizeof(__le32),
SUPERBLOCK_CSUM_XOR));
}
static int sb_check(const struct dm_block_validator *v,
struct dm_block *b,
size_t block_size)
{
struct thin_disk_superblock *disk_super = dm_block_data(b);
__le32 csum_le;
if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
DMERR("%s failed: blocknr %llu: wanted %llu",
__func__, le64_to_cpu(disk_super->blocknr),
(unsigned long long)dm_block_location(b));
return -ENOTBLK;
}
if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
DMERR("%s failed: magic %llu: wanted %llu",
__func__, le64_to_cpu(disk_super->magic),
(unsigned long long)THIN_SUPERBLOCK_MAGIC);
return -EILSEQ;
}
csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
block_size - sizeof(__le32),
SUPERBLOCK_CSUM_XOR));
if (csum_le != disk_super->csum) {
DMERR("%s failed: csum %u: wanted %u",
__func__, le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
return -EILSEQ;
}
return 0;
}
static const struct dm_block_validator sb_validator = {
.name = "superblock",
.prepare_for_write = sb_prepare_for_write,
.check = sb_check
};
/*
*--------------------------------------------------------------
* Methods for the btree value types
*--------------------------------------------------------------
*/
static uint64_t pack_block_time(dm_block_t b, uint32_t t)
{
return (b << 24) | t;
}
static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
{
*b = v >> 24;
*t = v & ((1 << 24) - 1);
}
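/*
 * Worked example (illustrative only): data block 0x12345 mapped at
 * time 7 packs to (0x12345ULL << 24) | 7 == 0x0000012345000007; the
 * low 24 bits hold the time, the upper 40 bits the block address.
 */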
/*
* It's more efficient to call dm_sm_{inc,dec}_blocks as few times as
* possible. 'with_runs' reads contiguous runs of blocks, and calls the
* given sm function.
*/
typedef int (*run_fn)(struct dm_space_map *, dm_block_t, dm_block_t);
static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned int count, run_fn fn)
{
uint64_t b, begin, end;
uint32_t t;
bool in_run = false;
unsigned int i;
for (i = 0; i < count; i++, value_le++) {
/* We know value_le is 8 byte aligned */
unpack_block_time(le64_to_cpu(*value_le), &b, &t);
if (in_run) {
if (b == end) {
end++;
} else {
fn(sm, begin, end);
begin = b;
end = b + 1;
}
} else {
in_run = true;
begin = b;
end = b + 1;
}
}
if (in_run)
fn(sm, begin, end);
}
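/*
 * For example (illustrative): values decoding to data blocks 5, 6, 7
 * and 9 produce fn(sm, 5, 8) followed by fn(sm, 9, 10); the end of
 * each run is exclusive.
 */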
static void data_block_inc(void *context, const void *value_le, unsigned int count)
{
with_runs((struct dm_space_map *) context,
(const __le64 *) value_le, count, dm_sm_inc_blocks);
}
static void data_block_dec(void *context, const void *value_le, unsigned int count)
{
with_runs((struct dm_space_map *) context,
(const __le64 *) value_le, count, dm_sm_dec_blocks);
}
static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
{
__le64 v1_le, v2_le;
uint64_t b1, b2;
uint32_t t;
memcpy(&v1_le, value1_le, sizeof(v1_le));
memcpy(&v2_le, value2_le, sizeof(v2_le));
unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
unpack_block_time(le64_to_cpu(v2_le), &b2, &t);
return b1 == b2;
}
static void subtree_inc(void *context, const void *value, unsigned int count)
{
struct dm_btree_info *info = context;
const __le64 *root_le = value;
unsigned int i;
for (i = 0; i < count; i++, root_le++)
dm_tm_inc(info->tm, le64_to_cpu(*root_le));
}
static void subtree_dec(void *context, const void *value, unsigned int count)
{
struct dm_btree_info *info = context;
const __le64 *root_le = value;
unsigned int i;
for (i = 0; i < count; i++, root_le++)
if (dm_btree_del(info, le64_to_cpu(*root_le)))
DMERR("btree delete failed");
}
static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
__le64 v1_le, v2_le;
memcpy(&v1_le, value1_le, sizeof(v1_le));
memcpy(&v2_le, value2_le, sizeof(v2_le));
return v1_le == v2_le;
}
/*----------------------------------------------------------------*/
/*
* Variant that is used for in-core only changes or code that
* shouldn't put the pool in service on its own (e.g. commit).
*/
static inline void pmd_write_lock_in_core(struct dm_pool_metadata *pmd)
__acquires(pmd->root_lock)
{
down_write(&pmd->root_lock);
}
static inline void pmd_write_lock(struct dm_pool_metadata *pmd)
{
pmd_write_lock_in_core(pmd);
if (unlikely(!pmd->in_service))
pmd->in_service = true;
}
static inline void pmd_write_unlock(struct dm_pool_metadata *pmd)
__releases(pmd->root_lock)
{
up_write(&pmd->root_lock);
}
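/*
 * Minimal usage sketch (illustrative; __some_operation is hypothetical):
 *
 *	pmd_write_lock(pmd);
 *	if (!pmd->fail_io)
 *		r = __some_operation(pmd);
 *	pmd_write_unlock(pmd);
 *
 * pmd_write_lock() also marks the pool in-service; commit paths use
 * pmd_write_lock_in_core() to avoid doing so.
 */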
/*----------------------------------------------------------------*/
static int superblock_lock_zero(struct dm_pool_metadata *pmd,
struct dm_block **sblock)
{
return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION,
&sb_validator, sblock);
}
static int superblock_lock(struct dm_pool_metadata *pmd,
struct dm_block **sblock)
{
return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
&sb_validator, sblock);
}
static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
{
int r;
unsigned int i;
struct dm_block *b;
__le64 *data_le, zero = cpu_to_le64(0);
unsigned int block_size = dm_bm_block_size(bm) / sizeof(__le64);
/*
* We can't use a validator here - it may be all zeroes.
*/
r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
if (r)
return r;
data_le = dm_block_data(b);
*result = 1;
for (i = 0; i < block_size; i++) {
if (data_le[i] != zero) {
*result = 0;
break;
}
}
dm_bm_unlock(b);
return 0;
}
static void __setup_btree_details(struct dm_pool_metadata *pmd)
{
pmd->info.tm = pmd->tm;
pmd->info.levels = 2;
pmd->info.value_type.context = pmd->data_sm;
pmd->info.value_type.size = sizeof(__le64);
pmd->info.value_type.inc = data_block_inc;
pmd->info.value_type.dec = data_block_dec;
pmd->info.value_type.equal = data_block_equal;
memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
pmd->nb_info.tm = pmd->nb_tm;
pmd->tl_info.tm = pmd->tm;
pmd->tl_info.levels = 1;
pmd->tl_info.value_type.context = &pmd->bl_info;
pmd->tl_info.value_type.size = sizeof(__le64);
pmd->tl_info.value_type.inc = subtree_inc;
pmd->tl_info.value_type.dec = subtree_dec;
pmd->tl_info.value_type.equal = subtree_equal;
pmd->bl_info.tm = pmd->tm;
pmd->bl_info.levels = 1;
pmd->bl_info.value_type.context = pmd->data_sm;
pmd->bl_info.value_type.size = sizeof(__le64);
pmd->bl_info.value_type.inc = data_block_inc;
pmd->bl_info.value_type.dec = data_block_dec;
pmd->bl_info.value_type.equal = data_block_equal;
pmd->details_info.tm = pmd->tm;
pmd->details_info.levels = 1;
pmd->details_info.value_type.context = NULL;
pmd->details_info.value_type.size = sizeof(struct disk_device_details);
pmd->details_info.value_type.inc = NULL;
pmd->details_info.value_type.dec = NULL;
pmd->details_info.value_type.equal = NULL;
}
static int save_sm_roots(struct dm_pool_metadata *pmd)
{
int r;
size_t len;
r = dm_sm_root_size(pmd->metadata_sm, &len);
if (r < 0)
return r;
r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
if (r < 0)
return r;
r = dm_sm_root_size(pmd->data_sm, &len);
if (r < 0)
return r;
return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
}
static void copy_sm_roots(struct dm_pool_metadata *pmd,
struct thin_disk_superblock *disk)
{
memcpy(&disk->metadata_space_map_root,
&pmd->metadata_space_map_root,
sizeof(pmd->metadata_space_map_root));
memcpy(&disk->data_space_map_root,
&pmd->data_space_map_root,
sizeof(pmd->data_space_map_root));
}
static int __write_initial_superblock(struct dm_pool_metadata *pmd)
{
int r;
struct dm_block *sblock;
struct thin_disk_superblock *disk_super;
sector_t bdev_size = bdev_nr_sectors(pmd->bdev);
if (bdev_size > THIN_METADATA_MAX_SECTORS)
bdev_size = THIN_METADATA_MAX_SECTORS;
r = dm_sm_commit(pmd->data_sm);
if (r < 0)
return r;
r = dm_tm_pre_commit(pmd->tm);
if (r < 0)
return r;
r = save_sm_roots(pmd);
if (r < 0)
return r;
r = superblock_lock_zero(pmd, &sblock);
if (r)
return r;
disk_super = dm_block_data(sblock);
disk_super->flags = 0;
memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
disk_super->version = cpu_to_le32(THIN_VERSION);
disk_super->time = 0;
disk_super->trans_id = 0;
disk_super->held_root = 0;
copy_sm_roots(pmd, disk_super);
disk_super->data_mapping_root = cpu_to_le64(pmd->root);
disk_super->device_details_root = cpu_to_le64(pmd->details_root);
disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
return dm_tm_commit(pmd->tm, sblock);
}
static int __format_metadata(struct dm_pool_metadata *pmd)
{
int r;
r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
&pmd->tm, &pmd->metadata_sm);
if (r < 0) {
pmd->tm = NULL;
pmd->metadata_sm = NULL;
DMERR("tm_create_with_sm failed");
return r;
}
pmd->data_sm = dm_sm_disk_create(pmd->tm, 0);
if (IS_ERR(pmd->data_sm)) {
DMERR("sm_disk_create failed");
r = PTR_ERR(pmd->data_sm);
pmd->data_sm = NULL;
goto bad_cleanup_tm;
}
pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
if (!pmd->nb_tm) {
DMERR("could not create non-blocking clone tm");
r = -ENOMEM;
goto bad_cleanup_data_sm;
}
__setup_btree_details(pmd);
r = dm_btree_empty(&pmd->info, &pmd->root);
if (r < 0)
goto bad_cleanup_nb_tm;
r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
if (r < 0) {
DMERR("couldn't create devices root");
goto bad_cleanup_nb_tm;
}
r = __write_initial_superblock(pmd);
if (r)
goto bad_cleanup_nb_tm;
return 0;
bad_cleanup_nb_tm:
dm_tm_destroy(pmd->nb_tm);
pmd->nb_tm = NULL;
bad_cleanup_data_sm:
dm_sm_destroy(pmd->data_sm);
pmd->data_sm = NULL;
bad_cleanup_tm:
dm_tm_destroy(pmd->tm);
pmd->tm = NULL;
dm_sm_destroy(pmd->metadata_sm);
pmd->metadata_sm = NULL;
return r;
}
static int __check_incompat_features(struct thin_disk_superblock *disk_super,
struct dm_pool_metadata *pmd)
{
uint32_t features;
features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
if (features) {
DMERR("could not access metadata due to unsupported optional features (%lx).",
(unsigned long)features);
return -EINVAL;
}
/*
* Check for read-only metadata to skip the following RDWR checks.
*/
if (bdev_read_only(pmd->bdev))
return 0;
features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
if (features) {
DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
(unsigned long)features);
return -EINVAL;
}
return 0;
}
static int __open_metadata(struct dm_pool_metadata *pmd)
{
int r;
struct dm_block *sblock;
struct thin_disk_superblock *disk_super;
r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
&sb_validator, &sblock);
if (r < 0) {
DMERR("couldn't read superblock");
return r;
}
disk_super = dm_block_data(sblock);
/* Verify the data block size hasn't changed */
if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
DMERR("changing the data block size (from %u to %llu) is not supported",
le32_to_cpu(disk_super->data_block_size),
(unsigned long long)pmd->data_block_size);
r = -EINVAL;
goto bad_unlock_sblock;
}
r = __check_incompat_features(disk_super, pmd);
if (r < 0)
goto bad_unlock_sblock;
r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
disk_super->metadata_space_map_root,
sizeof(disk_super->metadata_space_map_root),
&pmd->tm, &pmd->metadata_sm);
if (r < 0) {
pmd->tm = NULL;
pmd->metadata_sm = NULL;
DMERR("tm_open_with_sm failed");
goto bad_unlock_sblock;
}
pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root,
sizeof(disk_super->data_space_map_root));
if (IS_ERR(pmd->data_sm)) {
DMERR("sm_disk_open failed");
r = PTR_ERR(pmd->data_sm);
pmd->data_sm = NULL;
goto bad_cleanup_tm;
}
pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
if (!pmd->nb_tm) {
DMERR("could not create non-blocking clone tm");
r = -ENOMEM;
goto bad_cleanup_data_sm;
}
	/*
	 * When opening the pool metadata, setting the roots here is
	 * redundant because they are set again in __begin_transaction().
	 * The pool abort path, however, really needs the last
	 * transaction's roots to avoid accessing a broken btree.
	 */
pmd->root = le64_to_cpu(disk_super->data_mapping_root);
pmd->details_root = le64_to_cpu(disk_super->device_details_root);
__setup_btree_details(pmd);
dm_bm_unlock(sblock);
return 0;
bad_cleanup_data_sm:
dm_sm_destroy(pmd->data_sm);
pmd->data_sm = NULL;
bad_cleanup_tm:
dm_tm_destroy(pmd->tm);
pmd->tm = NULL;
dm_sm_destroy(pmd->metadata_sm);
pmd->metadata_sm = NULL;
bad_unlock_sblock:
dm_bm_unlock(sblock);
return r;
}
static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device)
{
int r, unformatted;
r = __superblock_all_zeroes(pmd->bm, &unformatted);
if (r)
return r;
if (unformatted)
return format_device ? __format_metadata(pmd) : -EPERM;
return __open_metadata(pmd);
}
static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device)
{
int r;
pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
THIN_MAX_CONCURRENT_LOCKS);
if (IS_ERR(pmd->bm)) {
DMERR("could not create block manager");
r = PTR_ERR(pmd->bm);
pmd->bm = NULL;
return r;
}
r = __open_or_format_metadata(pmd, format_device);
if (r) {
dm_block_manager_destroy(pmd->bm);
pmd->bm = NULL;
}
return r;
}
static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd,
bool destroy_bm)
{
dm_sm_destroy(pmd->data_sm);
pmd->data_sm = NULL;
dm_sm_destroy(pmd->metadata_sm);
pmd->metadata_sm = NULL;
dm_tm_destroy(pmd->nb_tm);
pmd->nb_tm = NULL;
dm_tm_destroy(pmd->tm);
pmd->tm = NULL;
if (destroy_bm)
dm_block_manager_destroy(pmd->bm);
}
static int __begin_transaction(struct dm_pool_metadata *pmd)
{
int r;
struct thin_disk_superblock *disk_super;
struct dm_block *sblock;
	/*
	 * We re-read the superblock every time, though we shouldn't
	 * really need to.
	 */
r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
&sb_validator, &sblock);
if (r)
return r;
disk_super = dm_block_data(sblock);
pmd->time = le32_to_cpu(disk_super->time);
pmd->root = le64_to_cpu(disk_super->data_mapping_root);
pmd->details_root = le64_to_cpu(disk_super->device_details_root);
pmd->trans_id = le64_to_cpu(disk_super->trans_id);
pmd->flags = le32_to_cpu(disk_super->flags);
pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
dm_bm_unlock(sblock);
return 0;
}
static int __write_changed_details(struct dm_pool_metadata *pmd)
{
int r;
struct dm_thin_device *td, *tmp;
struct disk_device_details details;
uint64_t key;
list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
if (!td->changed)
continue;
key = td->id;
details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
details.transaction_id = cpu_to_le64(td->transaction_id);
details.creation_time = cpu_to_le32(td->creation_time);
details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
__dm_bless_for_disk(&details);
r = dm_btree_insert(&pmd->details_info, pmd->details_root,
&key, &details, &pmd->details_root);
if (r)
return r;
if (td->open_count)
td->changed = false;
else {
list_del(&td->list);
kfree(td);
}
}
return 0;
}
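/*
 * Descriptive note on commit ordering: changed device details are
 * flushed into the btree first, the data space map is committed, the
 * transaction manager is pre-committed and the space map roots saved,
 * and only then is the superblock rewritten; dm_tm_commit() writes the
 * superblock last, which publishes the new transaction atomically.
 */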
static int __commit_transaction(struct dm_pool_metadata *pmd)
{
int r;
struct thin_disk_superblock *disk_super;
struct dm_block *sblock;
	/*
	 * The thin_disk_superblock must fit within a 512-byte sector.
	 */
BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
BUG_ON(!rwsem_is_locked(&pmd->root_lock));
if (unlikely(!pmd->in_service))
return 0;
if (pmd->pre_commit_fn) {
r = pmd->pre_commit_fn(pmd->pre_commit_context);
if (r < 0) {
DMERR("pre-commit callback failed");
return r;
}
}
r = __write_changed_details(pmd);
if (r < 0)
return r;
r = dm_sm_commit(pmd->data_sm);
if (r < 0)
return r;
r = dm_tm_pre_commit(pmd->tm);
if (r < 0)
return r;
r = save_sm_roots(pmd);
if (r < 0)
return r;
r = superblock_lock(pmd, &sblock);
if (r)
return r;
disk_super = dm_block_data(sblock);
disk_super->time = cpu_to_le32(pmd->time);
disk_super->data_mapping_root = cpu_to_le64(pmd->root);
disk_super->device_details_root = cpu_to_le64(pmd->details_root);
disk_super->trans_id = cpu_to_le64(pmd->trans_id);
disk_super->flags = cpu_to_le32(pmd->flags);
copy_sm_roots(pmd, disk_super);
return dm_tm_commit(pmd->tm, sblock);
}
static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
{
int r;
dm_block_t total;
dm_block_t max_blocks = 4096; /* 16M */
r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
if (r) {
DMERR("could not get size of metadata device");
pmd->metadata_reserve = max_blocks;
} else
pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
}
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool format_device)
{
int r;
struct dm_pool_metadata *pmd;
pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
if (!pmd) {
DMERR("could not allocate metadata struct");
return ERR_PTR(-ENOMEM);
}
init_rwsem(&pmd->root_lock);
pmd->time = 0;
INIT_LIST_HEAD(&pmd->thin_devices);
pmd->fail_io = false;
pmd->in_service = false;
pmd->bdev = bdev;
pmd->data_block_size = data_block_size;
pmd->pre_commit_fn = NULL;
pmd->pre_commit_context = NULL;
r = __create_persistent_data_objects(pmd, format_device);
if (r) {
kfree(pmd);
return ERR_PTR(r);
}
r = __begin_transaction(pmd);
if (r < 0) {
if (dm_pool_metadata_close(pmd) < 0)
DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
return ERR_PTR(r);
}
__set_metadata_reserve(pmd);
return pmd;
}
int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
{
int r;
unsigned int open_devices = 0;
struct dm_thin_device *td, *tmp;
down_read(&pmd->root_lock);
list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
if (td->open_count)
open_devices++;
else {
list_del(&td->list);
kfree(td);
}
}
up_read(&pmd->root_lock);
if (open_devices) {
DMERR("attempt to close pmd when %u device(s) are still open",
open_devices);
return -EBUSY;
}
pmd_write_lock_in_core(pmd);
if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) {
r = __commit_transaction(pmd);
if (r < 0)
DMWARN("%s: __commit_transaction() failed, error = %d",
__func__, r);
}
pmd_write_unlock(pmd);
__destroy_persistent_data_objects(pmd, true);
kfree(pmd);
return 0;
}
/*
* __open_device: Returns @td corresponding to device with id @dev,
* creating it if @create is set and incrementing @td->open_count.
* On failure, @td is undefined.
*/
static int __open_device(struct dm_pool_metadata *pmd,
dm_thin_id dev, int create,
struct dm_thin_device **td)
{
int r, changed = 0;
struct dm_thin_device *td2;
uint64_t key = dev;
struct disk_device_details details_le;
/*
* If the device is already open, return it.
*/
list_for_each_entry(td2, &pmd->thin_devices, list)
if (td2->id == dev) {
/*
* May not create an already-open device.
*/
if (create)
return -EEXIST;
td2->open_count++;
*td = td2;
return 0;
}
/*
* Check the device exists.
*/
r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
&key, &details_le);
if (r) {
if (r != -ENODATA || !create)
return r;
/*
* Create new device.
*/
changed = 1;
details_le.mapped_blocks = 0;
details_le.transaction_id = cpu_to_le64(pmd->trans_id);
details_le.creation_time = cpu_to_le32(pmd->time);
details_le.snapshotted_time = cpu_to_le32(pmd->time);
}
*td = kmalloc(sizeof(**td), GFP_NOIO);
if (!*td)
return -ENOMEM;
(*td)->pmd = pmd;
(*td)->id = dev;
(*td)->open_count = 1;
(*td)->changed = changed;
(*td)->aborted_with_changes = false;
(*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
(*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
(*td)->creation_time = le32_to_cpu(details_le.creation_time);
(*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);
list_add(&(*td)->list, &pmd->thin_devices);
return 0;
}
static void __close_device(struct dm_thin_device *td)
{
--td->open_count;
}
static int __create_thin(struct dm_pool_metadata *pmd,
dm_thin_id dev)
{
int r;
dm_block_t dev_root;
uint64_t key = dev;
struct dm_thin_device *td;
__le64 value;
r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
&key, NULL);
if (!r)
return -EEXIST;
/*
* Create an empty btree for the mappings.
*/
r = dm_btree_empty(&pmd->bl_info, &dev_root);
if (r)
return r;
/*
* Insert it into the main mapping tree.
*/
value = cpu_to_le64(dev_root);
__dm_bless_for_disk(&value);
r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
if (r) {
dm_btree_del(&pmd->bl_info, dev_root);
return r;
}
r = __open_device(pmd, dev, 1, &td);
if (r) {
dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
dm_btree_del(&pmd->bl_info, dev_root);
return r;
}
__close_device(td);
return r;
}
int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
int r = -EINVAL;
pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __create_thin(pmd, dev);
pmd_write_unlock(pmd);
return r;
}
static int __set_snapshot_details(struct dm_pool_metadata *pmd,
struct dm_thin_device *snap,
dm_thin_id origin, uint32_t time)
{
int r;
struct dm_thin_device *td;
r = __open_device(pmd, origin, 0, &td);
if (r)
return r;
td->changed = true;
td->snapshotted_time = time;
snap->mapped_blocks = td->mapped_blocks;
snap->snapshotted_time = time;
__close_device(td);
return 0;
}
static int __create_snap(struct dm_pool_metadata *pmd,
dm_thin_id dev, dm_thin_id origin)
{
int r;
dm_block_t origin_root;
uint64_t key = origin, dev_key = dev;
struct dm_thin_device *td;
__le64 value;
/* check this device is unused */
r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
&dev_key, NULL);
if (!r)
return -EEXIST;
/* find the mapping tree for the origin */
r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
if (r)
return r;
origin_root = le64_to_cpu(value);
/* clone the origin, an inc will do */
dm_tm_inc(pmd->tm, origin_root);
/* insert into the main mapping tree */
value = cpu_to_le64(origin_root);
__dm_bless_for_disk(&value);
key = dev;
r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
if (r) {
dm_tm_dec(pmd->tm, origin_root);
return r;
}
pmd->time++;
r = __open_device(pmd, dev, 1, &td);
if (r)
goto bad;
r = __set_snapshot_details(pmd, td, origin, pmd->time);
__close_device(td);
if (r)
goto bad;
return 0;
bad:
dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
dm_btree_remove(&pmd->details_info, pmd->details_root,
&key, &pmd->details_root);
return r;
}
int dm_pool_create_snap(struct dm_pool_metadata *pmd,
dm_thin_id dev,
dm_thin_id origin)
{
int r = -EINVAL;
pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __create_snap(pmd, dev, origin);
pmd_write_unlock(pmd);
return r;
}
static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
int r;
uint64_t key = dev;
struct dm_thin_device *td;
/* TODO: failure should mark the transaction invalid */
r = __open_device(pmd, dev, 0, &td);
if (r)
return r;
if (td->open_count > 1) {
__close_device(td);
return -EBUSY;
}
list_del(&td->list);
kfree(td);
r = dm_btree_remove(&pmd->details_info, pmd->details_root,
&key, &pmd->details_root);
if (r)
return r;
r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
if (r)
return r;
return 0;
}
int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
dm_thin_id dev)
{
int r = -EINVAL;
pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __delete_device(pmd, dev);
pmd_write_unlock(pmd);
return r;
}
int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
uint64_t current_id,
uint64_t new_id)
{
int r = -EINVAL;
pmd_write_lock(pmd);
if (pmd->fail_io)
goto out;
if (pmd->trans_id != current_id) {
DMERR("mismatched transaction id");
goto out;
}
pmd->trans_id = new_id;
r = 0;
out:
pmd_write_unlock(pmd);
return r;
}
int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
uint64_t *result)
{
int r = -EINVAL;
down_read(&pmd->root_lock);
if (!pmd->fail_io) {
*result = pmd->trans_id;
r = 0;
}
up_read(&pmd->root_lock);
return r;
}
static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
int r, inc;
struct thin_disk_superblock *disk_super;
struct dm_block *copy, *sblock;
dm_block_t held_root;
/*
* We commit to ensure the btree roots which we increment in a
* moment are up to date.
*/
r = __commit_transaction(pmd);
if (r < 0) {
DMWARN("%s: __commit_transaction() failed, error = %d",
__func__, r);
return r;
}
/*
* Copy the superblock.
*/
dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
&sb_validator, ©, &inc);
if (r)
return r;
BUG_ON(!inc);
held_root = dm_block_location(copy);
disk_super = dm_block_data(copy);
if (le64_to_cpu(disk_super->held_root)) {
DMWARN("Pool metadata snapshot already exists: release this before taking another.");
dm_tm_dec(pmd->tm, held_root);
dm_tm_unlock(pmd->tm, copy);
return -EBUSY;
}
/*
	 * Wipe the space map roots since we're not publishing them.
*/
memset(&disk_super->data_space_map_root, 0,
sizeof(disk_super->data_space_map_root));
memset(&disk_super->metadata_space_map_root, 0,
sizeof(disk_super->metadata_space_map_root));
/*
* Increment the data structures that need to be preserved.
*/
dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
dm_tm_unlock(pmd->tm, copy);
/*
* Write the held root into the superblock.
*/
r = superblock_lock(pmd, &sblock);
if (r) {
dm_tm_dec(pmd->tm, held_root);
return r;
}
disk_super = dm_block_data(sblock);
disk_super->held_root = cpu_to_le64(held_root);
dm_bm_unlock(sblock);
return 0;
}
int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __reserve_metadata_snap(pmd);
pmd_write_unlock(pmd);
return r;
}
static int __release_metadata_snap(struct dm_pool_metadata *pmd)
{
int r;
struct thin_disk_superblock *disk_super;
struct dm_block *sblock, *copy;
dm_block_t held_root;
r = superblock_lock(pmd, &sblock);
if (r)
return r;
disk_super = dm_block_data(sblock);
held_root = le64_to_cpu(disk_super->held_root);
disk_super->held_root = cpu_to_le64(0);
dm_bm_unlock(sblock);
if (!held_root) {
DMWARN("No pool metadata snapshot found: nothing to release.");
return -EINVAL;
}
r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, ©);
if (r)
return r;
disk_super = dm_block_data(copy);
dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
dm_sm_dec_block(pmd->metadata_sm, held_root);
dm_tm_unlock(pmd->tm, copy);
return 0;
}
int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __release_metadata_snap(pmd);
pmd_write_unlock(pmd);
return r;
}
static int __get_metadata_snap(struct dm_pool_metadata *pmd,
dm_block_t *result)
{
int r;
struct thin_disk_superblock *disk_super;
struct dm_block *sblock;
r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
&sb_validator, &sblock);
if (r)
return r;
disk_super = dm_block_data(sblock);
*result = le64_to_cpu(disk_super->held_root);
dm_bm_unlock(sblock);
return 0;
}
int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
dm_block_t *result)
{
int r = -EINVAL;
down_read(&pmd->root_lock);
if (!pmd->fail_io)
r = __get_metadata_snap(pmd, result);
up_read(&pmd->root_lock);
return r;
}
int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
struct dm_thin_device **td)
{
int r = -EINVAL;
pmd_write_lock_in_core(pmd);
if (!pmd->fail_io)
r = __open_device(pmd, dev, 0, td);
pmd_write_unlock(pmd);
return r;
}
int dm_pool_close_thin_device(struct dm_thin_device *td)
{
pmd_write_lock_in_core(td->pmd);
__close_device(td);
pmd_write_unlock(td->pmd);
return 0;
}
dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
{
return td->id;
}
/*
* Check whether @time (of block creation) is older than @td's last snapshot.
* If so then the associated block is shared with the last snapshot device.
* Any block on a device created *after* the device last got snapshotted is
* necessarily not shared.
*/
static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
{
return td->snapshotted_time > time;
}
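/*
 * Example (illustrative): if td->snapshotted_time is 5, a block created
 * at time 3 predates the last snapshot and is reported shared, whereas
 * a block created at time 5 or later is not.
 */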
static void unpack_lookup_result(struct dm_thin_device *td, __le64 value,
struct dm_thin_lookup_result *result)
{
uint64_t block_time = 0;
dm_block_t exception_block;
uint32_t exception_time;
block_time = le64_to_cpu(value);
unpack_block_time(block_time, &exception_block, &exception_time);
result->block = exception_block;
result->shared = __snapshotted_since(td, exception_time);
}
static int __find_block(struct dm_thin_device *td, dm_block_t block,
int can_issue_io, struct dm_thin_lookup_result *result)
{
int r;
__le64 value;
struct dm_pool_metadata *pmd = td->pmd;
dm_block_t keys[2] = { td->id, block };
struct dm_btree_info *info;
if (can_issue_io)
info = &pmd->info;
else
info = &pmd->nb_info;
r = dm_btree_lookup(info, pmd->root, keys, &value);
if (!r)
unpack_lookup_result(td, value, result);
return r;
}
int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
int can_issue_io, struct dm_thin_lookup_result *result)
{
int r;
struct dm_pool_metadata *pmd = td->pmd;
down_read(&pmd->root_lock);
if (pmd->fail_io) {
up_read(&pmd->root_lock);
return -EINVAL;
}
r = __find_block(td, block, can_issue_io, result);
up_read(&pmd->root_lock);
return r;
}
static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
dm_block_t *vblock,
struct dm_thin_lookup_result *result)
{
int r;
__le64 value;
struct dm_pool_metadata *pmd = td->pmd;
dm_block_t keys[2] = { td->id, block };
r = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value);
if (!r)
unpack_lookup_result(td, value, result);
return r;
}
static int __find_mapped_range(struct dm_thin_device *td,
dm_block_t begin, dm_block_t end,
dm_block_t *thin_begin, dm_block_t *thin_end,
dm_block_t *pool_begin, bool *maybe_shared)
{
int r;
dm_block_t pool_end;
struct dm_thin_lookup_result lookup;
if (end < begin)
return -ENODATA;
r = __find_next_mapped_block(td, begin, &begin, &lookup);
if (r)
return r;
if (begin >= end)
return -ENODATA;
*thin_begin = begin;
*pool_begin = lookup.block;
*maybe_shared = lookup.shared;
begin++;
pool_end = *pool_begin + 1;
while (begin != end) {
r = __find_block(td, begin, true, &lookup);
if (r) {
if (r == -ENODATA)
break;
return r;
}
if ((lookup.block != pool_end) ||
(lookup.shared != *maybe_shared))
break;
pool_end++;
begin++;
}
*thin_end = begin;
return 0;
}
int dm_thin_find_mapped_range(struct dm_thin_device *td,
dm_block_t begin, dm_block_t end,
dm_block_t *thin_begin, dm_block_t *thin_end,
dm_block_t *pool_begin, bool *maybe_shared)
{
int r = -EINVAL;
struct dm_pool_metadata *pmd = td->pmd;
down_read(&pmd->root_lock);
if (!pmd->fail_io) {
r = __find_mapped_range(td, begin, end, thin_begin, thin_end,
pool_begin, maybe_shared);
}
up_read(&pmd->root_lock);
return r;
}
static int __insert(struct dm_thin_device *td, dm_block_t block,
dm_block_t data_block)
{
int r, inserted;
__le64 value;
struct dm_pool_metadata *pmd = td->pmd;
dm_block_t keys[2] = { td->id, block };
value = cpu_to_le64(pack_block_time(data_block, pmd->time));
__dm_bless_for_disk(&value);
r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
&pmd->root, &inserted);
if (r)
return r;
td->changed = true;
if (inserted)
td->mapped_blocks++;
return 0;
}
int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
dm_block_t data_block)
{
int r = -EINVAL;
pmd_write_lock(td->pmd);
if (!td->pmd->fail_io)
r = __insert(td, block, data_block);
pmd_write_unlock(td->pmd);
return r;
}
static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
{
int r;
unsigned int count, total_count = 0;
struct dm_pool_metadata *pmd = td->pmd;
dm_block_t keys[1] = { td->id };
__le64 value;
dm_block_t mapping_root;
/*
* Find the mapping tree
*/
r = dm_btree_lookup(&pmd->tl_info, pmd->root, keys, &value);
if (r)
return r;
/*
* Remove from the mapping tree, taking care to inc the
* ref count so it doesn't get deleted.
*/
mapping_root = le64_to_cpu(value);
dm_tm_inc(pmd->tm, mapping_root);
r = dm_btree_remove(&pmd->tl_info, pmd->root, keys, &pmd->root);
if (r)
return r;
/*
	 * dm_btree_remove_leaves() stops at the first unmapped entry, so
	 * we have to loop round finding mapped ranges.
*/
while (begin < end) {
r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
if (r == -ENODATA)
break;
if (r)
return r;
if (begin >= end)
break;
r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
if (r)
return r;
total_count += count;
}
td->mapped_blocks -= total_count;
td->changed = true;
/*
* Reinsert the mapping tree.
*/
value = cpu_to_le64(mapping_root);
__dm_bless_for_disk(&value);
return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
}
int dm_thin_remove_range(struct dm_thin_device *td,
dm_block_t begin, dm_block_t end)
{
int r = -EINVAL;
pmd_write_lock(td->pmd);
if (!td->pmd->fail_io)
r = __remove_range(td, begin, end);
pmd_write_unlock(td->pmd);
return r;
}
int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
{
int r = -EINVAL;
uint32_t ref_count;
down_read(&pmd->root_lock);
if (!pmd->fail_io) {
r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
if (!r)
*result = (ref_count > 1);
}
up_read(&pmd->root_lock);
return r;
}
int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
int r = -EINVAL;
pmd_write_lock(pmd);
if (!pmd->fail_io)
r = dm_sm_inc_blocks(pmd->data_sm, b, e);
pmd_write_unlock(pmd);
return r;
}
int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
int r = -EINVAL;
pmd_write_lock(pmd);
if (!pmd->fail_io)
r = dm_sm_dec_blocks(pmd->data_sm, b, e);
pmd_write_unlock(pmd);
return r;
}
bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
{
int r;
down_read(&td->pmd->root_lock);
r = td->changed;
up_read(&td->pmd->root_lock);
return r;
}
bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
{
bool r = false;
struct dm_thin_device *td, *tmp;
down_read(&pmd->root_lock);
list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
if (td->changed) {
r = td->changed;
break;
}
}
up_read(&pmd->root_lock);
return r;
}
bool dm_thin_aborted_changes(struct dm_thin_device *td)
{
bool r;
down_read(&td->pmd->root_lock);
r = td->aborted_with_changes;
up_read(&td->pmd->root_lock);
return r;
}
int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
{
int r = -EINVAL;
pmd_write_lock(pmd);
if (!pmd->fail_io)
r = dm_sm_new_block(pmd->data_sm, result);
pmd_write_unlock(pmd);
return r;
}
int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
/*
* Care is taken to not have commit be what
* triggers putting the thin-pool in-service.
*/
pmd_write_lock_in_core(pmd);
if (pmd->fail_io)
goto out;
r = __commit_transaction(pmd);
if (r < 0)
goto out;
/*
* Open the next transaction.
*/
r = __begin_transaction(pmd);
out:
pmd_write_unlock(pmd);
return r;
}
static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
{
struct dm_thin_device *td;
list_for_each_entry(td, &pmd->thin_devices, list)
td->aborted_with_changes = td->changed;
}
int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
/* fail_io is double-checked with pmd->root_lock held below */
if (unlikely(pmd->fail_io))
return r;
pmd_write_lock(pmd);
if (pmd->fail_io) {
pmd_write_unlock(pmd);
return r;
}
__set_abort_with_changes_flags(pmd);
/* destroy data_sm/metadata_sm/nb_tm/tm */
__destroy_persistent_data_objects(pmd, false);
/* reset bm */
dm_block_manager_reset(pmd->bm);
/* rebuild data_sm/metadata_sm/nb_tm/tm */
r = __open_or_format_metadata(pmd, false);
if (r)
pmd->fail_io = true;
pmd_write_unlock(pmd);
return r;
}
int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
{
int r = -EINVAL;
down_read(&pmd->root_lock);
if (!pmd->fail_io)
r = dm_sm_get_nr_free(pmd->data_sm, result);
up_read(&pmd->root_lock);
return r;
}
int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
dm_block_t *result)
{
int r = -EINVAL;
down_read(&pmd->root_lock);
if (!pmd->fail_io)
r = dm_sm_get_nr_free(pmd->metadata_sm, result);
if (!r) {
if (*result < pmd->metadata_reserve)
*result = 0;
else
*result -= pmd->metadata_reserve;
}
up_read(&pmd->root_lock);
return r;
}
int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
dm_block_t *result)
{
int r = -EINVAL;
down_read(&pmd->root_lock);
if (!pmd->fail_io)
r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
up_read(&pmd->root_lock);
return r;
}
int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
{
int r = -EINVAL;
down_read(&pmd->root_lock);
if (!pmd->fail_io)
r = dm_sm_get_nr_blocks(pmd->data_sm, result);
up_read(&pmd->root_lock);
return r;
}
int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
{
int r = -EINVAL;
struct dm_pool_metadata *pmd = td->pmd;
down_read(&pmd->root_lock);
if (!pmd->fail_io) {
*result = td->mapped_blocks;
r = 0;
}
up_read(&pmd->root_lock);
return r;
}
static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
{
int r;
__le64 value_le;
dm_block_t thin_root;
struct dm_pool_metadata *pmd = td->pmd;
r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);
if (r)
return r;
thin_root = le64_to_cpu(value_le);
return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);
}
int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
dm_block_t *result)
{
int r = -EINVAL;
struct dm_pool_metadata *pmd = td->pmd;
down_read(&pmd->root_lock);
if (!pmd->fail_io)
r = __highest_block(td, result);
up_read(&pmd->root_lock);
return r;
}
static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
{
int r;
dm_block_t old_count;
r = dm_sm_get_nr_blocks(sm, &old_count);
if (r)
return r;
if (new_count == old_count)
return 0;
if (new_count < old_count) {
DMERR("cannot reduce size of space map");
return -EINVAL;
}
return dm_sm_extend(sm, new_count - old_count);
}
int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
int r = -EINVAL;
pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __resize_space_map(pmd->data_sm, new_count);
pmd_write_unlock(pmd);
return r;
}
int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
int r = -EINVAL;
pmd_write_lock(pmd);
if (!pmd->fail_io) {
r = __resize_space_map(pmd->metadata_sm, new_count);
if (!r)
__set_metadata_reserve(pmd);
}
pmd_write_unlock(pmd);
return r;
}
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
{
pmd_write_lock_in_core(pmd);
dm_bm_set_read_only(pmd->bm);
pmd_write_unlock(pmd);
}
void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
{
pmd_write_lock_in_core(pmd);
dm_bm_set_read_write(pmd->bm);
pmd_write_unlock(pmd);
}
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_block_t threshold,
dm_sm_threshold_fn fn,
void *context)
{
int r = -EINVAL;
pmd_write_lock_in_core(pmd);
if (!pmd->fail_io) {
r = dm_sm_register_threshold_callback(pmd->metadata_sm,
threshold, fn, context);
}
pmd_write_unlock(pmd);
return r;
}
void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
dm_pool_pre_commit_fn fn,
void *context)
{
pmd_write_lock_in_core(pmd);
pmd->pre_commit_fn = fn;
pmd->pre_commit_context = context;
pmd_write_unlock(pmd);
}
int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
struct dm_block *sblock;
struct thin_disk_superblock *disk_super;
pmd_write_lock(pmd);
if (pmd->fail_io)
goto out;
pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
r = superblock_lock(pmd, &sblock);
if (r) {
DMERR("couldn't lock superblock");
goto out;
}
disk_super = dm_block_data(sblock);
disk_super->flags = cpu_to_le32(pmd->flags);
dm_bm_unlock(sblock);
out:
pmd_write_unlock(pmd);
return r;
}
bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
{
bool needs_check;
down_read(&pmd->root_lock);
needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
up_read(&pmd->root_lock);
return needs_check;
}
void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
{
down_read(&pmd->root_lock);
if (!pmd->fail_io)
dm_tm_issue_prefetches(pmd->tm);
up_read(&pmd->root_lock);
}
|
// SPDX-License-Identifier: GPL-2.0-only
/* MCP23S08 SPI GPIO driver */
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include "pinctrl-mcp23s08.h"
#define MCP_MAX_DEV_PER_CS 8
/*
* A given spi_device can represent up to eight mcp23sxx chips
* sharing the same chipselect but using different addresses
* (e.g. chips #0 and #3 might be populated, but not #1 or #2).
* Driver data holds all the per-chip data.
*/
struct mcp23s08_driver_data {
unsigned ngpio;
struct mcp23s08 *mcp[8];
struct mcp23s08 chip[];
};
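/*
 * Example (illustrative): microchip,spi-present-mask = <0x09> declares
 * chips #0 and #3 on the shared chipselect (bits 0 and 3 set).
 */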
static int mcp23sxx_spi_write(void *context, const void *data, size_t count)
{
struct mcp23s08 *mcp = context;
struct spi_device *spi = to_spi_device(mcp->dev);
struct spi_message m;
struct spi_transfer t[2] = { { .tx_buf = &mcp->addr, .len = 1, },
{ .tx_buf = data, .len = count, }, };
spi_message_init(&m);
spi_message_add_tail(&t[0], &m);
spi_message_add_tail(&t[1], &m);
return spi_sync(spi, &m);
}
static int mcp23sxx_spi_gather_write(void *context,
const void *reg, size_t reg_size,
const void *val, size_t val_size)
{
struct mcp23s08 *mcp = context;
struct spi_device *spi = to_spi_device(mcp->dev);
struct spi_message m;
struct spi_transfer t[3] = { { .tx_buf = &mcp->addr, .len = 1, },
{ .tx_buf = reg, .len = reg_size, },
{ .tx_buf = val, .len = val_size, }, };
spi_message_init(&m);
spi_message_add_tail(&t[0], &m);
spi_message_add_tail(&t[1], &m);
spi_message_add_tail(&t[2], &m);
return spi_sync(spi, &m);
}
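/*
 * Descriptive note: a read sends the device address byte with the read
 * bit (bit 0) set, then the register number; the chip then clocks the
 * register contents back.
 */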
static int mcp23sxx_spi_read(void *context, const void *reg, size_t reg_size,
void *val, size_t val_size)
{
struct mcp23s08 *mcp = context;
struct spi_device *spi = to_spi_device(mcp->dev);
u8 tx[2];
if (reg_size != 1)
return -EINVAL;
tx[0] = mcp->addr | 0x01;
tx[1] = *((u8 *) reg);
return spi_write_then_read(spi, tx, sizeof(tx), val, val_size);
}
static const struct regmap_bus mcp23sxx_spi_regmap = {
.write = mcp23sxx_spi_write,
.gather_write = mcp23sxx_spi_gather_write,
.read = mcp23sxx_spi_read,
};
static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
unsigned int addr,
const struct mcp23s08_info *info)
{
struct regmap_config *copy;
const char *name;
switch (info->type) {
case MCP_TYPE_S08:
mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL, "mcp23s08.%d", addr);
if (!mcp->chip.label)
return -ENOMEM;
name = devm_kasprintf(dev, GFP_KERNEL, "%d", addr);
if (!name)
return -ENOMEM;
break;
case MCP_TYPE_S17:
mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL, "mcp23s17.%d", addr);
if (!mcp->chip.label)
return -ENOMEM;
name = devm_kasprintf(dev, GFP_KERNEL, "%d", addr);
if (!name)
return -ENOMEM;
break;
case MCP_TYPE_S18:
mcp->chip.label = info->label;
name = info->regmap->name;
break;
default:
dev_err(dev, "invalid device type (%d)\n", info->type);
return -EINVAL;
}
mcp->reg_shift = info->reg_shift;
mcp->chip.ngpio = info->ngpio;
copy = devm_kmemdup(dev, info->regmap, sizeof(*info->regmap), GFP_KERNEL);
if (!copy)
return -ENOMEM;
copy->name = name;
mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, copy);
if (IS_ERR(mcp->regmap))
dev_err(dev, "regmap init failed for %s\n", mcp->chip.label);
return PTR_ERR_OR_ZERO(mcp->regmap);
}
static int mcp23s08_probe(struct spi_device *spi)
{
struct mcp23s08_driver_data *data;
const struct mcp23s08_info *info;
struct device *dev = &spi->dev;
unsigned long spi_present_mask;
unsigned int ngpio = 0;
unsigned int addr;
int chips;
int ret;
u32 v;
info = spi_get_device_match_data(spi);
ret = device_property_read_u32(dev, "microchip,spi-present-mask", &v);
if (ret) {
ret = device_property_read_u32(dev, "mcp,spi-present-mask", &v);
if (ret) {
dev_err(dev, "missing spi-present-mask");
return ret;
}
}
spi_present_mask = v;
if (!spi_present_mask || spi_present_mask >= BIT(MCP_MAX_DEV_PER_CS)) {
dev_err(dev, "invalid spi-present-mask");
return -ENODEV;
}
chips = hweight_long(spi_present_mask);
data = devm_kzalloc(dev, struct_size(data, chip, chips), GFP_KERNEL);
if (!data)
return -ENOMEM;
spi_set_drvdata(spi, data);
for_each_set_bit(addr, &spi_present_mask, MCP_MAX_DEV_PER_CS) {
data->mcp[addr] = &data->chip[--chips];
data->mcp[addr]->irq = spi->irq;
ret = mcp23s08_spi_regmap_init(data->mcp[addr], dev, addr, info);
if (ret)
return ret;
data->mcp[addr]->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL,
"mcp23xxx-pinctrl.%d",
addr);
if (!data->mcp[addr]->pinctrl_desc.name)
return -ENOMEM;
ret = mcp23s08_probe_one(data->mcp[addr], dev, 0x40 | (addr << 1),
info->type, -1);
if (ret < 0)
return ret;
ngpio += data->mcp[addr]->chip.ngpio;
}
data->ngpio = ngpio;
return 0;
}
static const struct mcp23s08_info mcp23s08_spi = {
.regmap = &mcp23x08_regmap,
.type = MCP_TYPE_S08,
.ngpio = 8,
.reg_shift = 0,
};
static const struct mcp23s08_info mcp23s17_spi = {
.regmap = &mcp23x17_regmap,
.type = MCP_TYPE_S17,
.ngpio = 16,
.reg_shift = 1,
};
static const struct mcp23s08_info mcp23s18_spi = {
.regmap = &mcp23x17_regmap,
.label = "mcp23s18",
.type = MCP_TYPE_S18,
.ngpio = 16,
.reg_shift = 1,
};
static const struct spi_device_id mcp23s08_ids[] = {
{ "mcp23s08", (kernel_ulong_t)&mcp23s08_spi },
{ "mcp23s17", (kernel_ulong_t)&mcp23s17_spi },
{ "mcp23s18", (kernel_ulong_t)&mcp23s18_spi },
{ }
};
MODULE_DEVICE_TABLE(spi, mcp23s08_ids);
static const struct of_device_id mcp23s08_spi_of_match[] = {
{ .compatible = "microchip,mcp23s08", .data = &mcp23s08_spi },
{ .compatible = "microchip,mcp23s17", .data = &mcp23s17_spi },
{ .compatible = "microchip,mcp23s18", .data = &mcp23s18_spi },
/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
{ .compatible = "mcp,mcp23s08", .data = &mcp23s08_spi },
{ .compatible = "mcp,mcp23s17", .data = &mcp23s17_spi },
{ }
};
MODULE_DEVICE_TABLE(of, mcp23s08_spi_of_match);
static struct spi_driver mcp23s08_driver = {
.probe = mcp23s08_probe,
.id_table = mcp23s08_ids,
.driver = {
.name = "mcp23s08",
.of_match_table = mcp23s08_spi_of_match,
},
};
static int __init mcp23s08_spi_init(void)
{
return spi_register_driver(&mcp23s08_driver);
}
/*
* Register after SPI postcore initcall and before
* subsys initcalls that may rely on these GPIOs.
*/
subsys_initcall(mcp23s08_spi_init);
static void mcp23s08_spi_exit(void)
{
spi_unregister_driver(&mcp23s08_driver);
}
module_exit(mcp23s08_spi_exit);
MODULE_DESCRIPTION("MCP23S08 SPI GPIO driver");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
/**
* mei_me_cl_init - initialize me client
*
* @me_cl: me client
*/
void mei_me_cl_init(struct mei_me_client *me_cl)
{
INIT_LIST_HEAD(&me_cl->list);
kref_init(&me_cl->refcnt);
}
/**
* mei_me_cl_get - increases me client refcount
*
* @me_cl: me client
*
* Locking: called under "dev->device_lock" lock
*
* Return: me client or NULL
*/
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
return me_cl;
return NULL;
}
/**
* mei_me_cl_release - free me client
*
* @ref: me_client refcount
*
* Locking: called under "dev->device_lock" lock
*/
static void mei_me_cl_release(struct kref *ref)
{
struct mei_me_client *me_cl =
container_of(ref, struct mei_me_client, refcnt);
kfree(me_cl);
}
/**
* mei_me_cl_put - decrease me client refcount and free client if necessary
*
* @me_cl: me client
*
* Locking: called under "dev->device_lock" lock
*/
void mei_me_cl_put(struct mei_me_client *me_cl)
{
if (me_cl)
kref_put(&me_cl->refcnt, mei_me_cl_release);
}
/**
* __mei_me_cl_del - delete me client from the list and decrease
* reference counter
*
* @dev: mei device
* @me_cl: me client
*
* Locking: dev->me_clients_rwsem
*/
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
if (!me_cl)
return;
list_del_init(&me_cl->list);
mei_me_cl_put(me_cl);
}
/**
* mei_me_cl_del - delete me client from the list and decrease
* reference counter
*
* @dev: mei device
* @me_cl: me client
*/
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
down_write(&dev->me_clients_rwsem);
__mei_me_cl_del(dev, me_cl);
up_write(&dev->me_clients_rwsem);
}
/**
* mei_me_cl_add - add me client to the list
*
* @dev: mei device
* @me_cl: me client
*/
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
down_write(&dev->me_clients_rwsem);
list_add(&me_cl->list, &dev->me_clients);
up_write(&dev->me_clients_rwsem);
}
/**
* __mei_me_cl_by_uuid - locate me client by uuid
* increases ref count
*
* @dev: mei device
* @uuid: me client uuid
*
* Return: me client or NULL if not found
*
* Locking: dev->me_clients_rwsem
*/
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
const uuid_le *uuid)
{
struct mei_me_client *me_cl;
const uuid_le *pn;
WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
list_for_each_entry(me_cl, &dev->me_clients, list) {
pn = &me_cl->props.protocol_name;
if (uuid_le_cmp(*uuid, *pn) == 0)
return mei_me_cl_get(me_cl);
}
return NULL;
}
/**
* mei_me_cl_by_uuid - locate me client by uuid
* increases ref count
*
* @dev: mei device
* @uuid: me client uuid
*
* Return: me client or NULL if not found
*
* Locking: dev->me_clients_rwsem
*/
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
const uuid_le *uuid)
{
struct mei_me_client *me_cl;
down_read(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid(dev, uuid);
up_read(&dev->me_clients_rwsem);
return me_cl;
}
/**
* mei_me_cl_by_id - locate me client by client id
* increases ref count
*
* @dev: the device structure
* @client_id: me client id
*
* Return: me client or NULL if not found
*
* Locking: dev->me_clients_rwsem
*/
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
struct mei_me_client *__me_cl, *me_cl = NULL;
down_read(&dev->me_clients_rwsem);
list_for_each_entry(__me_cl, &dev->me_clients, list) {
if (__me_cl->client_id == client_id) {
me_cl = mei_me_cl_get(__me_cl);
break;
}
}
up_read(&dev->me_clients_rwsem);
return me_cl;
}
/**
* __mei_me_cl_by_uuid_id - locate me client by client id and uuid
* increases ref count
*
* @dev: the device structure
* @uuid: me client uuid
* @client_id: me client id
*
 * Return: me client or NULL if not found
*
* Locking: dev->me_clients_rwsem
*/
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
const uuid_le *uuid, u8 client_id)
{
struct mei_me_client *me_cl;
const uuid_le *pn;
WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
list_for_each_entry(me_cl, &dev->me_clients, list) {
pn = &me_cl->props.protocol_name;
if (uuid_le_cmp(*uuid, *pn) == 0 &&
me_cl->client_id == client_id)
return mei_me_cl_get(me_cl);
}
return NULL;
}
/**
* mei_me_cl_by_uuid_id - locate me client by client id and uuid
* increases ref count
*
* @dev: the device structure
* @uuid: me client uuid
* @client_id: me client id
*
 * Return: me client or NULL if not found
*/
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
const uuid_le *uuid, u8 client_id)
{
struct mei_me_client *me_cl;
down_read(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
up_read(&dev->me_clients_rwsem);
return me_cl;
}
/**
* mei_me_cl_rm_by_uuid - remove all me clients matching uuid
*
* @dev: the device structure
* @uuid: me client uuid
*
* Locking: called under "dev->device_lock" lock
*/
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
struct mei_me_client *me_cl;
dev_dbg(dev->dev, "remove %pUl\n", uuid);
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid(dev, uuid);
__mei_me_cl_del(dev, me_cl);
mei_me_cl_put(me_cl);
up_write(&dev->me_clients_rwsem);
}
/**
* mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
*
* @dev: the device structure
* @uuid: me client uuid
* @id: me client id
*
* Locking: called under "dev->device_lock" lock
*/
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
struct mei_me_client *me_cl;
dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
__mei_me_cl_del(dev, me_cl);
mei_me_cl_put(me_cl);
up_write(&dev->me_clients_rwsem);
}
/**
* mei_me_cl_rm_all - remove all me clients
*
* @dev: the device structure
*
* Locking: called under "dev->device_lock" lock
*/
void mei_me_cl_rm_all(struct mei_device *dev)
{
struct mei_me_client *me_cl, *next;
down_write(&dev->me_clients_rwsem);
list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
__mei_me_cl_del(dev, me_cl);
up_write(&dev->me_clients_rwsem);
}
/**
* mei_io_cb_free - free mei_cb_private related memory
*
* @cb: mei callback struct
*/
void mei_io_cb_free(struct mei_cl_cb *cb)
{
if (cb == NULL)
return;
list_del(&cb->list);
kvfree(cb->buf.data);
kfree(cb->ext_hdr);
kfree(cb);
}
/**
* mei_tx_cb_enqueue - queue tx callback
*
* @cb: mei callback struct
* @head: an instance of list to queue on
*
* Locking: called under "dev->device_lock" lock
*/
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
struct list_head *head)
{
list_add_tail(&cb->list, head);
cb->cl->tx_cb_queued++;
}
/**
* mei_tx_cb_dequeue - dequeue tx callback
*
* @cb: mei callback struct to dequeue and free
*
* Locking: called under "dev->device_lock" lock
*/
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
if (!WARN_ON(cb->cl->tx_cb_queued == 0))
cb->cl->tx_cb_queued--;
mei_io_cb_free(cb);
}
/**
 * mei_cl_set_read_by_fp - set the pending_read flag in the vtag struct for the given fp
*
* @cl: mei client
* @fp: pointer to file structure
*
* Locking: called under "dev->device_lock" lock
*/
static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
const struct file *fp)
{
struct mei_cl_vtag *cl_vtag;
list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
if (cl_vtag->fp == fp) {
cl_vtag->pending_read = true;
return;
}
}
}
/**
* mei_io_cb_init - allocate and initialize io callback
*
* @cl: mei client
* @type: operation type
* @fp: pointer to file structure
*
 * Return: mei_cl_cb pointer or NULL
*/
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
enum mei_cb_file_ops type,
const struct file *fp)
{
struct mei_cl_cb *cb;
cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return NULL;
INIT_LIST_HEAD(&cb->list);
cb->fp = fp;
cb->cl = cl;
cb->buf_idx = 0;
cb->fop_type = type;
cb->vtag = 0;
cb->ext_hdr = NULL;
return cb;
}
/**
* mei_io_list_flush_cl - removes cbs belonging to the cl.
*
* @head: an instance of our list structure
* @cl: host client
*/
static void mei_io_list_flush_cl(struct list_head *head,
const struct mei_cl *cl)
{
struct mei_cl_cb *cb, *next;
list_for_each_entry_safe(cb, next, head, list) {
if (cl == cb->cl) {
list_del_init(&cb->list);
if (cb->fop_type == MEI_FOP_READ)
mei_io_cb_free(cb);
}
}
}
/**
* mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
*
* @head: An instance of our list structure
* @cl: host client
* @fp: file pointer (matching cb file object), may be NULL
*/
static void mei_io_tx_list_free_cl(struct list_head *head,
const struct mei_cl *cl,
const struct file *fp)
{
struct mei_cl_cb *cb, *next;
list_for_each_entry_safe(cb, next, head, list) {
if (cl == cb->cl && (!fp || fp == cb->fp))
mei_tx_cb_dequeue(cb);
}
}
/**
* mei_io_list_free_fp - free cb from a list that matches file pointer
*
* @head: io list
* @fp: file pointer (matching cb file object), may be NULL
*/
static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
struct mei_cl_cb *cb, *next;
list_for_each_entry_safe(cb, next, head, list)
if (!fp || fp == cb->fp)
mei_io_cb_free(cb);
}
/**
* mei_cl_free_pending - free pending cb
*
* @cl: host client
*/
static void mei_cl_free_pending(struct mei_cl *cl)
{
struct mei_cl_cb *cb;
cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
mei_io_cb_free(cb);
}
/**
* mei_cl_alloc_cb - a convenient wrapper for allocating read cb
*
* @cl: host client
* @length: size of the buffer
* @fop_type: operation type
* @fp: associated file pointer (might be NULL)
*
* Return: cb on success and NULL on failure
*/
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
enum mei_cb_file_ops fop_type,
const struct file *fp)
{
struct mei_cl_cb *cb;
cb = mei_io_cb_init(cl, fop_type, fp);
if (!cb)
return NULL;
if (length == 0)
return cb;
cb->buf.data = kvmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
if (!cb->buf.data) {
mei_io_cb_free(cb);
return NULL;
}
cb->buf.size = length;
return cb;
}
/**
* mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
* and enqueuing a control command cb
*
* @cl: host client
* @length: size of the buffer
* @fop_type: operation type
* @fp: associated file pointer (might be NULL)
*
* Return: cb on success and NULL on failure
* Locking: called under "dev->device_lock" lock
*/
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
enum mei_cb_file_ops fop_type,
const struct file *fp)
{
struct mei_cl_cb *cb;
/* for RX always allocate at least client's mtu */
if (length)
length = max_t(size_t, length, mei_cl_mtu(cl));
cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
if (!cb)
return NULL;
list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
return cb;
}
/**
* mei_cl_read_cb - find this cl's callback in the read list
* for a specific file
*
* @cl: host client
* @fp: file pointer (matching cb file object), may be NULL
*
* Return: cb on success, NULL if cb is not found
*/
struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
{
struct mei_cl_cb *cb;
struct mei_cl_cb *ret_cb = NULL;
spin_lock(&cl->rd_completed_lock);
list_for_each_entry(cb, &cl->rd_completed, list)
if (!fp || fp == cb->fp) {
ret_cb = cb;
break;
}
spin_unlock(&cl->rd_completed_lock);
return ret_cb;
}
/**
* mei_cl_flush_queues - flushes queue lists belonging to cl.
*
* @cl: host client
* @fp: file pointer (matching cb file object), may be NULL
*
* Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
*/
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
struct mei_device *dev;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
dev = cl->dev;
cl_dbg(dev, cl, "remove list entry belonging to cl\n");
mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
/* free pending and control cb only in final flush */
if (!fp) {
mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
mei_cl_free_pending(cl);
}
spin_lock(&cl->rd_completed_lock);
mei_io_list_free_fp(&cl->rd_completed, fp);
spin_unlock(&cl->rd_completed_lock);
return 0;
}
/**
* mei_cl_init - initializes cl.
*
* @cl: host client to be initialized
* @dev: mei device
*/
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
memset(cl, 0, sizeof(*cl));
init_waitqueue_head(&cl->wait);
init_waitqueue_head(&cl->rx_wait);
init_waitqueue_head(&cl->tx_wait);
init_waitqueue_head(&cl->ev_wait);
INIT_LIST_HEAD(&cl->vtag_map);
spin_lock_init(&cl->rd_completed_lock);
INIT_LIST_HEAD(&cl->rd_completed);
INIT_LIST_HEAD(&cl->rd_pending);
INIT_LIST_HEAD(&cl->link);
cl->writing_state = MEI_IDLE;
cl->state = MEI_FILE_UNINITIALIZED;
cl->dev = dev;
}
/**
* mei_cl_allocate - allocates cl structure and sets it up.
*
* @dev: mei device
* Return: The allocated host client structure or NULL on failure
*/
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
struct mei_cl *cl;
cl = kmalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
return NULL;
mei_cl_init(cl, dev);
return cl;
}
/**
* mei_cl_link - allocate host id in the host map
*
* @cl: host client
*
* Return: 0 on success
* -EINVAL on incorrect values
* -EMFILE if open count exceeded.
*/
int mei_cl_link(struct mei_cl *cl)
{
struct mei_device *dev;
int id;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
dev = cl->dev;
id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
if (id >= MEI_CLIENTS_MAX) {
dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
return -EMFILE;
}
if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
dev_err(dev->dev, "open_handle_count exceeded %d",
MEI_MAX_OPEN_HANDLE_COUNT);
return -EMFILE;
}
dev->open_handle_count++;
cl->host_client_id = id;
list_add_tail(&cl->link, &dev->file_list);
set_bit(id, dev->host_clients_map);
cl->state = MEI_FILE_INITIALIZING;
cl_dbg(dev, cl, "link cl\n");
return 0;
}
/**
* mei_cl_unlink - remove host client from the list
*
* @cl: host client
*
* Return: always 0
*/
int mei_cl_unlink(struct mei_cl *cl)
{
struct mei_device *dev;
/* don't shout on error exit path */
if (!cl)
return 0;
if (WARN_ON(!cl->dev))
return 0;
dev = cl->dev;
cl_dbg(dev, cl, "unlink client");
if (cl->state == MEI_FILE_UNINITIALIZED)
return 0;
if (dev->open_handle_count > 0)
dev->open_handle_count--;
/* never clear the 0 bit */
if (cl->host_client_id)
clear_bit(cl->host_client_id, dev->host_clients_map);
list_del_init(&cl->link);
cl->state = MEI_FILE_UNINITIALIZED;
cl->writing_state = MEI_IDLE;
WARN_ON(!list_empty(&cl->rd_completed) ||
!list_empty(&cl->rd_pending) ||
!list_empty(&cl->link));
return 0;
}
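/**
 * mei_host_client_init - initialize client hierarchy
 *
 * @dev: the device structure
 */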
void mei_host_client_init(struct mei_device *dev)
{
mei_set_devstate(dev, MEI_DEV_ENABLED);
dev->reset_count = 0;
schedule_work(&dev->bus_rescan_work);
pm_runtime_mark_last_busy(dev->dev);
dev_dbg(dev->dev, "rpm: autosuspend\n");
pm_request_autosuspend(dev->dev);
}
/**
* mei_hbuf_acquire - try to acquire host buffer
*
* @dev: the device structure
* Return: true if host buffer was acquired
*/
bool mei_hbuf_acquire(struct mei_device *dev)
{
if (mei_pg_state(dev) == MEI_PG_ON ||
mei_pg_in_transition(dev)) {
dev_dbg(dev->dev, "device is in pg\n");
return false;
}
if (!dev->hbuf_is_ready) {
dev_dbg(dev->dev, "hbuf is not ready\n");
return false;
}
dev->hbuf_is_ready = false;
return true;
}
/**
* mei_cl_wake_all - wake up readers, writers and event waiters so
* they can be interrupted
*
* @cl: host client
*/
static void mei_cl_wake_all(struct mei_cl *cl)
{
struct mei_device *dev = cl->dev;
/* synchronized under device mutex */
if (waitqueue_active(&cl->rx_wait)) {
cl_dbg(dev, cl, "Waking up reading client!\n");
wake_up_interruptible(&cl->rx_wait);
}
/* synchronized under device mutex */
if (waitqueue_active(&cl->tx_wait)) {
cl_dbg(dev, cl, "Waking up writing client!\n");
wake_up_interruptible(&cl->tx_wait);
}
/* synchronized under device mutex */
if (waitqueue_active(&cl->ev_wait)) {
cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
wake_up_interruptible(&cl->ev_wait);
}
/* synchronized under device mutex */
if (waitqueue_active(&cl->wait)) {
cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
wake_up(&cl->wait);
}
}
/**
* mei_cl_set_disconnected - set disconnected state and clear
* associated states and resources
*
* @cl: host client
*/
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
struct mei_device *dev = cl->dev;
if (cl->state == MEI_FILE_DISCONNECTED ||
cl->state <= MEI_FILE_INITIALIZING)
return;
cl->state = MEI_FILE_DISCONNECTED;
mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
mei_cl_wake_all(cl);
cl->rx_flow_ctrl_creds = 0;
cl->tx_flow_ctrl_creds = 0;
cl->timer_count = 0;
if (!cl->me_cl)
return;
if (!WARN_ON(cl->me_cl->connect_count == 0))
cl->me_cl->connect_count--;
if (cl->me_cl->connect_count == 0)
cl->me_cl->tx_flow_ctrl_creds = 0;
mei_me_cl_put(cl->me_cl);
cl->me_cl = NULL;
}
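/**
 * mei_cl_set_connecting - set connecting state and bind to the me client
 *
 * @cl: host client
 * @me_cl: me client to bind to
 *
 * Return: 0 on success, -ENOENT if the me client cannot be referenced,
 * -EBUSY if a fixed address me client is already connected
 */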
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
if (!mei_me_cl_get(me_cl))
return -ENOENT;
/* only one connection is allowed for fixed address clients */
if (me_cl->props.fixed_address) {
if (me_cl->connect_count) {
mei_me_cl_put(me_cl);
return -EBUSY;
}
}
cl->me_cl = me_cl;
cl->state = MEI_FILE_CONNECTING;
cl->me_cl->connect_count++;
return 0;
}
/**
* mei_cl_send_disconnect - send disconnect request
*
* @cl: host client
* @cb: callback block
*
* Return: 0, OK; otherwise, error.
*/
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
struct mei_device *dev;
int ret;
dev = cl->dev;
ret = mei_hbm_cl_disconnect_req(dev, cl);
cl->status = ret;
if (ret) {
cl->state = MEI_FILE_DISCONNECT_REPLY;
return ret;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
cl->timer_count = dev->timeouts.connect;
mei_schedule_stall_timer(dev);
return 0;
}
/**
* mei_cl_irq_disconnect - processes close related operation from
* interrupt thread context - send disconnect request
*
* @cl: client
* @cb: callback block.
* @cmpl_list: complete list.
*
* Return: 0, OK; otherwise, error.
*/
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
int slots;
int ret;
msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
slots = mei_hbuf_empty_slots(dev);
if (slots < 0)
return -EOVERFLOW;
if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_cl_send_disconnect(cl, cb);
if (ret)
list_move_tail(&cb->list, cmpl_list);
return ret;
}
/**
* __mei_cl_disconnect - disconnect host client from the me one
* internal function; runtime pm must already be acquired
*
* @cl: host client
*
* Return: 0 on success, <0 on failure.
*/
static int __mei_cl_disconnect(struct mei_cl *cl)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
dev = cl->dev;
cl->state = MEI_FILE_DISCONNECTING;
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
if (!cb) {
rets = -ENOMEM;
goto out;
}
if (mei_hbuf_acquire(dev)) {
rets = mei_cl_send_disconnect(cl, cb);
if (rets) {
cl_err(dev, cl, "failed to disconnect.\n");
goto out;
}
}
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
cl->state == MEI_FILE_DISCONNECT_REPLY ||
cl->state == MEI_FILE_DISCONNECTED,
dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
rets = cl->status;
if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
cl->state != MEI_FILE_DISCONNECTED) {
cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
rets = -ETIME;
}
out:
/* we disconnect also on error */
mei_cl_set_disconnected(cl);
if (!rets)
cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
mei_io_cb_free(cb);
return rets;
}
/**
* mei_cl_disconnect - disconnect host client from the me one
*
* @cl: host client
*
* Locking: called under "dev->device_lock" lock
*
* Return: 0 on success, <0 on failure.
*/
int mei_cl_disconnect(struct mei_cl *cl)
{
struct mei_device *dev;
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
cl_dbg(dev, cl, "disconnecting");
if (!mei_cl_is_connected(cl))
return 0;
if (mei_cl_is_fixed_address(cl)) {
mei_cl_set_disconnected(cl);
return 0;
}
if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
dev->dev_state == MEI_DEV_POWER_DOWN) {
cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
mei_cl_set_disconnected(cl);
return 0;
}
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
rets = __mei_cl_disconnect(cl);
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return rets;
}
/**
* mei_cl_is_other_connecting - checks if other
* client with the same me client id is connecting
*
* @cl: private data of the file object
*
* Return: true if another client with the same me client id is connecting,
* false otherwise.
*/
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
dev = cl->dev;
list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
if (cb->fop_type == MEI_FOP_CONNECT &&
mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
return true;
}
return false;
}
/**
* mei_cl_send_connect - send connect request
*
* @cl: host client
* @cb: callback block
*
* Return: 0, OK; otherwise, error.
*/
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
struct mei_device *dev;
int ret;
dev = cl->dev;
ret = mei_hbm_cl_connect_req(dev, cl);
cl->status = ret;
if (ret) {
cl->state = MEI_FILE_DISCONNECT_REPLY;
return ret;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
cl->timer_count = dev->timeouts.connect;
mei_schedule_stall_timer(dev);
return 0;
}
/**
* mei_cl_irq_connect - send connect request in irq_thread context
*
* @cl: host client
* @cb: callback block
* @cmpl_list: complete list
*
* Return: 0, OK; otherwise, error.
*/
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
int slots;
int rets;
if (mei_cl_is_other_connecting(cl))
return 0;
msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
slots = mei_hbuf_empty_slots(dev);
if (slots < 0)
return -EOVERFLOW;
if ((u32)slots < msg_slots)
return -EMSGSIZE;
rets = mei_cl_send_connect(cl, cb);
if (rets)
list_move_tail(&cb->list, cmpl_list);
return rets;
}
/**
* mei_cl_connect - connect host client to the me one
*
* @cl: host client
* @me_cl: me client
* @fp: pointer to file structure
*
* Locking: called under "dev->device_lock" lock
*
* Return: 0 on success, <0 on failure.
*/
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
const struct file *fp)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
if (WARN_ON(!cl || !cl->dev || !me_cl))
return -ENODEV;
dev = cl->dev;
rets = mei_cl_set_connecting(cl, me_cl);
if (rets)
goto nortpm;
if (mei_cl_is_fixed_address(cl)) {
cl->state = MEI_FILE_CONNECTED;
rets = 0;
goto nortpm;
}
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
goto nortpm;
}
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
if (!cb) {
rets = -ENOMEM;
goto out;
}
/* run hbuf acquire last so we don't have to undo */
if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
rets = mei_cl_send_connect(cl, cb);
if (rets)
goto out;
}
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
(cl->state == MEI_FILE_CONNECTED ||
cl->state == MEI_FILE_DISCONNECTED ||
cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
cl->state == MEI_FILE_DISCONNECT_REPLY),
dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (!mei_cl_is_connected(cl)) {
if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
/* ignore disconnect return value;
* in case of failure reset will be invoked
*/
__mei_cl_disconnect(cl);
rets = -EFAULT;
goto out;
}
/* timeout or something went really wrong */
if (!cl->status)
cl->status = -EFAULT;
}
rets = cl->status;
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
mei_io_cb_free(cb);
nortpm:
if (!mei_cl_is_connected(cl))
mei_cl_set_disconnected(cl);
return rets;
}
/**
* mei_cl_alloc_linked - allocate and link host client
*
* @dev: the device structure
*
* Return: cl on success, ERR_PTR on failure
*/
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
struct mei_cl *cl;
int ret;
cl = mei_cl_allocate(dev);
if (!cl) {
ret = -ENOMEM;
goto err;
}
ret = mei_cl_link(cl);
if (ret)
goto err;
return cl;
err:
kfree(cl);
return ERR_PTR(ret);
}
/**
* mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
*
* @cl: host client
*
* Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
*/
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
if (WARN_ON(!cl || !cl->me_cl))
return -EINVAL;
if (cl->tx_flow_ctrl_creds > 0)
return 1;
if (mei_cl_is_fixed_address(cl))
return 1;
if (mei_cl_is_single_recv_buf(cl)) {
if (cl->me_cl->tx_flow_ctrl_creds > 0)
return 1;
}
return 0;
}
/**
* mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
* for a client
*
* @cl: host client
*
* Return:
* 0 on success
* -EINVAL when ctrl credits are <= 0
*/
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
if (WARN_ON(!cl || !cl->me_cl))
return -EINVAL;
if (mei_cl_is_fixed_address(cl))
return 0;
if (mei_cl_is_single_recv_buf(cl)) {
if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
return -EINVAL;
cl->me_cl->tx_flow_ctrl_creds--;
} else {
if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
return -EINVAL;
cl->tx_flow_ctrl_creds--;
}
return 0;
}
/**
* mei_cl_vtag_alloc - allocate and fill the vtag structure
*
* @fp: pointer to file structure
* @vtag: vm tag
*
* Return:
* * Pointer to allocated struct - on success
* * ERR_PTR(-ENOMEM) on memory allocation failure
*/
struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
{
struct mei_cl_vtag *cl_vtag;
cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
if (!cl_vtag)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&cl_vtag->list);
cl_vtag->vtag = vtag;
cl_vtag->fp = fp;
return cl_vtag;
}
/**
* mei_cl_fp_by_vtag - obtain the file pointer by vtag
*
* @cl: host client
* @vtag: virtual tag
*
* Return:
* * A file pointer - on success
* * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
*/
const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
{
struct mei_cl_vtag *vtag_l;
list_for_each_entry(vtag_l, &cl->vtag_map, list)
/* The client on bus has one fixed fp */
if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
vtag_l->vtag == vtag)
return vtag_l->fp;
return ERR_PTR(-ENOENT);
}
/**
* mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
*
* @cl: host client
* @vtag: vm tag
*/
static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
{
struct mei_cl_vtag *vtag_l;
list_for_each_entry(vtag_l, &cl->vtag_map, list) {
/* The client on bus has one fixed vtag map */
if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
vtag_l->vtag == vtag) {
vtag_l->pending_read = false;
break;
}
}
}
/**
* mei_cl_read_vtag_add_fc - add flow control for next pending reader
* in the vtag list
*
* @cl: host client
*/
static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
{
struct mei_cl_vtag *cl_vtag;
list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
if (cl_vtag->pending_read) {
if (mei_cl_enqueue_ctrl_wr_cb(cl,
mei_cl_mtu(cl),
MEI_FOP_READ,
cl_vtag->fp))
cl->rx_flow_ctrl_creds++;
break;
}
}
}
/**
* mei_cl_vt_support_check - check if client supports vtags
*
* @cl: host client
*
* Return:
* * 0 - supported, or not connected at all
* * -EOPNOTSUPP - vtags are not supported by client
*/
int mei_cl_vt_support_check(const struct mei_cl *cl)
{
struct mei_device *dev = cl->dev;
if (!dev->hbm_f_vt_supported)
return -EOPNOTSUPP;
if (!cl->me_cl)
return 0;
return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
}
/**
* mei_cl_add_rd_completed - add read completed callback to list with lock
* and vtag check
*
* @cl: host client
* @cb: callback block
*
*/
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
const struct file *fp;
if (!mei_cl_vt_support_check(cl)) {
fp = mei_cl_fp_by_vtag(cl, cb->vtag);
if (IS_ERR(fp)) {
/* client already disconnected, discarding */
mei_io_cb_free(cb);
return;
}
cb->fp = fp;
mei_cl_reset_read_by_vtag(cl, cb->vtag);
mei_cl_read_vtag_add_fc(cl);
}
spin_lock(&cl->rd_completed_lock);
list_add_tail(&cb->list, &cl->rd_completed);
spin_unlock(&cl->rd_completed_lock);
}
/**
* mei_cl_del_rd_completed - free read completed callback with lock
*
* @cl: host client
* @cb: callback block
*
*/
void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
spin_lock(&cl->rd_completed_lock);
mei_io_cb_free(cb);
spin_unlock(&cl->rd_completed_lock);
}
/**
* mei_cl_notify_fop2req - convert fop to proper request
*
* @fop: notification start/stop file operation type
*
* Return: MEI_HBM_NOTIFICATION_START/STOP
*/
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
if (fop == MEI_FOP_NOTIFY_START)
return MEI_HBM_NOTIFICATION_START;
else
return MEI_HBM_NOTIFICATION_STOP;
}
/**
* mei_cl_notify_req2fop - convert notification request to file operation type
*
* @req: hbm notification request type
*
* Return: MEI_FOP_NOTIFY_START/STOP
*/
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
if (req == MEI_HBM_NOTIFICATION_START)
return MEI_FOP_NOTIFY_START;
else
return MEI_FOP_NOTIFY_STOP;
}
/**
* mei_cl_irq_notify - send notification request in irq_thread context
*
* @cl: client
* @cb: callback block.
* @cmpl_list: complete list.
*
* Return: 0 on success and error otherwise.
*/
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
int slots;
int ret;
bool request;
msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
slots = mei_hbuf_empty_slots(dev);
if (slots < 0)
return -EOVERFLOW;
if ((u32)slots < msg_slots)
return -EMSGSIZE;
request = mei_cl_notify_fop2req(cb->fop_type);
ret = mei_hbm_cl_notify_req(dev, cl, request);
if (ret) {
cl->status = ret;
list_move_tail(&cb->list, cmpl_list);
return ret;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
return 0;
}
/**
* mei_cl_notify_request - send notification stop/start request
*
* @cl: host client
* @fp: associate request with file
* @request: 1 for start or 0 for stop
*
* Locking: called under "dev->device_lock" lock
*
* Return: 0 on success and error otherwise.
*/
int mei_cl_notify_request(struct mei_cl *cl,
const struct file *fp, u8 request)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
enum mei_cb_file_ops fop_type;
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
if (!dev->hbm_f_ev_supported) {
cl_dbg(dev, cl, "notifications not supported\n");
return -EOPNOTSUPP;
}
if (!mei_cl_is_connected(cl))
return -ENODEV;
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
fop_type = mei_cl_notify_req2fop(request);
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
if (!cb) {
rets = -ENOMEM;
goto out;
}
if (mei_hbuf_acquire(dev)) {
if (mei_hbm_cl_notify_req(dev, cl, request)) {
rets = -ENODEV;
goto out;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
}
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
cl->notify_en == request ||
cl->status ||
!mei_cl_is_connected(cl),
dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (cl->notify_en != request && !cl->status)
cl->status = -EFAULT;
rets = cl->status;
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
mei_io_cb_free(cb);
return rets;
}
/**
* mei_cl_notify - raise notification
*
* @cl: host client
*
* Locking: called under "dev->device_lock" lock
*/
void mei_cl_notify(struct mei_cl *cl)
{
struct mei_device *dev;
if (!cl || !cl->dev)
return;
dev = cl->dev;
if (!cl->notify_en)
return;
cl_dbg(dev, cl, "notify event");
cl->notify_ev = true;
if (!mei_cl_bus_notify_event(cl))
wake_up_interruptible(&cl->ev_wait);
if (cl->ev_async)
kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
}
/**
* mei_cl_notify_get - get or wait for notification event
*
* @cl: host client
* @block: this request is blocking
* @notify_ev: true if notification event was received
*
* Locking: called under "dev->device_lock" lock
*
* Return: 0 on success and error otherwise.
*/
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
struct mei_device *dev;
int rets;
*notify_ev = false;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
if (!dev->hbm_f_ev_supported) {
cl_dbg(dev, cl, "notifications not supported\n");
return -EOPNOTSUPP;
}
if (!mei_cl_is_connected(cl))
return -ENODEV;
if (cl->notify_ev)
goto out;
if (!block)
return -EAGAIN;
mutex_unlock(&dev->device_lock);
rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
mutex_lock(&dev->device_lock);
if (rets < 0)
return rets;
out:
*notify_ev = cl->notify_ev;
cl->notify_ev = false;
return 0;
}
/**
* mei_cl_read_start - the start read client message function.
*
* @cl: host client
* @length: number of bytes to read
* @fp: pointer to file structure
*
* Return: 0 on success, <0 on failure.
*/
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
if (!mei_cl_is_connected(cl))
return -ENODEV;
if (!mei_me_cl_is_active(cl->me_cl)) {
cl_err(dev, cl, "no such me client\n");
return -ENOTTY;
}
if (mei_cl_is_fixed_address(cl))
return 0;
/* HW currently supports only one pending read */
if (cl->rx_flow_ctrl_creds) {
mei_cl_set_read_by_fp(cl, fp);
return -EBUSY;
}
cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
if (!cb)
return -ENOMEM;
mei_cl_set_read_by_fp(cl, fp);
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
goto nortpm;
}
rets = 0;
if (mei_hbuf_acquire(dev)) {
rets = mei_hbm_cl_flow_control_req(dev, cl);
if (rets < 0)
goto out;
list_move_tail(&cb->list, &cl->rd_pending);
}
cl->rx_flow_ctrl_creds++;
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
nortpm:
if (rets)
mei_io_cb_free(cb);
return rets;
}
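/**
 * mei_ext_hdr_set_vtag - fill the vtag extended header
 *
 * @ext: extended header memory to fill
 * @vtag: vm tag to set
 *
 * Return: the length of the filled extended header in slots
 */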
static inline u8 mei_ext_hdr_set_vtag(void *ext, u8 vtag)
{
struct mei_ext_hdr_vtag *vtag_hdr = ext;
vtag_hdr->hdr.type = MEI_EXT_HDR_VTAG;
vtag_hdr->hdr.length = mei_data2slots(sizeof(*vtag_hdr));
vtag_hdr->vtag = vtag;
vtag_hdr->reserved = 0;
return vtag_hdr->hdr.length;
}
static inline bool mei_ext_hdr_is_gsc(struct mei_ext_hdr *ext)
{
return ext && ext->type == MEI_EXT_HDR_GSC;
}
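/* Copy a prebuilt GSC extended header and return its length in slots. */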
static inline u8 mei_ext_hdr_set_gsc(struct mei_ext_hdr *ext, struct mei_ext_hdr *gsc_hdr)
{
memcpy(ext, gsc_hdr, mei_ext_hdr_len(gsc_hdr));
return ext->length;
}
/**
* mei_msg_hdr_init - allocate and initialize mei message header
*
* @cb: message callback structure
*
* Return: a pointer to initialized header or ERR_PTR on failure
*/
static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
size_t hdr_len;
struct mei_ext_meta_hdr *meta;
struct mei_msg_hdr *mei_hdr;
bool is_ext, is_hbm, is_gsc, is_vtag;
struct mei_ext_hdr *next_ext;
if (!cb)
return ERR_PTR(-EINVAL);
/* Extended header for vtag is attached only on the first fragment */
is_vtag = (cb->vtag && cb->buf_idx == 0);
is_hbm = cb->cl->me_cl->client_id == 0;
is_gsc = ((!is_hbm) && cb->cl->dev->hbm_f_gsc_supported && mei_ext_hdr_is_gsc(cb->ext_hdr));
is_ext = is_vtag || is_gsc;
/* Compute extended header size */
hdr_len = sizeof(*mei_hdr);
if (!is_ext)
goto setup_hdr;
hdr_len += sizeof(*meta);
if (is_vtag)
hdr_len += sizeof(struct mei_ext_hdr_vtag);
if (is_gsc)
hdr_len += mei_ext_hdr_len(cb->ext_hdr);
setup_hdr:
mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
if (!mei_hdr)
return ERR_PTR(-ENOMEM);
mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
mei_hdr->me_addr = mei_cl_me_id(cb->cl);
mei_hdr->internal = cb->internal;
mei_hdr->extended = is_ext;
if (!is_ext)
goto out;
meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
meta->size = 0;
next_ext = (struct mei_ext_hdr *)meta->hdrs;
if (is_vtag) {
meta->count++;
meta->size += mei_ext_hdr_set_vtag(next_ext, cb->vtag);
next_ext = mei_ext_next(next_ext);
}
if (is_gsc) {
meta->count++;
meta->size += mei_ext_hdr_set_gsc(next_ext, cb->ext_hdr);
next_ext = mei_ext_next(next_ext);
}
out:
mei_hdr->length = hdr_len - sizeof(*mei_hdr);
return mei_hdr;
}
/**
* mei_cl_irq_write - write a message to device
* from the interrupt thread context
*
* @cl: client
* @cb: callback block.
* @cmpl_list: complete list.
*
* Return: 0, OK; otherwise error.
*/
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev;
struct mei_msg_data *buf;
struct mei_msg_hdr *mei_hdr = NULL;
size_t hdr_len;
size_t hbuf_len, dr_len;
size_t buf_len = 0;
size_t data_len;
int hbuf_slots;
u32 dr_slots;
u32 dma_len;
int rets;
bool first_chunk;
const void *data = NULL;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
buf = &cb->buf;
first_chunk = cb->buf_idx == 0;
rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
if (rets < 0)
goto err;
if (rets == 0) {
cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
return 0;
}
if (buf->data) {
buf_len = buf->size - cb->buf_idx;
data = buf->data + cb->buf_idx;
}
hbuf_slots = mei_hbuf_empty_slots(dev);
if (hbuf_slots < 0) {
rets = -EOVERFLOW;
goto err;
}
hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
dr_slots = mei_dma_ring_empty_slots(dev);
dr_len = mei_slots2data(dr_slots);
mei_hdr = mei_msg_hdr_init(cb);
if (IS_ERR(mei_hdr)) {
rets = PTR_ERR(mei_hdr);
mei_hdr = NULL;
goto err;
}
hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
/*
* Split the message only if we can write it into a completely empty
* host buffer; otherwise wait for the next time the host buffer is empty.
*/
if (hdr_len + buf_len <= hbuf_len) {
data_len = buf_len;
mei_hdr->msg_complete = 1;
} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
mei_hdr->dma_ring = 1;
if (buf_len > dr_len)
buf_len = dr_len;
else
mei_hdr->msg_complete = 1;
data_len = sizeof(dma_len);
dma_len = buf_len;
data = &dma_len;
} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
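/* The host buffer is completely empty: send a partial chunk now. */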
buf_len = hbuf_len - hdr_len;
data_len = buf_len;
} else {
kfree(mei_hdr);
return 0;
}
mei_hdr->length += data_len;
if (mei_hdr->dma_ring && buf->data)
mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
if (rets)
goto err;
cl->status = 0;
cl->writing_state = MEI_WRITING;
cb->buf_idx += buf_len;
if (first_chunk) {
if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
rets = -EIO;
goto err;
}
}
if (mei_hdr->msg_complete)
list_move_tail(&cb->list, &dev->write_waiting_list);
kfree(mei_hdr);
return 0;
err:
kfree(mei_hdr);
cl->status = rets;
list_move_tail(&cb->list, cmpl_list);
return rets;
}
/**
* mei_cl_write - submit a write cb to mei device
* assumes device_lock is locked
*
* @cl: host client
* @cb: write callback with filled data
* @timeout: send timeout in milliseconds.
* effective only for blocking writes: the cb->blocking is set.
* Set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
*
* Return: number of bytes sent on success, <0 on failure.
*/
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout)
{
struct mei_device *dev;
struct mei_msg_data *buf;
struct mei_msg_hdr *mei_hdr = NULL;
size_t hdr_len;
size_t hbuf_len, dr_len;
size_t buf_len;
size_t data_len;
int hbuf_slots;
u32 dr_slots;
u32 dma_len;
ssize_t rets;
bool blocking;
const void *data;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
if (WARN_ON(!cb))
return -EINVAL;
dev = cl->dev;
buf = &cb->buf;
buf_len = buf->size;
cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);
blocking = cb->blocking;
data = buf->data;
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %zd\n", rets);
goto free;
}
cb->buf_idx = 0;
cl->writing_state = MEI_IDLE;
rets = mei_cl_tx_flow_ctrl_creds(cl);
if (rets < 0)
goto err;
mei_hdr = mei_msg_hdr_init(cb);
if (IS_ERR(mei_hdr)) {
rets = PTR_ERR(mei_hdr);
mei_hdr = NULL;
goto err;
}
hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
if (rets == 0) {
cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
rets = buf_len;
goto out;
}
if (!mei_hbuf_acquire(dev)) {
cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
rets = buf_len;
goto out;
}
hbuf_slots = mei_hbuf_empty_slots(dev);
if (hbuf_slots < 0) {
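/*
* Stash the negative errno in the size_t buf_len; the final
* "rets = buf_len" assignment converts it back to a negative
* ssize_t on the way out.
*/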
buf_len = -EOVERFLOW;
goto out;
}
hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
dr_slots = mei_dma_ring_empty_slots(dev);
dr_len = mei_slots2data(dr_slots);
if (hdr_len + buf_len <= hbuf_len) {
data_len = buf_len;
mei_hdr->msg_complete = 1;
} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
mei_hdr->dma_ring = 1;
if (buf_len > dr_len)
buf_len = dr_len;
else
mei_hdr->msg_complete = 1;
data_len = sizeof(dma_len);
dma_len = buf_len;
data = &dma_len;
} else {
buf_len = hbuf_len - hdr_len;
data_len = buf_len;
}
mei_hdr->length += data_len;
if (mei_hdr->dma_ring && buf->data)
mei_dma_ring_write(dev, buf->data, buf_len);
rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
if (rets)
goto err;
rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
if (rets)
goto err;
cl->writing_state = MEI_WRITING;
cb->buf_idx = buf_len;
/* restore return value */
buf_len = buf->size;
out:
if (mei_hdr->msg_complete)
mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
else
mei_tx_cb_enqueue(cb, &dev->write_list);
cb = NULL;
if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
mutex_unlock(&dev->device_lock);
rets = wait_event_interruptible_timeout(cl->tx_wait,
cl->writing_state == MEI_WRITE_COMPLETE ||
(!mei_cl_is_connected(cl)),
msecs_to_jiffies(timeout));
mutex_lock(&dev->device_lock);
/* clean all queues on timeout as something fatal happened */
if (rets == 0) {
rets = -ETIME;
mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
}
/* wait_event_interruptible returns -ERESTARTSYS */
if (rets > 0)
rets = 0;
if (rets) {
if (signal_pending(current))
rets = -EINTR;
goto err;
}
if (cl->writing_state != MEI_WRITE_COMPLETE) {
rets = -EFAULT;
goto err;
}
}
rets = buf_len;
err:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
free:
mei_io_cb_free(cb);
kfree(mei_hdr);
return rets;
}
/**
* mei_cl_complete - processes completed operation for a client
*
* @cl: private data of the file object.
* @cb: callback block.
*/
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
struct mei_device *dev = cl->dev;
switch (cb->fop_type) {
case MEI_FOP_WRITE:
mei_tx_cb_dequeue(cb);
cl->writing_state = MEI_WRITE_COMPLETE;
if (waitqueue_active(&cl->tx_wait)) {
wake_up_interruptible(&cl->tx_wait);
} else {
pm_runtime_mark_last_busy(dev->dev);
pm_request_autosuspend(dev->dev);
}
break;
case MEI_FOP_READ:
mei_cl_add_rd_completed(cl, cb);
if (!mei_cl_is_fixed_address(cl) &&
!WARN_ON(!cl->rx_flow_ctrl_creds))
cl->rx_flow_ctrl_creds--;
if (!mei_cl_bus_rx_event(cl))
wake_up_interruptible(&cl->rx_wait);
break;
case MEI_FOP_CONNECT:
case MEI_FOP_DISCONNECT:
case MEI_FOP_NOTIFY_STOP:
case MEI_FOP_NOTIFY_START:
case MEI_FOP_DMA_MAP:
case MEI_FOP_DMA_UNMAP:
if (waitqueue_active(&cl->wait))
wake_up(&cl->wait);
break;
case MEI_FOP_DISCONNECT_RSP:
mei_io_cb_free(cb);
mei_cl_set_disconnected(cl);
break;
default:
BUG();
}
}
/**
* mei_cl_all_disconnect - disconnect forcefully all connected clients
*
* @dev: mei device
*/
void mei_cl_all_disconnect(struct mei_device *dev)
{
struct mei_cl *cl;
list_for_each_entry(cl, &dev->file_list, link)
mei_cl_set_disconnected(cl);
}
EXPORT_SYMBOL_GPL(mei_cl_all_disconnect);
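/**
 * mei_cl_dma_map_find - find the client owning a mapped dma buffer
 *
 * @dev: the device structure
 * @buffer_id: id of the mapped buffer
 *
 * Return: the host client with the matching buffer id or NULL if not found
 */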
static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
{
struct mei_cl *cl;
list_for_each_entry(cl, &dev->file_list, link)
if (cl->dma.buffer_id == buffer_id)
return cl;
return NULL;
}
/**
* mei_cl_irq_dma_map - send client dma map request in irq_thread context
*
* @cl: client
* @cb: callback block.
* @cmpl_list: complete list.
*
* Return: 0 on success and error otherwise.
*/
int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
int slots;
int ret;
msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
slots = mei_hbuf_empty_slots(dev);
if (slots < 0)
return -EOVERFLOW;
if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_hbm_cl_dma_map_req(dev, cl);
if (ret) {
cl->status = ret;
list_move_tail(&cb->list, cmpl_list);
return ret;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
return 0;
}
/**
* mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
*
* @cl: client
* @cb: callback block.
* @cmpl_list: complete list.
*
* Return: 0 on success and error otherwise.
*/
int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
int slots;
int ret;
msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
slots = mei_hbuf_empty_slots(dev);
if (slots < 0)
return -EOVERFLOW;
if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_hbm_cl_dma_unmap_req(dev, cl);
if (ret) {
cl->status = ret;
list_move_tail(&cb->list, cmpl_list);
return ret;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
return 0;
}
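/* Allocate a DMA-coherent buffer for the client and record its id and size. */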
static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
{
cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
&cl->dma.daddr, GFP_KERNEL);
if (!cl->dma.vaddr)
return -ENOMEM;
cl->dma.buffer_id = buf_id;
cl->dma.size = size;
return 0;
}
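/* Release the client's DMA buffer and reset the bookkeeping fields. */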
static void mei_cl_dma_free(struct mei_cl *cl)
{
cl->dma.buffer_id = 0;
dmam_free_coherent(cl->dev->dev,
cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
cl->dma.size = 0;
cl->dma.vaddr = NULL;
cl->dma.daddr = 0;
}
/**
* mei_cl_dma_alloc_and_map - send client dma map request
*
* @cl: host client
* @fp: pointer to file structure
* @buffer_id: id of the mapped buffer
* @size: size of the buffer
*
* Locking: called under "dev->device_lock" lock
*
* Return:
* * -ENODEV
* * -EINVAL
* * -EOPNOTSUPP
* * -EPROTO
* -ENOMEM
*/
int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
u8 buffer_id, size_t size)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
if (!dev->hbm_f_cd_supported) {
cl_dbg(dev, cl, "client dma is not supported\n");
return -EOPNOTSUPP;
}
if (buffer_id == 0)
return -EINVAL;
if (mei_cl_is_connected(cl))
return -EPROTO;
if (cl->dma_mapped)
return -EPROTO;
if (mei_cl_dma_map_find(dev, buffer_id)) {
cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
buffer_id);
return -EPROTO;
}
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
rets = mei_cl_dma_alloc(cl, buffer_id, size);
if (rets) {
pm_runtime_put_noidle(dev->dev);
return rets;
}
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
if (!cb) {
rets = -ENOMEM;
goto out;
}
if (mei_hbuf_acquire(dev)) {
if (mei_hbm_cl_dma_map_req(dev, cl)) {
rets = -ENODEV;
goto out;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
}
cl->status = 0;
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
cl->dma_mapped || cl->status,
dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (!cl->dma_mapped && !cl->status)
cl->status = -EFAULT;
rets = cl->status;
out:
if (rets)
mei_cl_dma_free(cl);
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
mei_io_cb_free(cb);
return rets;
}
/**
* mei_cl_dma_unmap - send client dma unmap request
*
* @cl: host client
* @fp: pointer to file structure
*
* Locking: called under "dev->device_lock" lock
*
* Return: 0 on success and error otherwise.
*/
int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
if (!dev->hbm_f_cd_supported) {
cl_dbg(dev, cl, "client dma is not supported\n");
return -EOPNOTSUPP;
}
/* do not allow unmap for connected client */
if (mei_cl_is_connected(cl))
return -EPROTO;
if (!cl->dma_mapped)
return -EPROTO;
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
if (!cb) {
rets = -ENOMEM;
goto out;
}
if (mei_hbuf_acquire(dev)) {
if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
rets = -ENODEV;
goto out;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
}
cl->status = 0;
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
!cl->dma_mapped || cl->status,
dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (cl->dma_mapped && !cl->status)
cl->status = -EFAULT;
rets = cl->status;
if (!rets)
mei_cl_dma_free(cl);
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
mei_io_cb_free(cb);
return rets;
}
|
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
#include "dce/dce_opp.h"
#include "dce110_opp_v.h"
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
static const struct opp_funcs funcs = {
.opp_set_dyn_expansion = dce110_opp_set_dyn_expansion,
.opp_destroy = dce110_opp_destroy,
.opp_program_fmt = dce110_opp_program_fmt,
.opp_program_bit_depth_reduction =
dce110_opp_program_bit_depth_reduction
};
void dce110_opp_v_construct(struct dce110_opp *opp110,
struct dc_context *ctx)
{
opp110->base.funcs = &funcs;
opp110->base.ctx = ctx;
}
|
// SPDX-License-Identifier: GPL-2.0+
/*
* virtio-snd: Virtio sound device
* Copyright (C) 2021 OpenSynergy GmbH
*/
#include <linux/moduleparam.h>
#include <linux/virtio_config.h>
#include "virtio_card.h"
static u32 pcm_buffer_ms = 160;
module_param(pcm_buffer_ms, uint, 0644);
MODULE_PARM_DESC(pcm_buffer_ms, "PCM substream buffer time in milliseconds");
static u32 pcm_periods_min = 2;
module_param(pcm_periods_min, uint, 0644);
MODULE_PARM_DESC(pcm_periods_min, "Minimum number of PCM periods");
static u32 pcm_periods_max = 16;
module_param(pcm_periods_max, uint, 0644);
MODULE_PARM_DESC(pcm_periods_max, "Maximum number of PCM periods");
static u32 pcm_period_ms_min = 10;
module_param(pcm_period_ms_min, uint, 0644);
MODULE_PARM_DESC(pcm_period_ms_min, "Minimum PCM period time in milliseconds");
static u32 pcm_period_ms_max = 80;
module_param(pcm_period_ms_max, uint, 0644);
MODULE_PARM_DESC(pcm_period_ms_max, "Maximum PCM period time in milliseconds");
/* Map for converting VirtIO format to ALSA format. */
static const snd_pcm_format_t g_v2a_format_map[] = {
[VIRTIO_SND_PCM_FMT_IMA_ADPCM] = SNDRV_PCM_FORMAT_IMA_ADPCM,
[VIRTIO_SND_PCM_FMT_MU_LAW] = SNDRV_PCM_FORMAT_MU_LAW,
[VIRTIO_SND_PCM_FMT_A_LAW] = SNDRV_PCM_FORMAT_A_LAW,
[VIRTIO_SND_PCM_FMT_S8] = SNDRV_PCM_FORMAT_S8,
[VIRTIO_SND_PCM_FMT_U8] = SNDRV_PCM_FORMAT_U8,
[VIRTIO_SND_PCM_FMT_S16] = SNDRV_PCM_FORMAT_S16_LE,
[VIRTIO_SND_PCM_FMT_U16] = SNDRV_PCM_FORMAT_U16_LE,
[VIRTIO_SND_PCM_FMT_S18_3] = SNDRV_PCM_FORMAT_S18_3LE,
[VIRTIO_SND_PCM_FMT_U18_3] = SNDRV_PCM_FORMAT_U18_3LE,
[VIRTIO_SND_PCM_FMT_S20_3] = SNDRV_PCM_FORMAT_S20_3LE,
[VIRTIO_SND_PCM_FMT_U20_3] = SNDRV_PCM_FORMAT_U20_3LE,
[VIRTIO_SND_PCM_FMT_S24_3] = SNDRV_PCM_FORMAT_S24_3LE,
[VIRTIO_SND_PCM_FMT_U24_3] = SNDRV_PCM_FORMAT_U24_3LE,
[VIRTIO_SND_PCM_FMT_S20] = SNDRV_PCM_FORMAT_S20_LE,
[VIRTIO_SND_PCM_FMT_U20] = SNDRV_PCM_FORMAT_U20_LE,
[VIRTIO_SND_PCM_FMT_S24] = SNDRV_PCM_FORMAT_S24_LE,
[VIRTIO_SND_PCM_FMT_U24] = SNDRV_PCM_FORMAT_U24_LE,
[VIRTIO_SND_PCM_FMT_S32] = SNDRV_PCM_FORMAT_S32_LE,
[VIRTIO_SND_PCM_FMT_U32] = SNDRV_PCM_FORMAT_U32_LE,
[VIRTIO_SND_PCM_FMT_FLOAT] = SNDRV_PCM_FORMAT_FLOAT_LE,
[VIRTIO_SND_PCM_FMT_FLOAT64] = SNDRV_PCM_FORMAT_FLOAT64_LE,
[VIRTIO_SND_PCM_FMT_DSD_U8] = SNDRV_PCM_FORMAT_DSD_U8,
[VIRTIO_SND_PCM_FMT_DSD_U16] = SNDRV_PCM_FORMAT_DSD_U16_LE,
[VIRTIO_SND_PCM_FMT_DSD_U32] = SNDRV_PCM_FORMAT_DSD_U32_LE,
[VIRTIO_SND_PCM_FMT_IEC958_SUBFRAME] =
SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE
};
/* Map for converting VirtIO frame rate to ALSA frame rate. */
struct virtsnd_v2a_rate {
unsigned int alsa_bit;
unsigned int rate;
};
static const struct virtsnd_v2a_rate g_v2a_rate_map[] = {
[VIRTIO_SND_PCM_RATE_5512] = { SNDRV_PCM_RATE_5512, 5512 },
[VIRTIO_SND_PCM_RATE_8000] = { SNDRV_PCM_RATE_8000, 8000 },
[VIRTIO_SND_PCM_RATE_11025] = { SNDRV_PCM_RATE_11025, 11025 },
[VIRTIO_SND_PCM_RATE_16000] = { SNDRV_PCM_RATE_16000, 16000 },
[VIRTIO_SND_PCM_RATE_22050] = { SNDRV_PCM_RATE_22050, 22050 },
[VIRTIO_SND_PCM_RATE_32000] = { SNDRV_PCM_RATE_32000, 32000 },
[VIRTIO_SND_PCM_RATE_44100] = { SNDRV_PCM_RATE_44100, 44100 },
[VIRTIO_SND_PCM_RATE_48000] = { SNDRV_PCM_RATE_48000, 48000 },
[VIRTIO_SND_PCM_RATE_64000] = { SNDRV_PCM_RATE_64000, 64000 },
[VIRTIO_SND_PCM_RATE_88200] = { SNDRV_PCM_RATE_88200, 88200 },
[VIRTIO_SND_PCM_RATE_96000] = { SNDRV_PCM_RATE_96000, 96000 },
[VIRTIO_SND_PCM_RATE_176400] = { SNDRV_PCM_RATE_176400, 176400 },
[VIRTIO_SND_PCM_RATE_192000] = { SNDRV_PCM_RATE_192000, 192000 }
};
/**
* virtsnd_pcm_build_hw() - Parse substream config and build HW descriptor.
* @vss: VirtIO substream.
* @info: VirtIO substream information entry.
*
* Context: Any context.
* Return: 0 on success, -EINVAL if configuration is invalid.
*/
static int virtsnd_pcm_build_hw(struct virtio_pcm_substream *vss,
struct virtio_snd_pcm_info *info)
{
struct virtio_device *vdev = vss->snd->vdev;
unsigned int i;
u64 values;
size_t sample_max = 0;
size_t sample_min = 0;
vss->features = le32_to_cpu(info->features);
/*
* TODO: set SNDRV_PCM_INFO_{BATCH,BLOCK_TRANSFER} if device supports
* only message-based transport.
*/
vss->hw.info =
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_BATCH |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_NO_REWINDS |
SNDRV_PCM_INFO_SYNC_APPLPTR;
if (!info->channels_min || info->channels_min > info->channels_max) {
dev_err(&vdev->dev,
"SID %u: invalid channel range [%u %u]\n",
vss->sid, info->channels_min, info->channels_max);
return -EINVAL;
}
vss->hw.channels_min = info->channels_min;
vss->hw.channels_max = info->channels_max;
values = le64_to_cpu(info->formats);
vss->hw.formats = 0;
for (i = 0; i < ARRAY_SIZE(g_v2a_format_map); ++i)
if (values & (1ULL << i)) {
snd_pcm_format_t alsa_fmt = g_v2a_format_map[i];
int bytes = snd_pcm_format_physical_width(alsa_fmt) / 8;
if (!sample_min || sample_min > bytes)
sample_min = bytes;
if (sample_max < bytes)
sample_max = bytes;
vss->hw.formats |= pcm_format_to_bits(alsa_fmt);
}
if (!vss->hw.formats) {
dev_err(&vdev->dev,
"SID %u: no supported PCM sample formats found\n",
vss->sid);
return -EINVAL;
}
values = le64_to_cpu(info->rates);
vss->hw.rates = 0;
for (i = 0; i < ARRAY_SIZE(g_v2a_rate_map); ++i)
if (values & (1ULL << i)) {
if (!vss->hw.rate_min ||
vss->hw.rate_min > g_v2a_rate_map[i].rate)
vss->hw.rate_min = g_v2a_rate_map[i].rate;
if (vss->hw.rate_max < g_v2a_rate_map[i].rate)
vss->hw.rate_max = g_v2a_rate_map[i].rate;
vss->hw.rates |= g_v2a_rate_map[i].alsa_bit;
}
if (!vss->hw.rates) {
dev_err(&vdev->dev,
"SID %u: no supported PCM frame rates found\n",
vss->sid);
return -EINVAL;
}
vss->hw.periods_min = pcm_periods_min;
vss->hw.periods_max = pcm_periods_max;
/*
* We must ensure that there is enough space in the buffer to store
* pcm_buffer_ms ms for the combination (Cmax, Smax, Rmax), where:
* Cmax = maximum supported number of channels,
* Smax = maximum supported sample size in bytes,
* Rmax = maximum supported frame rate.
*/
vss->hw.buffer_bytes_max =
PAGE_ALIGN(sample_max * vss->hw.channels_max * pcm_buffer_ms *
(vss->hw.rate_max / MSEC_PER_SEC));
/*
* We must ensure that the minimum period size is enough to store
* pcm_period_ms_min ms for the combination (Cmin, Smin, Rmin), where:
* Cmin = minimum supported number of channels,
* Smin = minimum supported sample size in bytes,
* Rmin = minimum supported frame rate.
*/
vss->hw.period_bytes_min =
sample_min * vss->hw.channels_min * pcm_period_ms_min *
(vss->hw.rate_min / MSEC_PER_SEC);
/*
* We must ensure that the maximum period size is enough to store
* pcm_period_ms_max ms for the combination (Cmax, Smax, Rmax).
*/
vss->hw.period_bytes_max =
sample_max * vss->hw.channels_max * pcm_period_ms_max *
(vss->hw.rate_max / MSEC_PER_SEC);
return 0;
}
/**
* virtsnd_pcm_find() - Find the PCM device for the specified node ID.
* @snd: VirtIO sound device.
* @nid: Function node ID.
*
* Context: Any context.
* Return: a pointer to the PCM device or ERR_PTR(-ENOENT).
*/
struct virtio_pcm *virtsnd_pcm_find(struct virtio_snd *snd, u32 nid)
{
struct virtio_pcm *vpcm;
list_for_each_entry(vpcm, &snd->pcm_list, list)
if (vpcm->nid == nid)
return vpcm;
return ERR_PTR(-ENOENT);
}
/**
* virtsnd_pcm_find_or_create() - Find or create the PCM device for the
* specified node ID.
* @snd: VirtIO sound device.
* @nid: Function node ID.
*
* Context: Any context that permits sleeping.
* Return: a pointer to the PCM device or ERR_PTR(-errno).
*/
struct virtio_pcm *virtsnd_pcm_find_or_create(struct virtio_snd *snd, u32 nid)
{
struct virtio_device *vdev = snd->vdev;
struct virtio_pcm *vpcm;
vpcm = virtsnd_pcm_find(snd, nid);
if (!IS_ERR(vpcm))
return vpcm;
vpcm = devm_kzalloc(&vdev->dev, sizeof(*vpcm), GFP_KERNEL);
if (!vpcm)
return ERR_PTR(-ENOMEM);
vpcm->nid = nid;
list_add_tail(&vpcm->list, &snd->pcm_list);
return vpcm;
}
/**
* virtsnd_pcm_validate() - Validate if the device can be started.
* @vdev: VirtIO parent device.
*
* Context: Any context.
* Return: 0 on success, -EINVAL on failure.
*/
int virtsnd_pcm_validate(struct virtio_device *vdev)
{
if (pcm_periods_min < 2 || pcm_periods_min > pcm_periods_max) {
dev_err(&vdev->dev,
"invalid range [%u %u] of the number of PCM periods\n",
pcm_periods_min, pcm_periods_max);
return -EINVAL;
}
if (!pcm_period_ms_min || pcm_period_ms_min > pcm_period_ms_max) {
dev_err(&vdev->dev,
"invalid range [%u %u] of the size of the PCM period\n",
pcm_period_ms_min, pcm_period_ms_max);
return -EINVAL;
}
if (pcm_buffer_ms < pcm_periods_min * pcm_period_ms_min) {
dev_err(&vdev->dev,
"pcm_buffer_ms(=%u) value cannot be < %u ms\n",
pcm_buffer_ms, pcm_periods_min * pcm_period_ms_min);
return -EINVAL;
}
if (pcm_period_ms_max > pcm_buffer_ms / 2) {
dev_err(&vdev->dev,
"pcm_period_ms_max(=%u) value cannot be > %u ms\n",
pcm_period_ms_max, pcm_buffer_ms / 2);
return -EINVAL;
}
return 0;
}
/**
* virtsnd_pcm_period_elapsed() - Kernel work function to handle the elapsed
* period state.
* @work: Elapsed period work.
*
* The main purpose of this function is to call snd_pcm_period_elapsed() in
* a process context, not in an interrupt context. This is necessary because PCM
* devices operate in non-atomic mode.
*
* Context: Process context.
*/
static void virtsnd_pcm_period_elapsed(struct work_struct *work)
{
struct virtio_pcm_substream *vss =
container_of(work, struct virtio_pcm_substream, elapsed_period);
snd_pcm_period_elapsed(vss->substream);
}
/**
* virtsnd_pcm_parse_cfg() - Parse the stream configuration.
* @snd: VirtIO sound device.
*
* This function is called during initial device initialization.
*
* Context: Any context that permits sleeping.
* Return: 0 on success, -errno on failure.
*/
int virtsnd_pcm_parse_cfg(struct virtio_snd *snd)
{
struct virtio_device *vdev = snd->vdev;
struct virtio_snd_pcm_info *info;
u32 i;
int rc;
virtio_cread_le(vdev, struct virtio_snd_config, streams,
&snd->nsubstreams);
if (!snd->nsubstreams)
return 0;
snd->substreams = devm_kcalloc(&vdev->dev, snd->nsubstreams,
sizeof(*snd->substreams), GFP_KERNEL);
if (!snd->substreams)
return -ENOMEM;
info = kcalloc(snd->nsubstreams, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
rc = virtsnd_ctl_query_info(snd, VIRTIO_SND_R_PCM_INFO, 0,
snd->nsubstreams, sizeof(*info), info);
if (rc)
goto on_exit;
for (i = 0; i < snd->nsubstreams; ++i) {
struct virtio_pcm_substream *vss = &snd->substreams[i];
struct virtio_pcm *vpcm;
vss->snd = snd;
vss->sid = i;
INIT_WORK(&vss->elapsed_period, virtsnd_pcm_period_elapsed);
init_waitqueue_head(&vss->msg_empty);
spin_lock_init(&vss->lock);
rc = virtsnd_pcm_build_hw(vss, &info[i]);
if (rc)
goto on_exit;
vss->nid = le32_to_cpu(info[i].hdr.hda_fn_nid);
vpcm = virtsnd_pcm_find_or_create(snd, vss->nid);
if (IS_ERR(vpcm)) {
rc = PTR_ERR(vpcm);
goto on_exit;
}
switch (info[i].direction) {
case VIRTIO_SND_D_OUTPUT:
vss->direction = SNDRV_PCM_STREAM_PLAYBACK;
break;
case VIRTIO_SND_D_INPUT:
vss->direction = SNDRV_PCM_STREAM_CAPTURE;
break;
default:
dev_err(&vdev->dev, "SID %u: unknown direction (%u)\n",
vss->sid, info[i].direction);
rc = -EINVAL;
goto on_exit;
}
vpcm->streams[vss->direction].nsubstreams++;
}
on_exit:
kfree(info);
return rc;
}
/**
* virtsnd_pcm_build_devs() - Build ALSA PCM devices.
* @snd: VirtIO sound device.
*
* Context: Any context that permits sleeping.
* Return: 0 on success, -errno on failure.
*/
int virtsnd_pcm_build_devs(struct virtio_snd *snd)
{
struct virtio_device *vdev = snd->vdev;
struct virtio_pcm *vpcm;
u32 i;
int rc;
list_for_each_entry(vpcm, &snd->pcm_list, list) {
unsigned int npbs =
vpcm->streams[SNDRV_PCM_STREAM_PLAYBACK].nsubstreams;
unsigned int ncps =
vpcm->streams[SNDRV_PCM_STREAM_CAPTURE].nsubstreams;
if (!npbs && !ncps)
continue;
rc = snd_pcm_new(snd->card, VIRTIO_SND_CARD_DRIVER, vpcm->nid,
npbs, ncps, &vpcm->pcm);
if (rc) {
dev_err(&vdev->dev, "snd_pcm_new[%u] failed: %d\n",
vpcm->nid, rc);
return rc;
}
vpcm->pcm->info_flags = 0;
vpcm->pcm->dev_class = SNDRV_PCM_CLASS_GENERIC;
vpcm->pcm->dev_subclass = SNDRV_PCM_SUBCLASS_GENERIC_MIX;
snprintf(vpcm->pcm->name, sizeof(vpcm->pcm->name),
VIRTIO_SND_PCM_NAME " %u", vpcm->pcm->device);
vpcm->pcm->private_data = vpcm;
vpcm->pcm->nonatomic = true;
for (i = 0; i < ARRAY_SIZE(vpcm->streams); ++i) {
struct virtio_pcm_stream *stream = &vpcm->streams[i];
if (!stream->nsubstreams)
continue;
stream->substreams =
devm_kcalloc(&vdev->dev, stream->nsubstreams,
sizeof(*stream->substreams),
GFP_KERNEL);
if (!stream->substreams)
return -ENOMEM;
stream->nsubstreams = 0;
}
}
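/* Associate each VirtIO substream with its parent PCM stream. */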
for (i = 0; i < snd->nsubstreams; ++i) {
struct virtio_pcm_stream *vs;
struct virtio_pcm_substream *vss = &snd->substreams[i];
vpcm = virtsnd_pcm_find(snd, vss->nid);
if (IS_ERR(vpcm))
return PTR_ERR(vpcm);
vs = &vpcm->streams[vss->direction];
vs->substreams[vs->nsubstreams++] = vss;
}
list_for_each_entry(vpcm, &snd->pcm_list, list) {
for (i = 0; i < ARRAY_SIZE(vpcm->streams); ++i) {
struct virtio_pcm_stream *vs = &vpcm->streams[i];
struct snd_pcm_str *ks = &vpcm->pcm->streams[i];
struct snd_pcm_substream *kss;
if (!vs->nsubstreams)
continue;
for (kss = ks->substream; kss; kss = kss->next)
vs->substreams[kss->number]->substream = kss;
snd_pcm_set_ops(vpcm->pcm, i, &virtsnd_pcm_ops[i]);
}
snd_pcm_set_managed_buffer_all(vpcm->pcm,
SNDRV_DMA_TYPE_VMALLOC, NULL,
0, 0);
}
return 0;
}
/**
* virtsnd_pcm_event() - Handle the PCM device event notification.
* @snd: VirtIO sound device.
* @event: VirtIO sound event.
*
* Context: Interrupt context.
*/
void virtsnd_pcm_event(struct virtio_snd *snd, struct virtio_snd_event *event)
{
struct virtio_pcm_substream *vss;
u32 sid = le32_to_cpu(event->data);
if (sid >= snd->nsubstreams)
return;
vss = &snd->substreams[sid];
switch (le32_to_cpu(event->hdr.code)) {
case VIRTIO_SND_EVT_PCM_PERIOD_ELAPSED:
/* TODO: deal with shmem elapsed period */
break;
case VIRTIO_SND_EVT_PCM_XRUN:
spin_lock(&vss->lock);
if (vss->xfer_enabled)
vss->xfer_xrun = true;
spin_unlock(&vss->lock);
break;
}
}
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2009 Felix Fietkau <[email protected]>
* Copyright (C) 2011-2012 Gabor Juhos <[email protected]>
* Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2016 John Crispin <[email protected]>
*/
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <net/dsa.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/mdio.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <linux/etherdevice.h>
#include <linux/dsa/tag_qca.h>
#include "qca8k.h"
#include "qca8k_leds.h"
static void
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{
regaddr >>= 1;
*r1 = regaddr & 0x1e;
regaddr >>= 5;
*r2 = regaddr & 0x7;
regaddr >>= 3;
*page = regaddr & 0x3ff;
}
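/*
 * Worked example for qca8k_split_addr() above (pure arithmetic from the
 * shifts): for reg 0x16ac, regaddr >>= 1 gives 0xb56, so
 * r1 = 0xb56 & 0x1e = 0x16; a further >>= 5 gives 0x5a, so
 * r2 = 0x5a & 0x7 = 0x2; and >>= 3 gives 0xb, so page = 0xb. The 32-bit
 * register is then reached as two 16-bit MII accesses at phy address
 * (0x10 | r2), regnums r1 and r1 + 1.
 */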
static int
qca8k_mii_write_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
{
int ret;
u16 lo;
lo = val & 0xffff;
ret = bus->write(bus, phy_id, regnum, lo);
if (ret < 0)
dev_err_ratelimited(&bus->dev,
"failed to write qca8k 32bit lo register\n");
return ret;
}
static int
qca8k_mii_write_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
{
int ret;
u16 hi;
hi = (u16)(val >> 16);
ret = bus->write(bus, phy_id, regnum, hi);
if (ret < 0)
dev_err_ratelimited(&bus->dev,
"failed to write qca8k 32bit hi register\n");
return ret;
}
static int
qca8k_mii_read_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
int ret;
ret = bus->read(bus, phy_id, regnum);
if (ret < 0)
goto err;
*val = ret & 0xffff;
return 0;
err:
dev_err_ratelimited(&bus->dev,
"failed to read qca8k 32bit lo register\n");
*val = 0;
return ret;
}
static int
qca8k_mii_read_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
int ret;
ret = bus->read(bus, phy_id, regnum);
if (ret < 0)
goto err;
*val = ret << 16;
return 0;
err:
dev_err_ratelimited(&bus->dev,
"failed to read qca8k 32bit hi register\n");
*val = 0;
return ret;
}
static int
qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
u32 hi, lo;
int ret;
*val = 0;
ret = qca8k_mii_read_lo(bus, phy_id, regnum, &lo);
if (ret < 0)
goto err;
ret = qca8k_mii_read_hi(bus, phy_id, regnum + 1, &hi);
if (ret < 0)
goto err;
*val = lo | hi;
err:
return ret;
}
static void
qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
{
if (qca8k_mii_write_lo(bus, phy_id, regnum, val) < 0)
return;
qca8k_mii_write_hi(bus, phy_id, regnum + 1, val);
}
static int
qca8k_set_page(struct qca8k_priv *priv, u16 page)
{
u16 *cached_page = &priv->mdio_cache.page;
struct mii_bus *bus = priv->bus;
int ret;
if (page == *cached_page)
return 0;
ret = bus->write(bus, 0x18, 0, page);
if (ret < 0) {
dev_err_ratelimited(&bus->dev,
"failed to set qca8k page\n");
return ret;
}
*cached_page = page;
usleep_range(1000, 2000);
return 0;
}
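/*
 * Note on qca8k_set_page() above: given the split in qca8k_split_addr(),
 * each page selects a 512-byte window of the register space. The cached
 * page lets back-to-back accesses within the same window skip both the
 * extra bus write and the settle sleep.
 */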
static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
struct qca8k_mgmt_eth_data *mgmt_eth_data;
struct qca8k_priv *priv = ds->priv;
struct qca_mgmt_ethhdr *mgmt_ethhdr;
u32 command;
u8 len, cmd;
int i;
mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
mgmt_eth_data = &priv->mgmt_eth_data;
command = get_unaligned_le32(&mgmt_ethhdr->command);
cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
	/* Special case for a len of 15, as this is the max value for len and
	 * needs to be incremented before converting it from words to dwords.
	 */
if (len == 15)
len++;
	/* We can ignore odd values; they are always rounded up in the alloc function. */
len *= sizeof(u16);
	/* Make sure the seq matches the requested packet */
if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
mgmt_eth_data->ack = true;
if (cmd == MDIO_READ) {
u32 *val = mgmt_eth_data->data;
*val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);
		/* Get the rest of the 12 bytes of data.
		 * The read/write function will extract the requested data.
		 */
if (len > QCA_HDR_MGMT_DATA1_LEN) {
__le32 *data2 = (__le32 *)skb->data;
int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
len - QCA_HDR_MGMT_DATA1_LEN);
val++;
for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
*val = get_unaligned_le32(data2);
val++;
data2++;
}
}
}
complete(&mgmt_eth_data->rw_done);
}
static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
int priority, unsigned int len)
{
struct qca_mgmt_ethhdr *mgmt_ethhdr;
unsigned int real_len;
struct sk_buff *skb;
__le32 *data2;
u32 command;
u16 hdr;
int i;
skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
if (!skb)
return NULL;
	/* The hdr mgmt length value is in steps of the word size.
	 * As an example, to process 4 bytes of data the correct length to set is 2;
	 * to process 8 bytes it is 4, for 12 bytes 6, for 16 bytes 8...
	 *
	 * Odd values will always return the next size on the ack packet.
	 * (A length of 3 (6 bytes) will always return 8 bytes of data.)
	 *
	 * This means that a value of 15 (0xf) actually means reading/writing 32 bytes
	 * of data.
	 *
	 * To correctly calculate the length we divide the requested len by the
	 * word size and round up.
	 * In the ack function we can skip the odd check as we already handle the
	 * case here.
	 */
real_len = DIV_ROUND_UP(len, sizeof(u16));
	/* If the resulting len is odd, round up once more to the next size.
	 * (A length of 3 will be increased to 4, as the switch will always
	 * return 8 bytes.)
	 */
if (real_len % sizeof(u16) != 0)
real_len++;
	/* The max field value is 0xf (15), but the switch will always return the next size (32 bytes) */
if (real_len == 16)
real_len--;
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb->len);
mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
hdr |= QCA_HDR_XMIT_FROM_CPU;
hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
QCA_HDR_MGMT_CHECK_CODE_VAL);
put_unaligned_le32(command, &mgmt_ethhdr->command);
if (cmd == MDIO_WRITE)
put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);
mgmt_ethhdr->hdr = htons(hdr);
data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
len - QCA_HDR_MGMT_DATA1_LEN);
val++;
for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
put_unaligned_le32(*val, data2);
data2++;
val++;
}
}
return skb;
}
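/*
 * Worked examples of the length encoding above: len 4 -> real_len 2;
 * len 6 -> DIV_ROUND_UP(6, 2) = 3, odd, so real_len 4; len 32 ->
 * real_len 16, capped to the max field value 15 (which the switch
 * interprets as 32 bytes anyway).
 */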
static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
{
struct qca_mgmt_ethhdr *mgmt_ethhdr;
u32 seq;
seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
put_unaligned_le32(seq, &mgmt_ethhdr->seq);
}
static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
struct sk_buff *skb;
bool ack;
int ret;
skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
QCA8K_ETHERNET_MDIO_PRIORITY, len);
if (!skb)
return -ENOMEM;
mutex_lock(&mgmt_eth_data->mutex);
	/* Check if the mgmt_conduit is operational */
if (!priv->mgmt_conduit) {
kfree_skb(skb);
mutex_unlock(&mgmt_eth_data->mutex);
return -EINVAL;
}
skb->dev = priv->mgmt_conduit;
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the mdio pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
*val = mgmt_eth_data->data[0];
if (len > QCA_HDR_MGMT_DATA1_LEN)
memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
ack = mgmt_eth_data->ack;
mutex_unlock(&mgmt_eth_data->mutex);
if (ret <= 0)
return -ETIMEDOUT;
if (!ack)
return -EINVAL;
return 0;
}
static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
struct sk_buff *skb;
bool ack;
int ret;
skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
QCA8K_ETHERNET_MDIO_PRIORITY, len);
if (!skb)
return -ENOMEM;
mutex_lock(&mgmt_eth_data->mutex);
	/* Check if the mgmt_conduit is operational */
if (!priv->mgmt_conduit) {
kfree_skb(skb);
mutex_unlock(&mgmt_eth_data->mutex);
return -EINVAL;
}
skb->dev = priv->mgmt_conduit;
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the mdio pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
ack = mgmt_eth_data->ack;
mutex_unlock(&mgmt_eth_data->mutex);
if (ret <= 0)
return -ETIMEDOUT;
if (!ack)
return -EINVAL;
return 0;
}
static int
qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
{
u32 val = 0;
int ret;
ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
if (ret)
return ret;
val &= ~mask;
val |= write_val;
return qca8k_write_eth(priv, reg, &val, sizeof(val));
}
static int
qca8k_read_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t *val)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
int ret;
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
return ret;
}
static int
qca8k_write_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t val)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
int ret;
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
qca8k_mii_write32(bus, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
return ret;
}
static int
qca8k_regmap_update_bits_mii(struct qca8k_priv *priv, uint32_t reg,
uint32_t mask, uint32_t write_val)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
if (ret < 0)
goto exit;
val &= ~mask;
val |= write_val;
qca8k_mii_write32(bus, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
return ret;
}
static int
qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len,
void *val_buf, size_t val_len)
{
int i, count = val_len / sizeof(u32), ret;
struct qca8k_priv *priv = ctx;
u32 reg = *(u16 *)reg_buf;
if (priv->mgmt_conduit &&
!qca8k_read_eth(priv, reg, val_buf, val_len))
return 0;
	/* Loop count times, incrementing reg by 4 */
for (i = 0; i < count; i++, reg += sizeof(u32)) {
ret = qca8k_read_mii(priv, reg, val_buf + i);
if (ret < 0)
return ret;
}
return 0;
}
static int
qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len,
const void *val_buf, size_t val_len)
{
int i, count = val_len / sizeof(u32), ret;
struct qca8k_priv *priv = ctx;
u32 reg = *(u16 *)reg_buf;
u32 *val = (u32 *)val_buf;
if (priv->mgmt_conduit &&
!qca8k_write_eth(priv, reg, val, val_len))
return 0;
	/* Loop count times, incrementing reg by 4 and advancing the val
	 * pointer to the next value.
	 */
for (i = 0; i < count; i++, reg += sizeof(u32), val++) {
ret = qca8k_write_mii(priv, reg, *val);
if (ret < 0)
return ret;
}
return 0;
}
static int
qca8k_bulk_write(void *ctx, const void *data, size_t bytes)
{
return qca8k_bulk_gather_write(ctx, data, sizeof(u16), data + sizeof(u16),
bytes - sizeof(u16));
}
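/*
 * Note on qca8k_bulk_write() above: regmap hands us a single buffer
 * where the first 2 bytes are the 16-bit register address
 * (.reg_bits = 16 below) and the remainder is the value payload, hence
 * the sizeof(u16) split. E.g. writing one 32-bit register arrives here
 * as bytes == 6 (2-byte reg + 4-byte value).
 */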
static int
qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
{
struct qca8k_priv *priv = ctx;
if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
return 0;
return qca8k_regmap_update_bits_mii(priv, reg, mask, write_val);
}
static const struct regmap_config qca8k_regmap_config = {
.reg_bits = 16,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x16ac, /* end MIB - Port6 range */
.read = qca8k_bulk_read,
.write = qca8k_bulk_write,
.reg_update_bits = qca8k_regmap_update_bits,
.rd_table = &qca8k_readable_table,
.disable_locking = true, /* Locking is handled by qca8k read/write */
.cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
	.max_raw_read = 32, /* mgmt eth can read up to 8 registers at a time */
	/* ATU regs suffer from a bug where some data is not correctly
	 * written. Disable bulk writes to correctly write ATU entries.
	 */
.use_single_write = true,
};
static int
qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
struct sk_buff *read_skb, u32 *val)
{
struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
bool ack;
int ret;
if (!skb)
return -ENOMEM;
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the copy pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
if (ret <= 0)
return -ETIMEDOUT;
if (!ack)
return -EINVAL;
*val = mgmt_eth_data->data[0];
return 0;
}
static int
qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
int regnum, u16 data)
{
struct sk_buff *write_skb, *clear_skb, *read_skb;
struct qca8k_mgmt_eth_data *mgmt_eth_data;
u32 write_val, clear_val = 0, val;
struct net_device *mgmt_conduit;
int ret, ret1;
bool ack;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
mgmt_eth_data = &priv->mgmt_eth_data;
write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum);
if (read) {
write_val |= QCA8K_MDIO_MASTER_READ;
} else {
write_val |= QCA8K_MDIO_MASTER_WRITE;
write_val |= QCA8K_MDIO_MASTER_DATA(data);
}
/* Prealloc all the needed skb before the lock */
write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
if (!write_skb)
return -ENOMEM;
clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
if (!clear_skb) {
ret = -ENOMEM;
goto err_clear_skb;
}
read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
if (!read_skb) {
ret = -ENOMEM;
goto err_read_skb;
}
/* It seems that accessing the switch's internal PHYs via management
* packets still uses the MDIO bus within the switch internally, and
* these accesses can conflict with external MDIO accesses to other
* devices on the MDIO bus.
* We therefore need to lock the MDIO bus onto which the switch is
* connected.
*/
mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
/* Actually start the request:
* 1. Send mdio master packet
* 2. Busy Wait for mdio master command
* 3. Get the data if we are reading
* 4. Reset the mdio master (even with error)
*/
mutex_lock(&mgmt_eth_data->mutex);
/* Check if mgmt_conduit is operational */
mgmt_conduit = priv->mgmt_conduit;
if (!mgmt_conduit) {
mutex_unlock(&mgmt_eth_data->mutex);
mutex_unlock(&priv->bus->mdio_lock);
ret = -EINVAL;
goto err_mgmt_conduit;
}
read_skb->dev = mgmt_conduit;
clear_skb->dev = mgmt_conduit;
write_skb->dev = mgmt_conduit;
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the write pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(write_skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
if (ret <= 0) {
ret = -ETIMEDOUT;
kfree_skb(read_skb);
goto exit;
}
if (!ack) {
ret = -EINVAL;
kfree_skb(read_skb);
goto exit;
}
ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
!(val & QCA8K_MDIO_MASTER_BUSY), 0,
QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
mgmt_eth_data, read_skb, &val);
if (ret < 0 && ret1 < 0) {
ret = ret1;
goto exit;
}
if (read) {
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the read pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(read_skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
if (ret <= 0) {
ret = -ETIMEDOUT;
goto exit;
}
if (!ack) {
ret = -EINVAL;
goto exit;
}
ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
} else {
kfree_skb(read_skb);
}
exit:
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the clear pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(clear_skb);
wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
mutex_unlock(&mgmt_eth_data->mutex);
mutex_unlock(&priv->bus->mdio_lock);
return ret;
/* Error handling before lock */
err_mgmt_conduit:
kfree_skb(read_skb);
err_read_skb:
kfree_skb(clear_skb);
err_clear_skb:
kfree_skb(write_skb);
return ret;
}
static int
qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
u16 r1, r2, page;
u32 val;
int ret, ret1;
qca8k_split_addr(reg, &r1, &r2, &page);
ret = read_poll_timeout(qca8k_mii_read_hi, ret1, !(val & mask), 0,
QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
bus, 0x10 | r2, r1 + 1, &val);
	/* Check if qca8k_read has failed for a different reason
	 * before returning -ETIMEDOUT
	 */
if (ret < 0 && ret1 < 0)
return ret1;
return ret;
}
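/*
 * Note on qca8k_mdio_busy_wait() above: only the high 16-bit half of
 * the register (regnum r1 + 1) is polled. qca8k_mii_read_hi() returns
 * that half shifted into the upper 16 bits, which is where the polled
 * mask (QCA8K_MDIO_MASTER_BUSY) must sit for the test to work, so a
 * single MII read per poll iteration is enough.
 */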
static int
qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
QCA8K_MDIO_MASTER_DATA(data);
qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
qca8k_mii_write32(bus, 0x10 | r2, r1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
exit:
	/* Even if the busy_wait times out, try to clear the MASTER_EN */
qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
mutex_unlock(&bus->mdio_lock);
return ret;
}
static int
qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum);
qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
if (ret)
goto exit;
ret = qca8k_mii_read_lo(bus, 0x10 | r2, r1, &val);
exit:
	/* Even if the busy_wait times out, try to clear the MASTER_EN */
qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
mutex_unlock(&bus->mdio_lock);
if (ret >= 0)
ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
return ret;
}
static int
qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
{
struct qca8k_priv *priv = slave_bus->priv;
int ret;
/* Use mdio Ethernet when available, fallback to legacy one on error */
ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
if (!ret)
return 0;
return qca8k_mdio_write(priv, phy, regnum, data);
}
static int
qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
{
struct qca8k_priv *priv = slave_bus->priv;
int ret;
/* Use mdio Ethernet when available, fallback to legacy one on error */
ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
if (ret >= 0)
return ret;
ret = qca8k_mdio_read(priv, phy, regnum);
if (ret < 0)
return 0xffff;
return ret;
}
static int
qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
{
port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
}
static int
qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
{
port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
return qca8k_internal_mdio_read(slave_bus, port, regnum);
}
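/*
 * Note on the legacy helpers above: qca8k_port_to_phy() lives outside
 * this file (in qca8k.h), so the exact mapping is an assumption here;
 * conventionally user port N corresponds to internal PHY address N - 1,
 * and the modulo keeps the result within the valid PHY address range.
 */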
static int
qca8k_mdio_register(struct qca8k_priv *priv)
{
struct dsa_switch *ds = priv->ds;
struct device *dev = ds->dev;
struct device_node *mdio;
struct mii_bus *bus;
int ret = 0;
mdio = of_get_child_by_name(dev->of_node, "mdio");
if (mdio && !of_device_is_available(mdio))
goto out_put_node;
bus = devm_mdiobus_alloc(dev);
if (!bus) {
ret = -ENOMEM;
goto out_put_node;
}
priv->internal_mdio_bus = bus;
bus->priv = (void *)priv;
snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
ds->dst->index, ds->index);
bus->parent = dev;
if (mdio) {
/* Check if the device tree declares the port:phy mapping */
bus->name = "qca8k user mii";
bus->read = qca8k_internal_mdio_read;
bus->write = qca8k_internal_mdio_write;
} else {
		/* If a mapping can't be found, the legacy mapping based on
		 * qca8k_port_to_phy() is used
		 */
ds->user_mii_bus = bus;
bus->phy_mask = ~ds->phys_mii_mask;
bus->name = "qca8k-legacy user mii";
bus->read = qca8k_legacy_mdio_read;
bus->write = qca8k_legacy_mdio_write;
}
ret = devm_of_mdiobus_register(dev, bus, mdio);
out_put_node:
of_node_put(mdio);
return ret;
}
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
struct device_node *ports, *port;
phy_interface_t mode;
int ret;
ports = of_get_child_by_name(priv->dev->of_node, "ports");
if (!ports)
ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
if (!ports)
return -EINVAL;
for_each_available_child_of_node(ports, port) {
		ret = of_property_read_u32(port, "reg", &reg);
if (ret) {
of_node_put(port);
of_node_put(ports);
return ret;
}
if (!dsa_is_user_port(priv->ds, reg))
continue;
of_get_phy_mode(port, &mode);
if (of_property_read_bool(port, "phy-handle") &&
mode != PHY_INTERFACE_MODE_INTERNAL)
external_mdio_mask |= BIT(reg);
else
internal_mdio_mask |= BIT(reg);
}
of_node_put(ports);
if (!external_mdio_mask && !internal_mdio_mask) {
dev_err(priv->dev, "no PHYs are defined.\n");
return -EINVAL;
}
/* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
* the MDIO_MASTER register also _disconnects_ the external MDC
* passthrough to the internal PHYs. It's not possible to use both
* configurations at the same time!
*
* Because this came up during the review process:
* If the external mdio-bus driver is capable magically disabling
* the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
* accessors for the time being, it would be possible to pull this
* off.
*/
if (!!external_mdio_mask && !!internal_mdio_mask) {
dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
return -EINVAL;
}
if (external_mdio_mask) {
		/* Make sure to disable the internal mdio bus in case
		 * a dt-overlay or driver reload changed the configuration
		 */
return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_EN);
}
return qca8k_mdio_register(priv);
}
static int
qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
{
u32 mask = 0;
int ret = 0;
	/* SoC-specific settings for ipq8064.
	 * If more devices require this, consider adding
	 * a dedicated binding.
	 */
if (of_machine_is_compatible("qcom,ipq8064"))
mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
/* SoC specific settings for ipq8065 */
if (of_machine_is_compatible("qcom,ipq8065"))
mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
if (mask) {
ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
QCA8K_MAC_PWR_RGMII0_1_8V |
QCA8K_MAC_PWR_RGMII1_1_8V,
mask);
}
return ret;
}
static int qca8k_find_cpu_port(struct dsa_switch *ds)
{
struct qca8k_priv *priv = ds->priv;
	/* Find the connected cpu port. Valid ports are 0 and 6 */
if (dsa_is_cpu_port(ds, 0))
return 0;
dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
if (dsa_is_cpu_port(ds, 6))
return 6;
return -EINVAL;
}
static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
const struct qca8k_match_data *data = priv->info;
struct device_node *node = priv->dev->of_node;
u32 val = 0;
int ret;
	/* The QCA8327 requires the correct package mode to be set.
	 * Its bigger brother, the QCA8328, has the 172-pin layout.
	 * This should be applied by default, but we set it just to make sure.
	 */
if (priv->switch_id == QCA8K_ID_QCA8327) {
/* Set the correct package of 148 pin for QCA8327 */
if (data->reduced_package)
val |= QCA8327_PWS_PACKAGE148_EN;
ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
val);
if (ret)
return ret;
}
if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
val |= QCA8K_PWS_POWER_ON_SEL;
if (of_property_read_bool(node, "qca,led-open-drain")) {
if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
return -EINVAL;
}
val |= QCA8K_PWS_LED_OPEN_EN_CSR;
}
return qca8k_rmw(priv, QCA8K_REG_PWS,
QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
val);
}
static int
qca8k_parse_port_config(struct qca8k_priv *priv)
{
int port, cpu_port_index = -1, ret;
struct device_node *port_dn;
phy_interface_t mode;
struct dsa_port *dp;
u32 delay;
	/* We have 2 CPU ports. Check them. */
for (port = 0; port < QCA8K_NUM_PORTS; port++) {
		/* Skip all ports other than the two CPU ports (0 and 6) */
if (port != 0 && port != 6)
continue;
dp = dsa_to_port(priv->ds, port);
port_dn = dp->dn;
cpu_port_index++;
if (!of_device_is_available(port_dn))
continue;
ret = of_get_phy_mode(port_dn, &mode);
if (ret)
continue;
switch (mode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_SGMII:
delay = 0;
if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
				/* Switch regs accept values in ns, convert ps to ns */
delay = delay / 1000;
else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
mode == PHY_INTERFACE_MODE_RGMII_TXID)
delay = 1;
if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
delay = 3;
}
priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
delay = 0;
if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
				/* Switch regs accept values in ns, convert ps to ns */
delay = delay / 1000;
else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
mode == PHY_INTERFACE_MODE_RGMII_RXID)
delay = 2;
if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
delay = 3;
}
priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
/* Skip sgmii parsing for rgmii* mode */
if (mode == PHY_INTERFACE_MODE_RGMII ||
mode == PHY_INTERFACE_MODE_RGMII_ID ||
mode == PHY_INTERFACE_MODE_RGMII_TXID ||
mode == PHY_INTERFACE_MODE_RGMII_RXID)
break;
if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
priv->ports_config.sgmii_tx_clk_falling_edge = true;
if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
priv->ports_config.sgmii_rx_clk_falling_edge = true;
if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
priv->ports_config.sgmii_enable_pll = true;
if (priv->switch_id == QCA8K_ID_QCA8327) {
dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
priv->ports_config.sgmii_enable_pll = false;
}
if (priv->switch_revision < 2)
dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
}
break;
default:
continue;
}
}
return 0;
}
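/*
 * Illustrative (hypothetical) device tree fragment for the bindings
 * parsed above; values here are examples, not taken from a real board:
 *
 *	port@0 {
 *		reg = <0>;
 *		phy-mode = "rgmii";
 *		tx-internal-delay-ps = <2000>;	// 2000 ps -> 2 ns -> field 2
 *		rx-internal-delay-ps = <1000>;	// 1000 ps -> 1 ns -> field 1
 *	};
 *
 * With phy-mode "rgmii-id" and no *-internal-delay-ps properties, the
 * defaults of 1 ns (tx) and 2 ns (rx) are applied instead.
 */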
static void
qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
u32 reg)
{
u32 delay, val = 0;
int ret;
	/* The delay can be declared in 3 different ways:
	 * phy mode set to rgmii with the standard internal-delay binding,
	 * or the rgmii-id or rgmii-tx/rxid phy modes.
	 * The parse logic sets a delay different from 0 only when one
	 * of the 3 ways is used. In all other cases the delay is
	 * not enabled. With ID or TX/RXID the delay is enabled and set
	 * to the default and recommended value.
	 */
if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
}
if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
}
/* Set RGMII delay based on the selected values */
ret = qca8k_rmw(priv, reg,
QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
val);
if (ret)
dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
}
static struct phylink_pcs *
qca8k_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
struct dsa_port *dp = dsa_phylink_to_port(config);
struct qca8k_priv *priv = dp->ds->priv;
struct phylink_pcs *pcs = NULL;
int port = dp->index;
switch (interface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
switch (port) {
case 0:
pcs = &priv->pcs_port_0.pcs;
break;
case 6:
pcs = &priv->pcs_port_6.pcs;
break;
}
break;
default:
break;
}
return pcs;
}
static void
qca8k_phylink_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
struct dsa_port *dp = dsa_phylink_to_port(config);
struct dsa_switch *ds = dp->ds;
struct qca8k_priv *priv;
int port = dp->index;
int cpu_port_index;
u32 reg;
priv = ds->priv;
switch (port) {
case 0: /* 1st CPU port */
if (state->interface != PHY_INTERFACE_MODE_RGMII &&
state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
state->interface != PHY_INTERFACE_MODE_SGMII)
return;
reg = QCA8K_REG_PORT0_PAD_CTRL;
cpu_port_index = QCA8K_CPU_PORT0;
break;
case 1:
case 2:
case 3:
case 4:
case 5:
/* Internal PHY, nothing to do */
return;
case 6: /* 2nd CPU port / external PHY */
if (state->interface != PHY_INTERFACE_MODE_RGMII &&
state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
state->interface != PHY_INTERFACE_MODE_SGMII &&
state->interface != PHY_INTERFACE_MODE_1000BASEX)
return;
reg = QCA8K_REG_PORT6_PAD_CTRL;
cpu_port_index = QCA8K_CPU_PORT6;
break;
default:
dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
return;
}
if (port != 6 && phylink_autoneg_inband(mode)) {
dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
__func__);
return;
}
switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
/* Configure rgmii delay */
qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
		/* The QCA8337 requires the rgmii rx delay to be set for all
		 * ports. This is enabled through PORT5_PAD_CTRL for all
		 * ports, rather than individual port registers.
		 */
if (priv->switch_id == QCA8K_ID_QCA8337)
qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
break;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
/* Enable SGMII on the port */
qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
break;
default:
dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
phy_modes(state->interface), port);
return;
}
}
static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
switch (port) {
case 0: /* 1st CPU port */
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_SGMII,
config->supported_interfaces);
break;
case 1:
case 2:
case 3:
case 4:
case 5:
/* Internal PHY */
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
case 6: /* 2nd CPU port / external PHY */
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_SGMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
config->supported_interfaces);
break;
}
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD;
}
static void
qca8k_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
struct dsa_port *dp = dsa_phylink_to_port(config);
struct qca8k_priv *priv = dp->ds->priv;
qca8k_port_set_status(priv, dp->index, 0);
}
static void
qca8k_phylink_mac_link_up(struct phylink_config *config,
struct phy_device *phydev, unsigned int mode,
phy_interface_t interface, int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct dsa_port *dp = dsa_phylink_to_port(config);
struct qca8k_priv *priv = dp->ds->priv;
int port = dp->index;
u32 reg;
if (phylink_autoneg_inband(mode)) {
reg = QCA8K_PORT_STATUS_LINK_AUTO;
} else {
switch (speed) {
case SPEED_10:
reg = QCA8K_PORT_STATUS_SPEED_10;
break;
case SPEED_100:
reg = QCA8K_PORT_STATUS_SPEED_100;
break;
case SPEED_1000:
reg = QCA8K_PORT_STATUS_SPEED_1000;
break;
default:
reg = QCA8K_PORT_STATUS_LINK_AUTO;
break;
}
if (duplex == DUPLEX_FULL)
reg |= QCA8K_PORT_STATUS_DUPLEX;
if (rx_pause || dsa_port_is_cpu(dp))
reg |= QCA8K_PORT_STATUS_RXFLOW;
if (tx_pause || dsa_port_is_cpu(dp))
reg |= QCA8K_PORT_STATUS_TXFLOW;
}
reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
}
static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
{
return container_of(pcs, struct qca8k_pcs, pcs);
}
static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
int port = pcs_to_qca8k_pcs(pcs)->port;
u32 reg;
int ret;
	ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
if (ret < 0) {
state->link = false;
return;
}
state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
state->an_complete = state->link;
state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
DUPLEX_HALF;
switch (reg & QCA8K_PORT_STATUS_SPEED) {
case QCA8K_PORT_STATUS_SPEED_10:
state->speed = SPEED_10;
break;
case QCA8K_PORT_STATUS_SPEED_100:
state->speed = SPEED_100;
break;
case QCA8K_PORT_STATUS_SPEED_1000:
state->speed = SPEED_1000;
break;
default:
state->speed = SPEED_UNKNOWN;
break;
}
if (reg & QCA8K_PORT_STATUS_RXFLOW)
state->pause |= MLO_PAUSE_RX;
if (reg & QCA8K_PORT_STATUS_TXFLOW)
state->pause |= MLO_PAUSE_TX;
}
static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
int cpu_port_index, ret, port;
u32 reg, val;
port = pcs_to_qca8k_pcs(pcs)->port;
switch (port) {
case 0:
reg = QCA8K_REG_PORT0_PAD_CTRL;
cpu_port_index = QCA8K_CPU_PORT0;
break;
case 6:
reg = QCA8K_REG_PORT6_PAD_CTRL;
cpu_port_index = QCA8K_CPU_PORT6;
break;
default:
WARN_ON(1);
return -EINVAL;
}
/* Enable/disable SerDes auto-negotiation as necessary */
val = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED ?
0 : QCA8K_PWS_SERDES_AEN_DIS;
ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8K_PWS_SERDES_AEN_DIS, val);
if (ret)
return ret;
/* Configure the SGMII parameters */
ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
if (ret)
return ret;
val |= QCA8K_SGMII_EN_SD;
if (priv->ports_config.sgmii_enable_pll)
val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
QCA8K_SGMII_EN_TX;
if (dsa_is_cpu_port(priv->ds, port)) {
/* CPU port, we're talking to the CPU MAC, be a PHY */
val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
val |= QCA8K_SGMII_MODE_CTRL_PHY;
} else if (interface == PHY_INTERFACE_MODE_SGMII) {
val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
val |= QCA8K_SGMII_MODE_CTRL_MAC;
} else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
val |= QCA8K_SGMII_MODE_CTRL_BASEX;
}
qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
	/* The original code reports port instability, as SGMII also
	 * requires the delay to be set. Apply the advised values here or
	 * take them from DT.
	 */
if (interface == PHY_INTERFACE_MODE_SGMII)
qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
	/* For qca8327/qca8328/qca8334/qca8338 there is a single sgmii
	 * instance, and the falling edge is set by writing to the PORT0
	 * PAD reg
	 */
if (priv->switch_id == QCA8K_ID_QCA8327 ||
priv->switch_id == QCA8K_ID_QCA8337)
reg = QCA8K_REG_PORT0_PAD_CTRL;
val = 0;
/* SGMII Clock phase configuration */
if (priv->ports_config.sgmii_rx_clk_falling_edge)
val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
if (priv->ports_config.sgmii_tx_clk_falling_edge)
val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
if (val)
ret = qca8k_rmw(priv, reg,
QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
val);
return 0;
}
static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
{
}
static const struct phylink_pcs_ops qca8k_pcs_ops = {
.pcs_get_state = qca8k_pcs_get_state,
.pcs_config = qca8k_pcs_config,
.pcs_an_restart = qca8k_pcs_an_restart,
};
static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
int port)
{
qpcs->pcs.ops = &qca8k_pcs_ops;
qpcs->pcs.neg_mode = true;
/* We don't have interrupts for link changes, so we need to poll */
qpcs->pcs.poll = true;
qpcs->priv = priv;
qpcs->port = port;
}
static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
struct qca8k_mib_eth_data *mib_eth_data;
struct qca8k_priv *priv = ds->priv;
const struct qca8k_mib_desc *mib;
struct mib_ethhdr *mib_ethhdr;
__le32 *data2;
u8 port;
int i;
mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
mib_eth_data = &priv->mib_eth_data;
	/* The switch autocasts MIB counters for every port. Ignore packets
	 * for other ports and parse only the requested one.
	 */
port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
if (port != mib_eth_data->req_port)
goto exit;
data2 = (__le32 *)skb->data;
for (i = 0; i < priv->info->mib_count; i++) {
mib = &ar8327_mib[i];
		/* The first 3 MIB counters are present in the skb head */
if (i < 3) {
mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
continue;
}
		/* Some MIB counters are 64 bits wide */
if (mib->size == 2)
mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
else
mib_eth_data->data[i] = get_unaligned_le32(data2);
data2 += mib->size;
}
exit:
	/* Complete once all the MIB packets have been received */
if (refcount_dec_and_test(&mib_eth_data->port_parsed))
complete(&mib_eth_data->rw_done);
}
static int
qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct qca8k_mib_eth_data *mib_eth_data;
struct qca8k_priv *priv = ds->priv;
int ret;
mib_eth_data = &priv->mib_eth_data;
mutex_lock(&mib_eth_data->mutex);
reinit_completion(&mib_eth_data->rw_done);
mib_eth_data->req_port = dp->index;
mib_eth_data->data = data;
refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
mutex_lock(&priv->reg_mutex);
/* Send mib autocast request */
ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
QCA8K_MIB_BUSY);
mutex_unlock(&priv->reg_mutex);
if (ret)
goto exit;
ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
exit:
mutex_unlock(&mib_eth_data->mutex);
return ret;
}
static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
{
struct qca8k_priv *priv = ds->priv;
	/* Communicate the switch revision to the internal PHY driver.
	 * Based on the switch revision, different values need to be
	 * set in the dbg and mmd regs on the PHY.
	 * The first 2 bits are used to communicate the switch revision
	 * to the PHY driver.
	 */
if (port > 0 && port < 6)
return priv->switch_revision;
return 0;
}
static enum dsa_tag_protocol
qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_QCA;
}
static void
qca8k_conduit_change(struct dsa_switch *ds, const struct net_device *conduit,
bool operational)
{
struct dsa_port *dp = conduit->dsa_ptr;
struct qca8k_priv *priv = ds->priv;
/* Ethernet MIB/MDIO is only supported for CPU port 0 */
if (dp->index != 0)
return;
mutex_lock(&priv->mgmt_eth_data.mutex);
mutex_lock(&priv->mib_eth_data.mutex);
priv->mgmt_conduit = operational ? (struct net_device *)conduit : NULL;
mutex_unlock(&priv->mib_eth_data.mutex);
mutex_unlock(&priv->mgmt_eth_data.mutex);
}
static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
struct qca_tagger_data *tagger_data;
switch (proto) {
case DSA_TAG_PROTO_QCA:
tagger_data = ds->tagger_data;
tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static void qca8k_setup_hol_fixup(struct qca8k_priv *priv, int port)
{
u32 mask;
switch (port) {
	/* The 2 CPU ports and port 5 require different
	 * priorities than the other ports.
	 */
case 0:
case 5:
case 6:
mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
break;
default:
mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
}
regmap_write(priv->regmap, QCA8K_REG_PORT_HOL_CTRL0(port), mask);
mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
QCA8K_PORT_HOL_CTRL1_WRED_EN;
regmap_update_bits(priv->regmap, QCA8K_REG_PORT_HOL_CTRL1(port),
QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
QCA8K_PORT_HOL_CTRL1_WRED_EN,
mask);
}
static int
qca8k_setup(struct dsa_switch *ds)
{
struct qca8k_priv *priv = ds->priv;
struct dsa_port *dp;
int cpu_port, ret;
u32 mask;
cpu_port = qca8k_find_cpu_port(ds);
if (cpu_port < 0) {
dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
return cpu_port;
}
	/* Parse CPU port config to be used later in phylink mac_config */
ret = qca8k_parse_port_config(priv);
if (ret)
return ret;
ret = qca8k_setup_mdio_bus(priv);
if (ret)
return ret;
ret = qca8k_setup_of_pws_reg(priv);
if (ret)
return ret;
ret = qca8k_setup_mac_pwr_sel(priv);
if (ret)
return ret;
ret = qca8k_setup_led_ctrl(priv);
if (ret)
return ret;
qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
/* Make sure MAC06 is disabled */
ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
if (ret) {
dev_err(priv->dev, "failed disabling MAC06 exchange");
return ret;
}
/* Enable CPU Port */
ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
if (ret) {
dev_err(priv->dev, "failed enabling CPU port");
return ret;
}
/* Enable MIB counters */
ret = qca8k_mib_init(priv);
if (ret)
dev_warn(priv->dev, "mib init failed");
/* Initial setup of all ports */
dsa_switch_for_each_port(dp, ds) {
/* Disable forwarding by default on all ports */
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(dp->index),
QCA8K_PORT_LOOKUP_MEMBER, 0);
if (ret)
return ret;
}
/* Disable MAC by default on all user ports */
dsa_switch_for_each_user_port(dp, ds)
qca8k_port_set_status(priv, dp->index, 0);
/* Enable QCA header mode on all cpu ports */
dsa_switch_for_each_cpu_port(dp, ds) {
ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(dp->index),
FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
if (ret) {
dev_err(priv->dev, "failed enabling QCA header mode on port %d", dp->index);
return ret;
}
}
	/* Forward all unknown frames to the CPU port for Linux processing.
	 * Notice that in a multi-cpu config only one port should be set
	 * for igmp, unknown, multicast and broadcast packets.
	 */
ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
if (ret)
return ret;
/* CPU port gets connected to all user ports of the switch */
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(cpu_port),
QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
if (ret)
return ret;
/* Setup connection between CPU port & user ports
* Individual user ports get connected to CPU port only
*/
dsa_switch_for_each_user_port(dp, ds) {
u8 port = dp->index;
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_MEMBER,
BIT(cpu_port));
if (ret)
return ret;
ret = regmap_clear_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_LEARN);
if (ret)
return ret;
		/* For port-based vlans to work, we need to set the
		 * default egress vid
		 */
ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
QCA8K_EGREES_VLAN_PORT_MASK(port),
QCA8K_EGREES_VLAN_PORT(port, QCA8K_PORT_VID_DEF));
if (ret)
return ret;
ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
if (ret)
return ret;
}
	/* Port 5 of the qca8337 has some problems in flood conditions. The
	 * original legacy driver had specific buffer and priority settings
	 * for the different ports, suggested by the QCA switch team. Add
	 * these missing settings to improve switch stability under load.
	 * This problem is limited to the qca8337; other qca8k switches are
	 * not affected.
	 */
if (priv->switch_id == QCA8K_ID_QCA8337)
dsa_switch_for_each_available_port(dp, ds)
qca8k_setup_hol_fixup(priv, dp->index);
	/* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
if (priv->switch_id == QCA8K_ID_QCA8327) {
mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
mask);
}
	/* Set up our port MTUs to match the power-on defaults */
ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
if (ret)
dev_warn(priv->dev, "failed setting MTU settings");
/* Flush the FDB table */
qca8k_fdb_flush(priv);
	/* Set the min and max ageing values supported */
ds->ageing_time_min = 7000;
ds->ageing_time_max = 458745000;
/* Set max number of LAGs supported */
ds->num_lag_ids = QCA8K_NUM_LAGS;
return 0;
}
static const struct phylink_mac_ops qca8k_phylink_mac_ops = {
.mac_select_pcs = qca8k_phylink_mac_select_pcs,
.mac_config = qca8k_phylink_mac_config,
.mac_link_down = qca8k_phylink_mac_link_down,
.mac_link_up = qca8k_phylink_mac_link_up,
};
static const struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
.get_strings = qca8k_get_strings,
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
.set_ageing_time = qca8k_set_ageing_time,
.get_mac_eee = qca8k_get_mac_eee,
.set_mac_eee = qca8k_set_mac_eee,
.port_enable = qca8k_port_enable,
.port_disable = qca8k_port_disable,
.port_change_mtu = qca8k_port_change_mtu,
.port_max_mtu = qca8k_port_max_mtu,
.port_stp_state_set = qca8k_port_stp_state_set,
.port_pre_bridge_flags = qca8k_port_pre_bridge_flags,
.port_bridge_flags = qca8k_port_bridge_flags,
.port_bridge_join = qca8k_port_bridge_join,
.port_bridge_leave = qca8k_port_bridge_leave,
.port_fast_age = qca8k_port_fast_age,
.port_fdb_add = qca8k_port_fdb_add,
.port_fdb_del = qca8k_port_fdb_del,
.port_fdb_dump = qca8k_port_fdb_dump,
.port_mdb_add = qca8k_port_mdb_add,
.port_mdb_del = qca8k_port_mdb_del,
.port_mirror_add = qca8k_port_mirror_add,
.port_mirror_del = qca8k_port_mirror_del,
.port_vlan_filtering = qca8k_port_vlan_filtering,
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
.phylink_get_caps = qca8k_phylink_get_caps,
.get_phy_flags = qca8k_get_phy_flags,
.port_lag_join = qca8k_port_lag_join,
.port_lag_leave = qca8k_port_lag_leave,
.conduit_state_change = qca8k_conduit_change,
.connect_tag_protocol = qca8k_connect_tag_protocol,
};
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
struct qca8k_priv *priv;
int ret;
	/* Allocate the private data struct so that we can probe the switch's
	 * ID register
	 */
priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
priv->info = of_device_get_match_data(priv->dev);
priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(priv->reset_gpio))
return PTR_ERR(priv->reset_gpio);
if (priv->reset_gpio) {
/* The active low duration must be greater than 10 ms
* and checkpatch.pl wants 20 ms.
*/
msleep(20);
gpiod_set_value_cansleep(priv->reset_gpio, 0);
}
/* Start by setting up the register mapping */
priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
&qca8k_regmap_config);
if (IS_ERR(priv->regmap)) {
dev_err(priv->dev, "regmap initialization failed");
return PTR_ERR(priv->regmap);
}
priv->mdio_cache.page = 0xffff;
/* Check the detected switch id */
ret = qca8k_read_switch_id(priv);
if (ret)
return ret;
priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
mutex_init(&priv->mgmt_eth_data.mutex);
init_completion(&priv->mgmt_eth_data.rw_done);
mutex_init(&priv->mib_eth_data.mutex);
init_completion(&priv->mib_eth_data.rw_done);
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
priv->ds->ops = &qca8k_switch_ops;
priv->ds->phylink_mac_ops = &qca8k_phylink_mac_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
return dsa_register_switch(priv->ds);
}
static void
qca8k_sw_remove(struct mdio_device *mdiodev)
{
struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
int i;
if (!priv)
return;
for (i = 0; i < QCA8K_NUM_PORTS; i++)
qca8k_port_set_status(priv, i, 0);
dsa_unregister_switch(priv->ds);
}
static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
{
struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
dsa_switch_shutdown(priv->ds);
dev_set_drvdata(&mdiodev->dev, NULL);
}
#ifdef CONFIG_PM_SLEEP
static void
qca8k_set_pm(struct qca8k_priv *priv, int enable)
{
int port;
for (port = 0; port < QCA8K_NUM_PORTS; port++) {
/* Do not enable on resume if the port was
* disabled before.
*/
if (!(priv->port_enabled_map & BIT(port)))
continue;
qca8k_port_set_status(priv, port, enable);
}
}
static int qca8k_suspend(struct device *dev)
{
struct qca8k_priv *priv = dev_get_drvdata(dev);
qca8k_set_pm(priv, 0);
return dsa_switch_suspend(priv->ds);
}
static int qca8k_resume(struct device *dev)
{
struct qca8k_priv *priv = dev_get_drvdata(dev);
qca8k_set_pm(priv, 1);
return dsa_switch_resume(priv->ds);
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
qca8k_suspend, qca8k_resume);
static const struct qca8k_info_ops qca8xxx_ops = {
.autocast_mib = qca8k_get_ethtool_stats_eth,
};
static const struct qca8k_match_data qca8327 = {
.id = QCA8K_ID_QCA8327,
.reduced_package = true,
.mib_count = QCA8K_QCA832X_MIB_COUNT,
.ops = &qca8xxx_ops,
};
static const struct qca8k_match_data qca8328 = {
.id = QCA8K_ID_QCA8327,
.mib_count = QCA8K_QCA832X_MIB_COUNT,
.ops = &qca8xxx_ops,
};
static const struct qca8k_match_data qca833x = {
.id = QCA8K_ID_QCA8337,
.mib_count = QCA8K_QCA833X_MIB_COUNT,
.ops = &qca8xxx_ops,
};
static const struct of_device_id qca8k_of_match[] = {
{ .compatible = "qca,qca8327", .data = &qca8327 },
{ .compatible = "qca,qca8328", .data = &qca8328 },
{ .compatible = "qca,qca8334", .data = &qca833x },
{ .compatible = "qca,qca8337", .data = &qca833x },
{ /* sentinel */ },
};
static struct mdio_driver qca8kmdio_driver = {
.probe = qca8k_sw_probe,
.remove = qca8k_sw_remove,
.shutdown = qca8k_sw_shutdown,
.mdiodrv.driver = {
.name = "qca8k",
.of_match_table = qca8k_of_match,
.pm = &qca8k_pm_ops,
},
};
mdio_module_driver(qca8kmdio_driver);
MODULE_AUTHOR("Mathieu Olivari, John Crispin <[email protected]>");
MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qca8k");
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC64_PAGE_H
#define _SPARC64_PAGE_H
#include <linux/const.h>
#include <vdso/page.h>
/* Flushing for D-cache alias handling is only needed if
* the page size is smaller than 16K.
*/
#if PAGE_SHIFT < 14
#define DCACHE_ALIASING_POSSIBLE
#endif
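/* E.g. with 8KB base pages (PAGE_SHIFT == 13), the usual sparc64
 * configuration, DCACHE_ALIASING_POSSIBLE is defined and the flushing
 * paths are compiled in.
 */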
#define HPAGE_SHIFT 23
#define REAL_HPAGE_SHIFT 22
#define HPAGE_16GB_SHIFT 34
#define HPAGE_2GB_SHIFT 31
#define HPAGE_256MB_SHIFT 28
#define HPAGE_64K_SHIFT 16
#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT)
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1UL))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
#define HUGE_MAX_HSTATE 5
#endif
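/* With the values above, a huge page is 8MB (1 << 23) backed by two
 * 4MB "real" hardware pages (1 << 22), so REAL_HPAGE_PER_HPAGE
 * evaluates to 2.
 */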
#ifndef __ASSEMBLY__
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
struct pt_regs;
void hugetlb_setup(struct pt_regs *regs);
#endif
#define WANT_PAGE_VIRTUAL
void _clear_page(void *page);
#define clear_page(X) _clear_page((void *)(X))
struct page;
void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
struct vm_area_struct;
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_HIGHPAGE
void copy_highpage(struct page *to, struct page *from);
/* Unlike sparc32, sparc64's parameter passing API is more
 * sane in that structures which are small enough are passed
 * in registers instead of on the stack. Thus, setting
 * STRICT_MM_TYPECHECKS does not generate worse code, so
 * let's enable it to get the type checking.
 */
#define STRICT_MM_TYPECHECKS
#ifdef STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking.. */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long iopte; } iopte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
#define iopte_val(x) ((x).iopte)
#define pmd_val(x) ((x).pmd)
#define pud_val(x) ((x).pud)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
#define __iopte(x) ((iopte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pud(x) ((pud_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
#else
/* .. while these make it easier on the compiler */
typedef unsigned long pte_t;
typedef unsigned long iopte_t;
typedef unsigned long pmd_t;
typedef unsigned long pud_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
#define pte_val(x) (x)
#define iopte_val(x) (x)
#define pmd_val(x) (x)
#define pud_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
#define __pte(x) (x)
#define __iopte(x) (x)
#define __pmd(x) (x)
#define __pud(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)
#endif /* (STRICT_MM_TYPECHECKS) */
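/* Illustrative sketch (not part of this header) of what the strict
 * types catch:
 *
 *	pte_t pte = __pte(0x123UL);
 *	unsigned long raw = pte_val(pte);	// explicit unwrap: OK
 *	pmd_t pmd = pte;	// compile error with STRICT_MM_TYPECHECKS
 *
 * With the plain unsigned long variants the last line would compile
 * silently.
 */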
typedef pte_t *pgtable_t;
extern unsigned long sparc64_va_hole_top;
extern unsigned long sparc64_va_hole_bottom;
/* The next two defines specify the actual exclusion region we
* enforce, wherein we use a 4GB red zone on each side of the VA hole.
*/
#define VA_EXCLUDE_START (sparc64_va_hole_bottom - (1UL << 32UL))
#define VA_EXCLUDE_END (sparc64_va_hole_top + (1UL << 32UL))
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
_AC(0x0000000070000000,UL) : \
VA_EXCLUDE_END)
#include <asm-generic/memory_model.h>
extern unsigned long PAGE_OFFSET;
#endif /* !(__ASSEMBLY__) */
/* The maximum number of physical memory address bits we support. The
* largest value we can support is whatever "KPGD_SHIFT + KPTE_BITS"
* evaluates to.
*/
#define MAX_PHYS_ADDRESS_BITS 53
#define ILOG2_4MB 22
#define ILOG2_256MB 28
#ifndef __ASSEMBLY__
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_phys __pa
#define phys_to_virt __va
#endif /* !(__ASSEMBLY__) */
#include <asm-generic/getorder.h>
#endif /* _SPARC64_PAGE_H */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS filesystem directory editing
*
* Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/iversion.h>
#include "internal.h"
#include "xdr_fs.h"
/*
* Find a number of contiguous clear bits in a directory block bitmask.
*
* There are 64 slots, which means we can load the entire bitmap into a
* variable. The first bit doesn't count as it corresponds to the block header
* slot. nr_slots is between 1 and 9.
*/
static int afs_find_contig_bits(union afs_xdr_dir_block *block, unsigned int nr_slots)
{
u64 bitmap;
u32 mask;
int bit, n;
bitmap = (u64)block->hdr.bitmap[0] << 0 * 8;
bitmap |= (u64)block->hdr.bitmap[1] << 1 * 8;
bitmap |= (u64)block->hdr.bitmap[2] << 2 * 8;
bitmap |= (u64)block->hdr.bitmap[3] << 3 * 8;
bitmap |= (u64)block->hdr.bitmap[4] << 4 * 8;
bitmap |= (u64)block->hdr.bitmap[5] << 5 * 8;
bitmap |= (u64)block->hdr.bitmap[6] << 6 * 8;
bitmap |= (u64)block->hdr.bitmap[7] << 7 * 8;
bitmap >>= 1; /* The first entry is metadata */
bit = 1;
mask = (1 << nr_slots) - 1;
do {
if (sizeof(unsigned long) == 8)
n = ffz(bitmap);
else
n = ((u32)bitmap) != 0 ?
ffz((u32)bitmap) :
ffz((u32)(bitmap >> 32)) + 32;
bitmap >>= n;
bit += n;
if ((bitmap & mask) == 0) {
if (bit > 64 - nr_slots)
return -1;
return bit;
}
n = __ffs(bitmap);
bitmap >>= n;
bit += n;
} while (bitmap);
return -1;
}
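/* Worked example, assuming nr_slots == 2: for a packed bitmap of
 * 0x0000000000000007 (header slot 0 plus dirent slots 1 and 2 in use),
 * the "bitmap >>= 1" above leaves 0b11.  ffz() finds the first clear
 * bit at n == 2, so bit becomes 3, the shifted bitmap & mask is zero,
 * and the function returns slot 3 - the first free pair of slots.
 */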
/*
* Set a number of contiguous bits in the directory block bitmap.
*/
static void afs_set_contig_bits(union afs_xdr_dir_block *block,
int bit, unsigned int nr_slots)
{
u64 mask;
mask = (1 << nr_slots) - 1;
mask <<= bit;
block->hdr.bitmap[0] |= (u8)(mask >> 0 * 8);
block->hdr.bitmap[1] |= (u8)(mask >> 1 * 8);
block->hdr.bitmap[2] |= (u8)(mask >> 2 * 8);
block->hdr.bitmap[3] |= (u8)(mask >> 3 * 8);
block->hdr.bitmap[4] |= (u8)(mask >> 4 * 8);
block->hdr.bitmap[5] |= (u8)(mask >> 5 * 8);
block->hdr.bitmap[6] |= (u8)(mask >> 6 * 8);
block->hdr.bitmap[7] |= (u8)(mask >> 7 * 8);
}
/*
* Clear a number of contiguous bits in the directory block bitmap.
*/
static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
int bit, unsigned int nr_slots)
{
u64 mask;
mask = (1 << nr_slots) - 1;
mask <<= bit;
block->hdr.bitmap[0] &= ~(u8)(mask >> 0 * 8);
block->hdr.bitmap[1] &= ~(u8)(mask >> 1 * 8);
block->hdr.bitmap[2] &= ~(u8)(mask >> 2 * 8);
block->hdr.bitmap[3] &= ~(u8)(mask >> 3 * 8);
block->hdr.bitmap[4] &= ~(u8)(mask >> 4 * 8);
block->hdr.bitmap[5] &= ~(u8)(mask >> 5 * 8);
block->hdr.bitmap[6] &= ~(u8)(mask >> 6 * 8);
block->hdr.bitmap[7] &= ~(u8)(mask >> 7 * 8);
}
/*
* Get a new directory folio.
*/
static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index)
{
struct address_space *mapping = vnode->netfs.inode.i_mapping;
struct folio *folio;
folio = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
mapping->gfp_mask);
if (IS_ERR(folio)) {
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
return NULL;
}
if (!folio_test_private(folio))
folio_attach_private(folio, (void *)1);
return folio;
}
/*
* Scan a directory block looking for a dirent of the right name.
*/
static int afs_dir_scan_block(const union afs_xdr_dir_block *block, const struct qstr *name,
unsigned int blocknum)
{
const union afs_xdr_dirent *de;
u64 bitmap;
int d, len, n;
_enter("");
bitmap = (u64)block->hdr.bitmap[0] << 0 * 8;
bitmap |= (u64)block->hdr.bitmap[1] << 1 * 8;
bitmap |= (u64)block->hdr.bitmap[2] << 2 * 8;
bitmap |= (u64)block->hdr.bitmap[3] << 3 * 8;
bitmap |= (u64)block->hdr.bitmap[4] << 4 * 8;
bitmap |= (u64)block->hdr.bitmap[5] << 5 * 8;
bitmap |= (u64)block->hdr.bitmap[6] << 6 * 8;
bitmap |= (u64)block->hdr.bitmap[7] << 7 * 8;
for (d = (blocknum == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS);
d < AFS_DIR_SLOTS_PER_BLOCK;
d++) {
if (!((bitmap >> d) & 1))
continue;
de = &block->dirents[d];
if (de->u.valid != 1)
continue;
/* The block was NUL-terminated by afs_dir_check_page(). */
len = strlen(de->u.name);
if (len == name->len &&
memcmp(de->u.name, name->name, name->len) == 0)
return d;
n = round_up(12 + len + 1 + 4, AFS_DIR_DIRENT_SIZE);
n /= AFS_DIR_DIRENT_SIZE;
d += n - 1;
}
return -1;
}
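/* A worked example of the slot arithmetic above, assuming the usual
 * 32-byte AFS_DIR_DIRENT_SIZE: per the round_up() expression, a name
 * costs 12 bytes of fixed dirent fields plus the name, its NUL and a
 * 4-byte margin.  A 15-char name therefore needs
 * round_up(12 + 15 + 1 + 4, 32) / 32 == 1 slot, while a 20-char name
 * needs 64 / 32 == 2 slots and the loop skips one extra index.
 */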
/*
* Initialise a new directory block. Note that block 0 is special and contains
* some extra metadata.
*/
static void afs_edit_init_block(union afs_xdr_dir_block *meta,
union afs_xdr_dir_block *block, int block_num)
{
memset(block, 0, sizeof(*block));
block->hdr.npages = htons(1);
block->hdr.magic = AFS_DIR_MAGIC;
block->hdr.bitmap[0] = 1;
if (block_num == 0) {
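		/* Block 0 additionally reserves slots 0-12 (0xff plus
		 * 0x1f is thirteen bits) for the header and the
		 * allocation counters. */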
block->hdr.bitmap[0] = 0xff;
block->hdr.bitmap[1] = 0x1f;
memset(block->meta.alloc_ctrs,
AFS_DIR_SLOTS_PER_BLOCK,
sizeof(block->meta.alloc_ctrs));
meta->meta.alloc_ctrs[0] =
AFS_DIR_SLOTS_PER_BLOCK - AFS_DIR_RESV_BLOCKS0;
}
if (block_num < AFS_DIR_BLOCKS_WITH_CTR)
meta->meta.alloc_ctrs[block_num] =
AFS_DIR_SLOTS_PER_BLOCK - AFS_DIR_RESV_BLOCKS;
}
/*
* Edit a directory's file data to add a new directory entry. Doing this after
* create, mkdir, symlink, link or rename if the data version number is
* incremented by exactly one avoids the need to re-download the entire
* directory contents.
*
* The caller must hold the inode locked.
*/
void afs_edit_dir_add(struct afs_vnode *vnode,
struct qstr *name, struct afs_fid *new_fid,
enum afs_edit_dir_reason why)
{
union afs_xdr_dir_block *meta, *block;
union afs_xdr_dirent *de;
struct folio *folio0, *folio;
unsigned int need_slots, nr_blocks, b;
pgoff_t index;
loff_t i_size;
int slot;
_enter(",,{%d,%s},", name->len, name->name);
i_size = i_size_read(&vnode->netfs.inode);
if (i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
(i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
return;
}
folio0 = afs_dir_get_folio(vnode, 0);
if (!folio0) {
_leave(" [fgp]");
return;
}
/* Work out how many slots we're going to need. */
need_slots = afs_dir_calc_slots(name->len);
meta = kmap_local_folio(folio0, 0);
if (i_size == 0)
goto new_directory;
nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
/* Find a block that has sufficient slots available. Each folio
* contains two or more directory blocks.
*/
for (b = 0; b < nr_blocks + 1; b++) {
/* If the directory extended into a new folio, then we need to
* tack a new folio on the end.
*/
index = b / AFS_DIR_BLOCKS_PER_PAGE;
if (nr_blocks >= AFS_DIR_MAX_BLOCKS)
goto error;
if (index >= folio_nr_pages(folio0)) {
folio = afs_dir_get_folio(vnode, index);
if (!folio)
goto error;
} else {
folio = folio0;
}
block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_pos(folio));
/* Abandon the edit if we got a callback break. */
if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
goto invalidated;
_debug("block %u: %2u %3u %u",
b,
(b < AFS_DIR_BLOCKS_WITH_CTR) ? meta->meta.alloc_ctrs[b] : 99,
ntohs(block->hdr.npages),
ntohs(block->hdr.magic));
/* Initialise the block if necessary. */
if (b == nr_blocks) {
_debug("init %u", b);
afs_edit_init_block(meta, block, b);
afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
}
/* Only lower dir blocks have a counter in the header. */
if (b >= AFS_DIR_BLOCKS_WITH_CTR ||
meta->meta.alloc_ctrs[b] >= need_slots) {
/* We need to try and find one or more consecutive
* slots to hold the entry.
*/
slot = afs_find_contig_bits(block, need_slots);
if (slot >= 0) {
_debug("slot %u", slot);
goto found_space;
}
}
kunmap_local(block);
if (folio != folio0) {
folio_unlock(folio);
folio_put(folio);
}
}
/* There are no spare slots of sufficient size, yet the operation
* succeeded. Download the directory again.
*/
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_nospc, 0, 0, 0, 0, name->name);
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out_unmap;
new_directory:
afs_edit_init_block(meta, meta, 0);
i_size = AFS_DIR_BLOCK_SIZE;
afs_set_i_size(vnode, i_size);
slot = AFS_DIR_RESV_BLOCKS0;
folio = folio0;
block = kmap_local_folio(folio, 0);
nr_blocks = 1;
b = 0;
found_space:
/* Set the dirent slot. */
trace_afs_edit_dir(vnode, why, afs_edit_dir_create, b, slot,
new_fid->vnode, new_fid->unique, name->name);
de = &block->dirents[slot];
de->u.valid = 1;
de->u.unused[0] = 0;
de->u.hash_next = 0; // TODO: Really need to maintain this
de->u.vnode = htonl(new_fid->vnode);
de->u.unique = htonl(new_fid->unique);
memcpy(de->u.name, name->name, name->len + 1);
de->u.name[name->len] = 0;
/* Adjust the bitmap. */
afs_set_contig_bits(block, slot, need_slots);
kunmap_local(block);
if (folio != folio0) {
folio_unlock(folio);
folio_put(folio);
}
/* Adjust the allocation counter. */
if (b < AFS_DIR_BLOCKS_WITH_CTR)
meta->meta.alloc_ctrs[b] -= need_slots;
inode_inc_iversion_raw(&vnode->netfs.inode);
afs_stat_v(vnode, n_dir_cr);
_debug("Insert %s in %u[%u]", name->name, b, slot);
out_unmap:
kunmap_local(meta);
folio_unlock(folio0);
folio_put(folio0);
_leave("");
return;
invalidated:
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_inval, 0, 0, 0, 0, name->name);
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
kunmap_local(block);
if (folio != folio0) {
folio_unlock(folio);
folio_put(folio);
}
goto out_unmap;
error:
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_error, 0, 0, 0, 0, name->name);
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out_unmap;
}
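/* Sketch of a typical caller (the real call sites live in fs/afs/dir.c;
 * the names here are illustrative): once a create RPC has succeeded and
 * the data version moved by exactly 1, the entry is spliced in locally
 * with the directory inode still locked, as required above:
 *
 *	afs_edit_dir_add(dvnode, &dentry->d_name, &newfid,
 *			 afs_edit_dir_for_create);
 */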
/*
 * Edit a directory's file data to remove a directory entry.  Doing this
* after unlink, rmdir or rename if the data version number is incremented by
* exactly one avoids the need to re-download the entire directory contents.
*
* The caller must hold the inode locked.
*/
void afs_edit_dir_remove(struct afs_vnode *vnode,
struct qstr *name, enum afs_edit_dir_reason why)
{
union afs_xdr_dir_block *meta, *block;
union afs_xdr_dirent *de;
struct folio *folio0, *folio;
unsigned int need_slots, nr_blocks, b;
pgoff_t index;
loff_t i_size;
int slot;
_enter(",,{%d,%s},", name->len, name->name);
i_size = i_size_read(&vnode->netfs.inode);
if (i_size < AFS_DIR_BLOCK_SIZE ||
i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
(i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
return;
}
nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
folio0 = afs_dir_get_folio(vnode, 0);
if (!folio0) {
_leave(" [fgp]");
return;
}
/* Work out how many slots we're going to discard. */
need_slots = afs_dir_calc_slots(name->len);
meta = kmap_local_folio(folio0, 0);
	/* Scan the blocks looking for the dirent to remove.  Each folio
	 * contains two or more directory blocks.
	 */
for (b = 0; b < nr_blocks; b++) {
index = b / AFS_DIR_BLOCKS_PER_PAGE;
if (index >= folio_nr_pages(folio0)) {
folio = afs_dir_get_folio(vnode, index);
if (!folio)
goto error;
} else {
folio = folio0;
}
block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_pos(folio));
/* Abandon the edit if we got a callback break. */
if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
goto invalidated;
if (b > AFS_DIR_BLOCKS_WITH_CTR ||
meta->meta.alloc_ctrs[b] <= AFS_DIR_SLOTS_PER_BLOCK - 1 - need_slots) {
slot = afs_dir_scan_block(block, name, b);
if (slot >= 0)
goto found_dirent;
}
kunmap_local(block);
if (folio != folio0) {
folio_unlock(folio);
folio_put(folio);
}
}
/* Didn't find the dirent to clobber. Download the directory again. */
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_noent,
0, 0, 0, 0, name->name);
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out_unmap;
found_dirent:
de = &block->dirents[slot];
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete, b, slot,
ntohl(de->u.vnode), ntohl(de->u.unique),
name->name);
memset(de, 0, sizeof(*de) * need_slots);
/* Adjust the bitmap. */
afs_clear_contig_bits(block, slot, need_slots);
kunmap_local(block);
if (folio != folio0) {
folio_unlock(folio);
folio_put(folio);
}
/* Adjust the allocation counter. */
if (b < AFS_DIR_BLOCKS_WITH_CTR)
meta->meta.alloc_ctrs[b] += need_slots;
inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version);
afs_stat_v(vnode, n_dir_rm);
_debug("Remove %s from %u[%u]", name->name, b, slot);
out_unmap:
kunmap_local(meta);
folio_unlock(folio0);
folio_put(folio0);
_leave("");
return;
invalidated:
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_inval,
0, 0, 0, 0, name->name);
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
kunmap_local(block);
if (folio != folio0) {
folio_unlock(folio);
folio_put(folio);
}
goto out_unmap;
error:
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_error,
0, 0, 0, 0, name->name);
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out_unmap;
}
/*
* Edit a subdirectory that has been moved between directories to update the
* ".." entry.
*/
void afs_edit_dir_update_dotdot(struct afs_vnode *vnode, struct afs_vnode *new_dvnode,
enum afs_edit_dir_reason why)
{
union afs_xdr_dir_block *block;
union afs_xdr_dirent *de;
struct folio *folio;
unsigned int nr_blocks, b;
pgoff_t index;
loff_t i_size;
int slot;
_enter("");
i_size = i_size_read(&vnode->netfs.inode);
if (i_size < AFS_DIR_BLOCK_SIZE) {
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
return;
}
nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
	/* Scan the blocks looking for the ".." dirent to update.  Each folio
	 * contains two or more directory blocks.
	 */
for (b = 0; b < nr_blocks; b++) {
index = b / AFS_DIR_BLOCKS_PER_PAGE;
folio = afs_dir_get_folio(vnode, index);
if (!folio)
goto error;
block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_pos(folio));
/* Abandon the edit if we got a callback break. */
if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
goto invalidated;
slot = afs_dir_scan_block(block, &dotdot_name, b);
if (slot >= 0)
goto found_dirent;
kunmap_local(block);
folio_unlock(folio);
folio_put(folio);
}
/* Didn't find the dirent to clobber. Download the directory again. */
trace_afs_edit_dir(vnode, why, afs_edit_dir_update_nodd,
0, 0, 0, 0, "..");
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out;
found_dirent:
de = &block->dirents[slot];
de->u.vnode = htonl(new_dvnode->fid.vnode);
de->u.unique = htonl(new_dvnode->fid.unique);
trace_afs_edit_dir(vnode, why, afs_edit_dir_update_dd, b, slot,
ntohl(de->u.vnode), ntohl(de->u.unique), "..");
kunmap_local(block);
folio_unlock(folio);
folio_put(folio);
inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version);
out:
_leave("");
return;
invalidated:
kunmap_local(block);
folio_unlock(folio);
folio_put(folio);
trace_afs_edit_dir(vnode, why, afs_edit_dir_update_inval,
0, 0, 0, 0, "..");
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out;
error:
trace_afs_edit_dir(vnode, why, afs_edit_dir_update_error,
0, 0, 0, 0, "..");
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out;
}
|
// SPDX-License-Identifier: GPL-2.0
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/blktrace_api.h>
#include <linux/pr.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/io_uring/cmd.h>
#include <uapi/linux/blkdev.h>
#include "blk.h"
static int blkpg_do_ioctl(struct block_device *bdev,
struct blkpg_partition __user *upart, int op)
{
struct gendisk *disk = bdev->bd_disk;
struct blkpg_partition p;
sector_t start, length, capacity, end;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
return -EFAULT;
if (bdev_is_partition(bdev))
return -EINVAL;
if (p.pno <= 0)
return -EINVAL;
if (op == BLKPG_DEL_PARTITION)
return bdev_del_partition(disk, p.pno);
if (p.start < 0 || p.length <= 0 || LLONG_MAX - p.length < p.start)
return -EINVAL;
/* Check that the partition is aligned to the block size */
if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
return -EINVAL;
start = p.start >> SECTOR_SHIFT;
length = p.length >> SECTOR_SHIFT;
capacity = get_capacity(disk);
if (check_add_overflow(start, length, &end))
return -EINVAL;
if (start >= capacity || end > capacity)
return -EINVAL;
switch (op) {
case BLKPG_ADD_PARTITION:
return bdev_add_partition(disk, p.pno, start, length);
case BLKPG_RESIZE_PARTITION:
return bdev_resize_partition(disk, p.pno, start, length);
default:
return -EINVAL;
}
}
static int blkpg_ioctl(struct block_device *bdev,
struct blkpg_ioctl_arg __user *arg)
{
struct blkpg_partition __user *udata;
int op;
if (get_user(op, &arg->op) || get_user(udata, &arg->data))
return -EFAULT;
return blkpg_do_ioctl(bdev, udata, op);
}
#ifdef CONFIG_COMPAT
struct compat_blkpg_ioctl_arg {
compat_int_t op;
compat_int_t flags;
compat_int_t datalen;
compat_caddr_t data;
};
static int compat_blkpg_ioctl(struct block_device *bdev,
struct compat_blkpg_ioctl_arg __user *arg)
{
compat_caddr_t udata;
int op;
if (get_user(op, &arg->op) || get_user(udata, &arg->data))
return -EFAULT;
return blkpg_do_ioctl(bdev, compat_ptr(udata), op);
}
#endif
/*
* Check that [start, start + len) is a valid range from the block device's
* perspective, including verifying that it can be correctly translated into
* logical block addresses.
*/
static int blk_validate_byte_range(struct block_device *bdev,
uint64_t start, uint64_t len)
{
unsigned int bs_mask = bdev_logical_block_size(bdev) - 1;
uint64_t end;
if ((start | len) & bs_mask)
return -EINVAL;
if (!len)
return -EINVAL;
if (check_add_overflow(start, len, &end) || end > bdev_nr_bytes(bdev))
return -EINVAL;
return 0;
}
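/* For example, with 512-byte logical blocks bs_mask is 511: a range of
 * (start 4096, len 512) passes, (4096, 100) and (100, 512) fail the
 * alignment test, and (bdev_nr_bytes() - 512, 1024) fails the
 * end-of-device test.
 */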
static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
unsigned long arg)
{
uint64_t range[2], start, len;
struct bio *prev = NULL, *bio;
sector_t sector, nr_sects;
struct blk_plug plug;
int err;
if (copy_from_user(range, (void __user *)arg, sizeof(range)))
return -EFAULT;
start = range[0];
len = range[1];
if (!bdev_max_discard_sectors(bdev))
return -EOPNOTSUPP;
if (!(mode & BLK_OPEN_WRITE))
return -EBADF;
if (bdev_read_only(bdev))
return -EPERM;
err = blk_validate_byte_range(bdev, start, len);
if (err)
return err;
filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, start + len - 1);
if (err)
goto fail;
sector = start >> SECTOR_SHIFT;
nr_sects = len >> SECTOR_SHIFT;
blk_start_plug(&plug);
while (1) {
if (fatal_signal_pending(current)) {
if (prev)
bio_await_chain(prev);
err = -EINTR;
goto out_unplug;
}
bio = blk_alloc_discard_bio(bdev, §or, &nr_sects,
GFP_KERNEL);
if (!bio)
break;
prev = bio_chain_and_submit(prev, bio);
}
if (prev) {
err = submit_bio_wait(prev);
if (err == -EOPNOTSUPP)
err = 0;
bio_put(prev);
}
out_unplug:
blk_finish_plug(&plug);
fail:
filemap_invalidate_unlock(bdev->bd_mapping);
return err;
}
static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
void __user *argp)
{
uint64_t start, len, end;
uint64_t range[2];
int err;
if (!(mode & BLK_OPEN_WRITE))
return -EBADF;
if (!bdev_max_secure_erase_sectors(bdev))
return -EOPNOTSUPP;
if (copy_from_user(range, argp, sizeof(range)))
return -EFAULT;
start = range[0];
len = range[1];
if ((start & 511) || (len & 511))
return -EINVAL;
if (check_add_overflow(start, len, &end) ||
end > bdev_nr_bytes(bdev))
return -EINVAL;
filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, end - 1);
if (!err)
err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
GFP_KERNEL);
filemap_invalidate_unlock(bdev->bd_mapping);
return err;
}
static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
unsigned long arg)
{
uint64_t range[2];
uint64_t start, end, len;
int err;
if (!(mode & BLK_OPEN_WRITE))
return -EBADF;
if (copy_from_user(range, (void __user *)arg, sizeof(range)))
return -EFAULT;
start = range[0];
len = range[1];
end = start + len - 1;
if (start & 511)
return -EINVAL;
if (len & 511)
return -EINVAL;
if (end >= (uint64_t)bdev_nr_bytes(bdev))
return -EINVAL;
if (end < start)
return -EINVAL;
/* Invalidate the page cache, including dirty pages */
filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, end);
if (err)
goto fail;
err = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
BLKDEV_ZERO_NOUNMAP | BLKDEV_ZERO_KILLABLE);
fail:
filemap_invalidate_unlock(bdev->bd_mapping);
return err;
}
static int put_ushort(unsigned short __user *argp, unsigned short val)
{
return put_user(val, argp);
}
static int put_int(int __user *argp, int val)
{
return put_user(val, argp);
}
static int put_uint(unsigned int __user *argp, unsigned int val)
{
return put_user(val, argp);
}
static int put_long(long __user *argp, long val)
{
return put_user(val, argp);
}
static int put_ulong(unsigned long __user *argp, unsigned long val)
{
return put_user(val, argp);
}
static int put_u64(u64 __user *argp, u64 val)
{
return put_user(val, argp);
}
#ifdef CONFIG_COMPAT
static int compat_put_long(compat_long_t __user *argp, long val)
{
return put_user(val, argp);
}
static int compat_put_ulong(compat_ulong_t __user *argp, compat_ulong_t val)
{
return put_user(val, argp);
}
#endif
#ifdef CONFIG_COMPAT
/*
* This is the equivalent of compat_ptr_ioctl(), to be used by block
* drivers that implement only commands that are completely compatible
* between 32-bit and 64-bit user space
*/
int blkdev_compat_ptr_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned cmd, unsigned long arg)
{
struct gendisk *disk = bdev->bd_disk;
if (disk->fops->ioctl)
return disk->fops->ioctl(bdev, mode, cmd,
(unsigned long)compat_ptr(arg));
return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(blkdev_compat_ptr_ioctl);
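/*
 * A driver whose ioctl commands are all layout-compatible between 32-bit
 * and 64-bit userspace can simply wire this up (sketch; "foo" is a
 * placeholder):
 *
 *	static const struct block_device_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.ioctl		= foo_ioctl,
 *		.compat_ioctl	= blkdev_compat_ptr_ioctl,
 *	};
 */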
#endif
static bool blkdev_pr_allowed(struct block_device *bdev, blk_mode_t mode)
{
/* no sense to make reservations for partitions */
if (bdev_is_partition(bdev))
return false;
if (capable(CAP_SYS_ADMIN))
return true;
/*
* Only allow unprivileged reservations if the file descriptor is open
* for writing.
*/
return mode & BLK_OPEN_WRITE;
}
static int blkdev_pr_register(struct block_device *bdev, blk_mode_t mode,
struct pr_registration __user *arg)
{
const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
struct pr_registration reg;
if (!blkdev_pr_allowed(bdev, mode))
return -EPERM;
if (!ops || !ops->pr_register)
return -EOPNOTSUPP;
if (copy_from_user(®, arg, sizeof(reg)))
return -EFAULT;
if (reg.flags & ~PR_FL_IGNORE_KEY)
return -EOPNOTSUPP;
return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags);
}
static int blkdev_pr_reserve(struct block_device *bdev, blk_mode_t mode,
struct pr_reservation __user *arg)
{
const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
struct pr_reservation rsv;
if (!blkdev_pr_allowed(bdev, mode))
return -EPERM;
if (!ops || !ops->pr_reserve)
return -EOPNOTSUPP;
if (copy_from_user(&rsv, arg, sizeof(rsv)))
return -EFAULT;
if (rsv.flags & ~PR_FL_IGNORE_KEY)
return -EOPNOTSUPP;
return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);
}
static int blkdev_pr_release(struct block_device *bdev, blk_mode_t mode,
struct pr_reservation __user *arg)
{
const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
struct pr_reservation rsv;
if (!blkdev_pr_allowed(bdev, mode))
return -EPERM;
if (!ops || !ops->pr_release)
return -EOPNOTSUPP;
if (copy_from_user(&rsv, arg, sizeof(rsv)))
return -EFAULT;
if (rsv.flags)
return -EOPNOTSUPP;
return ops->pr_release(bdev, rsv.key, rsv.type);
}
static int blkdev_pr_preempt(struct block_device *bdev, blk_mode_t mode,
struct pr_preempt __user *arg, bool abort)
{
const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
struct pr_preempt p;
if (!blkdev_pr_allowed(bdev, mode))
return -EPERM;
if (!ops || !ops->pr_preempt)
return -EOPNOTSUPP;
if (copy_from_user(&p, arg, sizeof(p)))
return -EFAULT;
if (p.flags)
return -EOPNOTSUPP;
return ops->pr_preempt(bdev, p.old_key, p.new_key, p.type, abort);
}
static int blkdev_pr_clear(struct block_device *bdev, blk_mode_t mode,
struct pr_clear __user *arg)
{
const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
struct pr_clear c;
if (!blkdev_pr_allowed(bdev, mode))
return -EPERM;
if (!ops || !ops->pr_clear)
return -EOPNOTSUPP;
if (copy_from_user(&c, arg, sizeof(c)))
return -EFAULT;
if (c.flags)
return -EOPNOTSUPP;
return ops->pr_clear(bdev, c.key);
}
static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd,
unsigned long arg)
{
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
mutex_lock(&bdev->bd_holder_lock);
if (bdev->bd_holder_ops && bdev->bd_holder_ops->sync)
bdev->bd_holder_ops->sync(bdev);
else {
mutex_unlock(&bdev->bd_holder_lock);
sync_blockdev(bdev);
}
invalidate_bdev(bdev);
return 0;
}
static int blkdev_roset(struct block_device *bdev, unsigned cmd,
unsigned long arg)
{
int ret, n;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (get_user(n, (int __user *)arg))
return -EFAULT;
if (bdev->bd_disk->fops->set_read_only) {
ret = bdev->bd_disk->fops->set_read_only(bdev, n);
if (ret)
return ret;
}
if (n)
bdev_set_flag(bdev, BD_READ_ONLY);
else
bdev_clear_flag(bdev, BD_READ_ONLY);
return 0;
}
static int blkdev_getgeo(struct block_device *bdev,
struct hd_geometry __user *argp)
{
struct gendisk *disk = bdev->bd_disk;
struct hd_geometry geo;
int ret;
if (!argp)
return -EINVAL;
if (!disk->fops->getgeo)
return -ENOTTY;
/*
* We need to set the startsect first, the driver may
* want to override it.
*/
memset(&geo, 0, sizeof(geo));
geo.start = get_start_sect(bdev);
ret = disk->fops->getgeo(bdev, &geo);
if (ret)
return ret;
if (copy_to_user(argp, &geo, sizeof(geo)))
return -EFAULT;
return 0;
}
#ifdef CONFIG_COMPAT
struct compat_hd_geometry {
unsigned char heads;
unsigned char sectors;
unsigned short cylinders;
u32 start;
};
static int compat_hdio_getgeo(struct block_device *bdev,
struct compat_hd_geometry __user *ugeo)
{
struct gendisk *disk = bdev->bd_disk;
struct hd_geometry geo;
int ret;
if (!ugeo)
return -EINVAL;
if (!disk->fops->getgeo)
return -ENOTTY;
memset(&geo, 0, sizeof(geo));
/*
* We need to set the startsect first, the driver may
* want to override it.
*/
geo.start = get_start_sect(bdev);
ret = disk->fops->getgeo(bdev, &geo);
if (ret)
return ret;
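	/* Copy heads, sectors and cylinders (the first four bytes of
	 * struct hd_geometry), then the start sector separately: the
	 * compat struct carries it as a u32 rather than unsigned long. */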
ret = copy_to_user(ugeo, &geo, 4);
ret |= put_user(geo.start, &ugeo->start);
if (ret)
ret = -EFAULT;
return ret;
}
#endif
/* set the logical block size */
static int blkdev_bszset(struct file *file, blk_mode_t mode,
int __user *argp)
{
// this one might be file_inode(file)->i_rdev - a rare valid
// use of file_inode() for those.
dev_t dev = I_BDEV(file->f_mapping->host)->bd_dev;
struct file *excl_file;
int ret, n;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!argp)
return -EINVAL;
if (get_user(n, argp))
return -EFAULT;
if (mode & BLK_OPEN_EXCL)
return set_blocksize(file, n);
excl_file = bdev_file_open_by_dev(dev, mode, &dev, NULL);
if (IS_ERR(excl_file))
return -EBUSY;
ret = set_blocksize(excl_file, n);
fput(excl_file);
return ret;
}
/*
* Common commands that are handled the same way on native and compat
* user space. Note the separate arg/argp parameters that are needed
* to deal with the compat_ptr() conversion.
*/
static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg,
void __user *argp)
{
unsigned int max_sectors;
switch (cmd) {
case BLKFLSBUF:
return blkdev_flushbuf(bdev, cmd, arg);
case BLKROSET:
return blkdev_roset(bdev, cmd, arg);
case BLKDISCARD:
return blk_ioctl_discard(bdev, mode, arg);
case BLKSECDISCARD:
return blk_ioctl_secure_erase(bdev, mode, argp);
case BLKZEROOUT:
return blk_ioctl_zeroout(bdev, mode, arg);
case BLKGETDISKSEQ:
return put_u64(argp, bdev->bd_disk->diskseq);
case BLKREPORTZONE:
return blkdev_report_zones_ioctl(bdev, cmd, arg);
case BLKRESETZONE:
case BLKOPENZONE:
case BLKCLOSEZONE:
case BLKFINISHZONE:
return blkdev_zone_mgmt_ioctl(bdev, mode, cmd, arg);
case BLKGETZONESZ:
return put_uint(argp, bdev_zone_sectors(bdev));
case BLKGETNRZONES:
return put_uint(argp, bdev_nr_zones(bdev));
case BLKROGET:
return put_int(argp, bdev_read_only(bdev) != 0);
case BLKSSZGET: /* get block device logical block size */
return put_int(argp, bdev_logical_block_size(bdev));
case BLKPBSZGET: /* get block device physical block size */
return put_uint(argp, bdev_physical_block_size(bdev));
case BLKIOMIN:
return put_uint(argp, bdev_io_min(bdev));
case BLKIOOPT:
return put_uint(argp, bdev_io_opt(bdev));
case BLKALIGNOFF:
return put_int(argp, bdev_alignment_offset(bdev));
case BLKDISCARDZEROES:
return put_uint(argp, 0);
case BLKSECTGET:
max_sectors = min_t(unsigned int, USHRT_MAX,
queue_max_sectors(bdev_get_queue(bdev)));
return put_ushort(argp, max_sectors);
case BLKROTATIONAL:
return put_ushort(argp, !bdev_nonrot(bdev));
case BLKRASET:
case BLKFRASET:
		if (!capable(CAP_SYS_ADMIN))
return -EACCES;
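		/* The argument is in 512-byte sectors; e.g. with 4KiB
		 * pages, BLKRASET with arg == 256 yields ra_pages == 32
		 * (256 * 512 / 4096). */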
bdev->bd_disk->bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKRRPART:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (bdev_is_partition(bdev))
return -EINVAL;
return disk_scan_partitions(bdev->bd_disk,
mode | BLK_OPEN_STRICT_SCAN);
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACETEARDOWN:
return blk_trace_ioctl(bdev, cmd, argp);
case IOC_PR_REGISTER:
return blkdev_pr_register(bdev, mode, argp);
case IOC_PR_RESERVE:
return blkdev_pr_reserve(bdev, mode, argp);
case IOC_PR_RELEASE:
return blkdev_pr_release(bdev, mode, argp);
case IOC_PR_PREEMPT:
return blkdev_pr_preempt(bdev, mode, argp, false);
case IOC_PR_PREEMPT_ABORT:
return blkdev_pr_preempt(bdev, mode, argp, true);
case IOC_PR_CLEAR:
return blkdev_pr_clear(bdev, mode, argp);
default:
return -ENOIOCTLCMD;
}
}
/*
* Always keep this in sync with compat_blkdev_ioctl()
* to handle all incompatible commands in both functions.
*
* New commands must be compatible and go into blkdev_common_ioctl
*/
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
struct block_device *bdev = I_BDEV(file->f_mapping->host);
void __user *argp = (void __user *)arg;
blk_mode_t mode = file_to_blk_mode(file);
int ret;
switch (cmd) {
/* These need separate implementations for the data structure */
case HDIO_GETGEO:
return blkdev_getgeo(bdev, argp);
case BLKPG:
return blkpg_ioctl(bdev, argp);
/* Compat mode returns 32-bit data instead of 'long' */
case BLKRAGET:
case BLKFRAGET:
if (!argp)
return -EINVAL;
return put_long(argp,
(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
case BLKGETSIZE:
if (bdev_nr_sectors(bdev) > ~0UL)
return -EFBIG;
return put_ulong(argp, bdev_nr_sectors(bdev));
/* The data is compatible, but the command number is different */
case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
return put_int(argp, block_size(bdev));
case BLKBSZSET:
return blkdev_bszset(file, mode, argp);
case BLKGETSIZE64:
return put_u64(argp, bdev_nr_bytes(bdev));
/* Incompatible alignment on i386 */
case BLKTRACESETUP:
return blk_trace_ioctl(bdev, cmd, argp);
default:
break;
}
ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
if (ret != -ENOIOCTLCMD)
return ret;
if (!bdev->bd_disk->fops->ioctl)
return -ENOTTY;
return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
}
#ifdef CONFIG_COMPAT
#define BLKBSZGET_32 _IOR(0x12, 112, int)
#define BLKBSZSET_32 _IOW(0x12, 113, int)
#define BLKGETSIZE64_32 _IOR(0x12, 114, int)
/* Most of the generic ioctls are handled in the normal fallback path.
This assumes the blkdev's low level compat_ioctl always returns
ENOIOCTLCMD for unknown ioctls. */
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
int ret;
void __user *argp = compat_ptr(arg);
struct block_device *bdev = I_BDEV(file->f_mapping->host);
struct gendisk *disk = bdev->bd_disk;
blk_mode_t mode = file_to_blk_mode(file);
switch (cmd) {
/* These need separate implementations for the data structure */
case HDIO_GETGEO:
return compat_hdio_getgeo(bdev, argp);
case BLKPG:
return compat_blkpg_ioctl(bdev, argp);
/* Compat mode returns 32-bit data instead of 'long' */
case BLKRAGET:
case BLKFRAGET:
if (!argp)
return -EINVAL;
return compat_put_long(argp,
(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
case BLKGETSIZE:
if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0)
return -EFBIG;
return compat_put_ulong(argp, bdev_nr_sectors(bdev));
/* The data is compatible, but the command number is different */
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
return put_int(argp, bdev_logical_block_size(bdev));
case BLKBSZSET_32:
return blkdev_bszset(file, mode, argp);
case BLKGETSIZE64_32:
return put_u64(argp, bdev_nr_bytes(bdev));
/* Incompatible alignment on i386 */
case BLKTRACESETUP32:
return blk_trace_ioctl(bdev, cmd, argp);
default:
break;
}
ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
return ret;
}
#endif
struct blk_iou_cmd {
int res;
bool nowait;
};
static void blk_cmd_complete(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
if (bic->res == -EAGAIN && bic->nowait)
io_uring_cmd_issue_blocking(cmd);
else
io_uring_cmd_done(cmd, bic->res, 0, issue_flags);
}
static void bio_cmd_bio_end_io(struct bio *bio)
{
struct io_uring_cmd *cmd = bio->bi_private;
struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
if (unlikely(bio->bi_status) && !bic->res)
bic->res = blk_status_to_errno(bio->bi_status);
io_uring_cmd_do_in_task_lazy(cmd, blk_cmd_complete);
bio_put(bio);
}
static int blkdev_cmd_discard(struct io_uring_cmd *cmd,
struct block_device *bdev,
uint64_t start, uint64_t len, bool nowait)
{
struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
gfp_t gfp = nowait ? GFP_NOWAIT : GFP_KERNEL;
sector_t sector = start >> SECTOR_SHIFT;
sector_t nr_sects = len >> SECTOR_SHIFT;
struct bio *prev = NULL, *bio;
int err;
if (!bdev_max_discard_sectors(bdev))
return -EOPNOTSUPP;
if (!(file_to_blk_mode(cmd->file) & BLK_OPEN_WRITE))
return -EBADF;
if (bdev_read_only(bdev))
return -EPERM;
err = blk_validate_byte_range(bdev, start, len);
if (err)
return err;
err = filemap_invalidate_pages(bdev->bd_mapping, start,
start + len - 1, nowait);
if (err)
return err;
while (true) {
bio = blk_alloc_discard_bio(bdev, §or, &nr_sects, gfp);
if (!bio)
break;
if (nowait) {
/*
* Don't allow multi-bio non-blocking submissions as
* subsequent bios may fail but we won't get a direct
* indication of that. Normally, the caller should
* retry from a blocking context.
*/
if (unlikely(nr_sects)) {
bio_put(bio);
return -EAGAIN;
}
bio->bi_opf |= REQ_NOWAIT;
}
prev = bio_chain_and_submit(prev, bio);
}
if (unlikely(!prev))
return -EAGAIN;
if (unlikely(nr_sects))
bic->res = -EAGAIN;
prev->bi_private = cmd;
prev->bi_end_io = bio_cmd_bio_end_io;
submit_bio(prev);
return -EIOCBQUEUED;
}
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
struct block_device *bdev = I_BDEV(cmd->file->f_mapping->host);
struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
const struct io_uring_sqe *sqe = cmd->sqe;
u32 cmd_op = cmd->cmd_op;
uint64_t start, len;
if (unlikely(sqe->ioprio || sqe->__pad1 || sqe->len ||
sqe->rw_flags || sqe->file_index))
return -EINVAL;
bic->res = 0;
bic->nowait = issue_flags & IO_URING_F_NONBLOCK;
start = READ_ONCE(sqe->addr);
len = READ_ONCE(sqe->addr3);
switch (cmd_op) {
case BLOCK_URING_CMD_DISCARD:
return blkdev_cmd_discard(cmd, bdev, start, len, bic->nowait);
}
return -EINVAL;
}
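/*
 * Userspace reaches BLOCK_URING_CMD_DISCARD through IORING_OP_URING_CMD;
 * a raw SQE sketch (field names per struct io_uring_sqe, ring setup and
 * submission elided):
 *
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = bdev_fd;
 *	sqe->cmd_op = BLOCK_URING_CMD_DISCARD;
 *	sqe->addr = start;	// byte offset, logical-block aligned
 *	sqe->addr3 = len;	// byte length, logical-block aligned
 */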
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2011 Cisco Systems, Inc. All rights reserved. */
#ifndef _ENIC_PP_H_
#define _ENIC_PP_H_
#define ENIC_PP_BY_INDEX(enic, vf, pp, err) \
do { \
if (enic_is_valid_pp_vf(enic, vf, err)) \
pp = (vf == PORT_SELF_VF) ? enic->pp : enic->pp + vf; \
else \
pp = NULL; \
} while (0)
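/* Typical use (sketch; "err" is a caller-local int set on failure):
 *
 *	struct enic_port_profile *pp;
 *	int err = 0;
 *
 *	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
 *	if (!pp)
 *		return err;
 */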
int enic_process_set_pp_request(struct enic *enic, int vf,
struct enic_port_profile *prev_pp, int *restore_pp);
int enic_process_get_pp_request(struct enic *enic, int vf,
int request, u16 *response);
int enic_is_valid_pp_vf(struct enic *enic, int vf, int *err);
#endif /* _ENIC_PP_H_ */
|
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_tables.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_arp.h>
#include <net/netfilter/nf_tables_ipv4.h>
#include <net/netfilter/nf_tables_ipv6.h>
#ifdef CONFIG_NF_TABLES_IPV4
static unsigned int nft_do_chain_ipv4(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
nft_set_pktinfo_ipv4(&pkt);
return nft_do_chain(&pkt, priv);
}
static const struct nft_chain_type nft_chain_filter_ipv4 = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_IPV4,
.hook_mask = (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
.hooks = {
[NF_INET_LOCAL_IN] = nft_do_chain_ipv4,
[NF_INET_LOCAL_OUT] = nft_do_chain_ipv4,
[NF_INET_FORWARD] = nft_do_chain_ipv4,
[NF_INET_PRE_ROUTING] = nft_do_chain_ipv4,
[NF_INET_POST_ROUTING] = nft_do_chain_ipv4,
},
};
static void nft_chain_filter_ipv4_init(void)
{
nft_register_chain_type(&nft_chain_filter_ipv4);
}
static void nft_chain_filter_ipv4_fini(void)
{
nft_unregister_chain_type(&nft_chain_filter_ipv4);
}
#else
static inline void nft_chain_filter_ipv4_init(void) {}
static inline void nft_chain_filter_ipv4_fini(void) {}
#endif /* CONFIG_NF_TABLES_IPV4 */
#ifdef CONFIG_NF_TABLES_ARP
static unsigned int nft_do_chain_arp(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
nft_set_pktinfo_unspec(&pkt);
return nft_do_chain(&pkt, priv);
}
static const struct nft_chain_type nft_chain_filter_arp = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_ARP,
.owner = THIS_MODULE,
.hook_mask = (1 << NF_ARP_IN) |
(1 << NF_ARP_OUT),
.hooks = {
[NF_ARP_IN] = nft_do_chain_arp,
[NF_ARP_OUT] = nft_do_chain_arp,
},
};
static void nft_chain_filter_arp_init(void)
{
nft_register_chain_type(&nft_chain_filter_arp);
}
static void nft_chain_filter_arp_fini(void)
{
nft_unregister_chain_type(&nft_chain_filter_arp);
}
#else
static inline void nft_chain_filter_arp_init(void) {}
static inline void nft_chain_filter_arp_fini(void) {}
#endif /* CONFIG_NF_TABLES_ARP */
#ifdef CONFIG_NF_TABLES_IPV6
static unsigned int nft_do_chain_ipv6(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
nft_set_pktinfo_ipv6(&pkt);
return nft_do_chain(&pkt, priv);
}
static const struct nft_chain_type nft_chain_filter_ipv6 = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_IPV6,
.hook_mask = (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
.hooks = {
[NF_INET_LOCAL_IN] = nft_do_chain_ipv6,
[NF_INET_LOCAL_OUT] = nft_do_chain_ipv6,
[NF_INET_FORWARD] = nft_do_chain_ipv6,
[NF_INET_PRE_ROUTING] = nft_do_chain_ipv6,
[NF_INET_POST_ROUTING] = nft_do_chain_ipv6,
},
};
static void nft_chain_filter_ipv6_init(void)
{
nft_register_chain_type(&nft_chain_filter_ipv6);
}
static void nft_chain_filter_ipv6_fini(void)
{
nft_unregister_chain_type(&nft_chain_filter_ipv6);
}
#else
static inline void nft_chain_filter_ipv6_init(void) {}
static inline void nft_chain_filter_ipv6_fini(void) {}
#endif /* CONFIG_NF_TABLES_IPV6 */
#ifdef CONFIG_NF_TABLES_INET
static unsigned int nft_do_chain_inet(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
switch (state->pf) {
case NFPROTO_IPV4:
nft_set_pktinfo_ipv4(&pkt);
break;
case NFPROTO_IPV6:
nft_set_pktinfo_ipv6(&pkt);
break;
default:
break;
}
return nft_do_chain(&pkt, priv);
}
static unsigned int nft_do_chain_inet_ingress(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_hook_state ingress_state = *state;
struct nft_pktinfo pkt;
switch (skb->protocol) {
case htons(ETH_P_IP):
/* Original hook is NFPROTO_NETDEV and NF_NETDEV_INGRESS. */
ingress_state.pf = NFPROTO_IPV4;
ingress_state.hook = NF_INET_INGRESS;
nft_set_pktinfo(&pkt, skb, &ingress_state);
if (nft_set_pktinfo_ipv4_ingress(&pkt) < 0)
return NF_DROP;
break;
case htons(ETH_P_IPV6):
ingress_state.pf = NFPROTO_IPV6;
ingress_state.hook = NF_INET_INGRESS;
nft_set_pktinfo(&pkt, skb, &ingress_state);
if (nft_set_pktinfo_ipv6_ingress(&pkt) < 0)
return NF_DROP;
break;
default:
return NF_ACCEPT;
}
return nft_do_chain(&pkt, priv);
}
static const struct nft_chain_type nft_chain_filter_inet = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_INET,
.hook_mask = (1 << NF_INET_INGRESS) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
.hooks = {
[NF_INET_INGRESS] = nft_do_chain_inet_ingress,
[NF_INET_LOCAL_IN] = nft_do_chain_inet,
[NF_INET_LOCAL_OUT] = nft_do_chain_inet,
[NF_INET_FORWARD] = nft_do_chain_inet,
[NF_INET_PRE_ROUTING] = nft_do_chain_inet,
[NF_INET_POST_ROUTING] = nft_do_chain_inet,
},
};
static void nft_chain_filter_inet_init(void)
{
nft_register_chain_type(&nft_chain_filter_inet);
}
static void nft_chain_filter_inet_fini(void)
{
nft_unregister_chain_type(&nft_chain_filter_inet);
}
#else
static inline void nft_chain_filter_inet_init(void) {}
static inline void nft_chain_filter_inet_fini(void) {}
#endif /* CONFIG_NF_TABLES_INET */
#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE)
static unsigned int
nft_do_chain_bridge(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
switch (eth_hdr(skb)->h_proto) {
case htons(ETH_P_IP):
nft_set_pktinfo_ipv4_validate(&pkt);
break;
case htons(ETH_P_IPV6):
nft_set_pktinfo_ipv6_validate(&pkt);
break;
default:
nft_set_pktinfo_unspec(&pkt);
break;
}
return nft_do_chain(&pkt, priv);
}
static const struct nft_chain_type nft_chain_filter_bridge = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_BRIDGE,
.hook_mask = (1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_LOCAL_IN) |
(1 << NF_BR_FORWARD) |
(1 << NF_BR_LOCAL_OUT) |
(1 << NF_BR_POST_ROUTING),
.hooks = {
[NF_BR_PRE_ROUTING] = nft_do_chain_bridge,
[NF_BR_LOCAL_IN] = nft_do_chain_bridge,
[NF_BR_FORWARD] = nft_do_chain_bridge,
[NF_BR_LOCAL_OUT] = nft_do_chain_bridge,
[NF_BR_POST_ROUTING] = nft_do_chain_bridge,
},
};
static void nft_chain_filter_bridge_init(void)
{
nft_register_chain_type(&nft_chain_filter_bridge);
}
static void nft_chain_filter_bridge_fini(void)
{
nft_unregister_chain_type(&nft_chain_filter_bridge);
}
#else
static inline void nft_chain_filter_bridge_init(void) {}
static inline void nft_chain_filter_bridge_fini(void) {}
#endif /* CONFIG_NF_TABLES_BRIDGE */
#ifdef CONFIG_NF_TABLES_NETDEV
static unsigned int nft_do_chain_netdev(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
switch (skb->protocol) {
case htons(ETH_P_IP):
nft_set_pktinfo_ipv4_validate(&pkt);
break;
case htons(ETH_P_IPV6):
nft_set_pktinfo_ipv6_validate(&pkt);
break;
default:
nft_set_pktinfo_unspec(&pkt);
break;
}
return nft_do_chain(&pkt, priv);
}
static const struct nft_chain_type nft_chain_filter_netdev = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_NETDEV,
.hook_mask = (1 << NF_NETDEV_INGRESS) |
(1 << NF_NETDEV_EGRESS),
.hooks = {
[NF_NETDEV_INGRESS] = nft_do_chain_netdev,
[NF_NETDEV_EGRESS] = nft_do_chain_netdev,
},
};
static void nft_netdev_event(unsigned long event, struct net_device *dev,
struct nft_ctx *ctx)
{
struct nft_base_chain *basechain = nft_base_chain(ctx->chain);
struct nft_hook *hook, *found = NULL;
int n = 0;
list_for_each_entry(hook, &basechain->hook_list, list) {
if (hook->ops.dev == dev)
found = hook;
n++;
}
if (!found)
return;
if (n > 1) {
if (!(ctx->chain->table->flags & NFT_TABLE_F_DORMANT))
nf_unregister_net_hook(ctx->net, &found->ops);
list_del_rcu(&found->list);
kfree_rcu(found, rcu);
return;
}
/* UNREGISTER events are also happening on netns exit.
*
* Although nf_tables core releases all tables/chains, only this event
	 * handler provides a guarantee that hook->ops.dev is still accessible,
* so we cannot skip exiting net namespaces.
*/
__nft_release_basechain(ctx);
}
static int nf_tables_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct nft_base_chain *basechain;
struct nftables_pernet *nft_net;
struct nft_chain *chain, *nr;
struct nft_table *table;
struct nft_ctx ctx = {
.net = dev_net(dev),
};
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
nft_net = nft_pernet(ctx.net);
mutex_lock(&nft_net->commit_mutex);
list_for_each_entry(table, &nft_net->tables, list) {
if (table->family != NFPROTO_NETDEV &&
table->family != NFPROTO_INET)
continue;
ctx.family = table->family;
ctx.table = table;
list_for_each_entry_safe(chain, nr, &table->chains, list) {
if (!nft_is_base_chain(chain))
continue;
basechain = nft_base_chain(chain);
if (table->family == NFPROTO_INET &&
basechain->ops.hooknum != NF_INET_INGRESS)
continue;
ctx.chain = chain;
nft_netdev_event(event, dev, &ctx);
}
}
mutex_unlock(&nft_net->commit_mutex);
return NOTIFY_DONE;
}
static struct notifier_block nf_tables_netdev_notifier = {
.notifier_call = nf_tables_netdev_event,
};
static int nft_chain_filter_netdev_init(void)
{
int err;
nft_register_chain_type(&nft_chain_filter_netdev);
err = register_netdevice_notifier(&nf_tables_netdev_notifier);
if (err)
goto err_register_netdevice_notifier;
return 0;
err_register_netdevice_notifier:
nft_unregister_chain_type(&nft_chain_filter_netdev);
return err;
}
static void nft_chain_filter_netdev_fini(void)
{
nft_unregister_chain_type(&nft_chain_filter_netdev);
unregister_netdevice_notifier(&nf_tables_netdev_notifier);
}
#else
static inline int nft_chain_filter_netdev_init(void) { return 0; }
static inline void nft_chain_filter_netdev_fini(void) {}
#endif /* CONFIG_NF_TABLES_NETDEV */
int __init nft_chain_filter_init(void)
{
int err;
err = nft_chain_filter_netdev_init();
if (err < 0)
return err;
nft_chain_filter_ipv4_init();
nft_chain_filter_ipv6_init();
nft_chain_filter_arp_init();
nft_chain_filter_inet_init();
nft_chain_filter_bridge_init();
return 0;
}
void nft_chain_filter_fini(void)
{
nft_chain_filter_bridge_fini();
nft_chain_filter_inet_fini();
nft_chain_filter_arp_fini();
nft_chain_filter_ipv6_fini();
nft_chain_filter_ipv4_fini();
nft_chain_filter_netdev_fini();
}
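/*
 * With these chain types registered, userspace can create "filter" base
 * chains in the corresponding families, e.g. (illustrative nft(8)
 * invocations):
 *
 *	nft add table inet t
 *	nft add chain inet t input '{ type filter hook input priority 0; }'
 */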
|
/*
* Copyright (C) 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _sdma0_4_2_2_OFFSET_HEADER
#define _sdma0_4_2_2_OFFSET_HEADER
// addressBlock: sdma0_sdma0dec
// base address: 0x4980
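// Illustrative use (sketch): amdgpu normally reaches these registers via
// the SOC15 helpers, which combine the per-instance base selected by the
// _BASE_IDX define with the offset below, e.g.:
//	RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG));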
#define mmSDMA0_UCODE_ADDR 0x0000
#define mmSDMA0_UCODE_ADDR_BASE_IDX 0
#define mmSDMA0_UCODE_DATA 0x0001
#define mmSDMA0_UCODE_DATA_BASE_IDX 0
#define mmSDMA0_VM_CNTL 0x0004
#define mmSDMA0_VM_CNTL_BASE_IDX 0
#define mmSDMA0_VM_CTX_LO 0x0005
#define mmSDMA0_VM_CTX_LO_BASE_IDX 0
#define mmSDMA0_VM_CTX_HI 0x0006
#define mmSDMA0_VM_CTX_HI_BASE_IDX 0
#define mmSDMA0_ACTIVE_FCN_ID 0x0007
#define mmSDMA0_ACTIVE_FCN_ID_BASE_IDX 0
#define mmSDMA0_VM_CTX_CNTL 0x0008
#define mmSDMA0_VM_CTX_CNTL_BASE_IDX 0
#define mmSDMA0_VIRT_RESET_REQ 0x0009
#define mmSDMA0_VIRT_RESET_REQ_BASE_IDX 0
#define mmSDMA0_VF_ENABLE 0x000a
#define mmSDMA0_VF_ENABLE_BASE_IDX 0
#define mmSDMA0_CONTEXT_REG_TYPE0 0x000b
#define mmSDMA0_CONTEXT_REG_TYPE0_BASE_IDX 0
#define mmSDMA0_CONTEXT_REG_TYPE1 0x000c
#define mmSDMA0_CONTEXT_REG_TYPE1_BASE_IDX 0
#define mmSDMA0_CONTEXT_REG_TYPE2 0x000d
#define mmSDMA0_CONTEXT_REG_TYPE2_BASE_IDX 0
#define mmSDMA0_CONTEXT_REG_TYPE3 0x000e
#define mmSDMA0_CONTEXT_REG_TYPE3_BASE_IDX 0
#define mmSDMA0_PUB_REG_TYPE0 0x000f
#define mmSDMA0_PUB_REG_TYPE0_BASE_IDX 0
#define mmSDMA0_PUB_REG_TYPE1 0x0010
#define mmSDMA0_PUB_REG_TYPE1_BASE_IDX 0
#define mmSDMA0_PUB_REG_TYPE2 0x0011
#define mmSDMA0_PUB_REG_TYPE2_BASE_IDX 0
#define mmSDMA0_PUB_REG_TYPE3 0x0012
#define mmSDMA0_PUB_REG_TYPE3_BASE_IDX 0
#define mmSDMA0_MMHUB_CNTL 0x0013
#define mmSDMA0_MMHUB_CNTL_BASE_IDX 0
#define mmSDMA0_CONTEXT_GROUP_BOUNDARY 0x0019
#define mmSDMA0_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
#define mmSDMA0_POWER_CNTL 0x001a
#define mmSDMA0_POWER_CNTL_BASE_IDX 0
#define mmSDMA0_CLK_CTRL 0x001b
#define mmSDMA0_CLK_CTRL_BASE_IDX 0
#define mmSDMA0_CNTL 0x001c
#define mmSDMA0_CNTL_BASE_IDX 0
#define mmSDMA0_CHICKEN_BITS 0x001d
#define mmSDMA0_CHICKEN_BITS_BASE_IDX 0
#define mmSDMA0_GB_ADDR_CONFIG 0x001e
#define mmSDMA0_GB_ADDR_CONFIG_BASE_IDX 0
#define mmSDMA0_GB_ADDR_CONFIG_READ 0x001f
#define mmSDMA0_GB_ADDR_CONFIG_READ_BASE_IDX 0
#define mmSDMA0_RB_RPTR_FETCH_HI 0x0020
#define mmSDMA0_RB_RPTR_FETCH_HI_BASE_IDX 0
#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
#define mmSDMA0_RB_RPTR_FETCH 0x0022
#define mmSDMA0_RB_RPTR_FETCH_BASE_IDX 0
#define mmSDMA0_IB_OFFSET_FETCH 0x0023
#define mmSDMA0_IB_OFFSET_FETCH_BASE_IDX 0
#define mmSDMA0_PROGRAM 0x0024
#define mmSDMA0_PROGRAM_BASE_IDX 0
#define mmSDMA0_STATUS_REG 0x0025
#define mmSDMA0_STATUS_REG_BASE_IDX 0
#define mmSDMA0_STATUS1_REG 0x0026
#define mmSDMA0_STATUS1_REG_BASE_IDX 0
#define mmSDMA0_RD_BURST_CNTL 0x0027
#define mmSDMA0_RD_BURST_CNTL_BASE_IDX 0
#define mmSDMA0_HBM_PAGE_CONFIG 0x0028
#define mmSDMA0_HBM_PAGE_CONFIG_BASE_IDX 0
#define mmSDMA0_UCODE_CHECKSUM 0x0029
#define mmSDMA0_UCODE_CHECKSUM_BASE_IDX 0
#define mmSDMA0_F32_CNTL 0x002a
#define mmSDMA0_F32_CNTL_BASE_IDX 0
#define mmSDMA0_FREEZE 0x002b
#define mmSDMA0_FREEZE_BASE_IDX 0
#define mmSDMA0_PHASE0_QUANTUM 0x002c
#define mmSDMA0_PHASE0_QUANTUM_BASE_IDX 0
#define mmSDMA0_PHASE1_QUANTUM 0x002d
#define mmSDMA0_PHASE1_QUANTUM_BASE_IDX 0
#define mmSDMA_POWER_GATING 0x002e
#define mmSDMA_POWER_GATING_BASE_IDX 0
#define mmSDMA_PGFSM_CONFIG 0x002f
#define mmSDMA_PGFSM_CONFIG_BASE_IDX 0
#define mmSDMA_PGFSM_WRITE 0x0030
#define mmSDMA_PGFSM_WRITE_BASE_IDX 0
#define mmSDMA_PGFSM_READ 0x0031
#define mmSDMA_PGFSM_READ_BASE_IDX 0
#define mmSDMA0_EDC_CONFIG 0x0032
#define mmSDMA0_EDC_CONFIG_BASE_IDX 0
#define mmSDMA0_BA_THRESHOLD 0x0033
#define mmSDMA0_BA_THRESHOLD_BASE_IDX 0
#define mmSDMA0_ID 0x0034
#define mmSDMA0_ID_BASE_IDX 0
#define mmSDMA0_VERSION 0x0035
#define mmSDMA0_VERSION_BASE_IDX 0
#define mmSDMA0_EDC_COUNTER 0x0036
#define mmSDMA0_EDC_COUNTER_BASE_IDX 0
#define mmSDMA0_EDC_COUNTER_CLEAR 0x0037
#define mmSDMA0_EDC_COUNTER_CLEAR_BASE_IDX 0
#define mmSDMA0_STATUS2_REG 0x0038
#define mmSDMA0_STATUS2_REG_BASE_IDX 0
#define mmSDMA0_ATOMIC_CNTL 0x0039
#define mmSDMA0_ATOMIC_CNTL_BASE_IDX 0
#define mmSDMA0_ATOMIC_PREOP_LO 0x003a
#define mmSDMA0_ATOMIC_PREOP_LO_BASE_IDX 0
#define mmSDMA0_ATOMIC_PREOP_HI 0x003b
#define mmSDMA0_ATOMIC_PREOP_HI_BASE_IDX 0
#define mmSDMA0_UTCL1_CNTL 0x003c
#define mmSDMA0_UTCL1_CNTL_BASE_IDX 0
#define mmSDMA0_UTCL1_WATERMK 0x003d
#define mmSDMA0_UTCL1_WATERMK_BASE_IDX 0
#define mmSDMA0_UTCL1_RD_STATUS 0x003e
#define mmSDMA0_UTCL1_RD_STATUS_BASE_IDX 0
#define mmSDMA0_UTCL1_WR_STATUS 0x003f
#define mmSDMA0_UTCL1_WR_STATUS_BASE_IDX 0
#define mmSDMA0_UTCL1_INV0 0x0040
#define mmSDMA0_UTCL1_INV0_BASE_IDX 0
#define mmSDMA0_UTCL1_INV1 0x0041
#define mmSDMA0_UTCL1_INV1_BASE_IDX 0
#define mmSDMA0_UTCL1_INV2 0x0042
#define mmSDMA0_UTCL1_INV2_BASE_IDX 0
#define mmSDMA0_UTCL1_RD_XNACK0 0x0043
#define mmSDMA0_UTCL1_RD_XNACK0_BASE_IDX 0
#define mmSDMA0_UTCL1_RD_XNACK1 0x0044
#define mmSDMA0_UTCL1_RD_XNACK1_BASE_IDX 0
#define mmSDMA0_UTCL1_WR_XNACK0 0x0045
#define mmSDMA0_UTCL1_WR_XNACK0_BASE_IDX 0
#define mmSDMA0_UTCL1_WR_XNACK1 0x0046
#define mmSDMA0_UTCL1_WR_XNACK1_BASE_IDX 0
#define mmSDMA0_UTCL1_TIMEOUT 0x0047
#define mmSDMA0_UTCL1_TIMEOUT_BASE_IDX 0
#define mmSDMA0_UTCL1_PAGE 0x0048
#define mmSDMA0_UTCL1_PAGE_BASE_IDX 0
#define mmSDMA0_POWER_CNTL_IDLE 0x0049
#define mmSDMA0_POWER_CNTL_IDLE_BASE_IDX 0
#define mmSDMA0_RELAX_ORDERING_LUT 0x004a
#define mmSDMA0_RELAX_ORDERING_LUT_BASE_IDX 0
#define mmSDMA0_CHICKEN_BITS_2 0x004b
#define mmSDMA0_CHICKEN_BITS_2_BASE_IDX 0
#define mmSDMA0_STATUS3_REG 0x004c
#define mmSDMA0_STATUS3_REG_BASE_IDX 0
#define mmSDMA0_PHYSICAL_ADDR_LO 0x004d
#define mmSDMA0_PHYSICAL_ADDR_LO_BASE_IDX 0
#define mmSDMA0_PHYSICAL_ADDR_HI 0x004e
#define mmSDMA0_PHYSICAL_ADDR_HI_BASE_IDX 0
#define mmSDMA0_PHASE2_QUANTUM 0x004f
#define mmSDMA0_PHASE2_QUANTUM_BASE_IDX 0
#define mmSDMA0_ERROR_LOG 0x0050
#define mmSDMA0_ERROR_LOG_BASE_IDX 0
#define mmSDMA0_PUB_DUMMY_REG0 0x0051
#define mmSDMA0_PUB_DUMMY_REG0_BASE_IDX 0
#define mmSDMA0_PUB_DUMMY_REG1 0x0052
#define mmSDMA0_PUB_DUMMY_REG1_BASE_IDX 0
#define mmSDMA0_PUB_DUMMY_REG2 0x0053
#define mmSDMA0_PUB_DUMMY_REG2_BASE_IDX 0
#define mmSDMA0_PUB_DUMMY_REG3 0x0054
#define mmSDMA0_PUB_DUMMY_REG3_BASE_IDX 0
#define mmSDMA0_F32_COUNTER 0x0055
#define mmSDMA0_F32_COUNTER_BASE_IDX 0
#define mmSDMA0_UNBREAKABLE 0x0056
#define mmSDMA0_UNBREAKABLE_BASE_IDX 0
#define mmSDMA0_PERFMON_CNTL 0x0057
#define mmSDMA0_PERFMON_CNTL_BASE_IDX 0
#define mmSDMA0_PERFCOUNTER0_RESULT 0x0058
#define mmSDMA0_PERFCOUNTER0_RESULT_BASE_IDX 0
#define mmSDMA0_PERFCOUNTER1_RESULT 0x0059
#define mmSDMA0_PERFCOUNTER1_RESULT_BASE_IDX 0
#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 0
#define mmSDMA0_CRD_CNTL 0x005b
#define mmSDMA0_CRD_CNTL_BASE_IDX 0
#define mmSDMA0_GPU_IOV_VIOLATION_LOG 0x005d
#define mmSDMA0_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
#define mmSDMA0_ULV_CNTL 0x005e
#define mmSDMA0_ULV_CNTL_BASE_IDX 0
#define mmSDMA0_EA_DBIT_ADDR_DATA 0x0060
#define mmSDMA0_EA_DBIT_ADDR_DATA_BASE_IDX 0
#define mmSDMA0_EA_DBIT_ADDR_INDEX 0x0061
#define mmSDMA0_EA_DBIT_ADDR_INDEX_BASE_IDX 0
#define mmSDMA0_GPU_IOV_VIOLATION_LOG2 0x0062
#define mmSDMA0_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
#define mmSDMA0_GFX_RB_CNTL 0x0080
#define mmSDMA0_GFX_RB_CNTL_BASE_IDX 0
#define mmSDMA0_GFX_RB_BASE 0x0081
#define mmSDMA0_GFX_RB_BASE_BASE_IDX 0
#define mmSDMA0_GFX_RB_BASE_HI 0x0082
#define mmSDMA0_GFX_RB_BASE_HI_BASE_IDX 0
#define mmSDMA0_GFX_RB_RPTR 0x0083
#define mmSDMA0_GFX_RB_RPTR_BASE_IDX 0
#define mmSDMA0_GFX_RB_RPTR_HI 0x0084
#define mmSDMA0_GFX_RB_RPTR_HI_BASE_IDX 0
#define mmSDMA0_GFX_RB_WPTR 0x0085
#define mmSDMA0_GFX_RB_WPTR_BASE_IDX 0
#define mmSDMA0_GFX_RB_WPTR_HI 0x0086
#define mmSDMA0_GFX_RB_WPTR_HI_BASE_IDX 0
#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL 0x0087
#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0
#define mmSDMA0_GFX_RB_RPTR_ADDR_HI 0x0088
#define mmSDMA0_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0
#define mmSDMA0_GFX_RB_RPTR_ADDR_LO 0x0089
#define mmSDMA0_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0
#define mmSDMA0_GFX_IB_CNTL 0x008a
#define mmSDMA0_GFX_IB_CNTL_BASE_IDX 0
#define mmSDMA0_GFX_IB_RPTR 0x008b
#define mmSDMA0_GFX_IB_RPTR_BASE_IDX 0
#define mmSDMA0_GFX_IB_OFFSET 0x008c
#define mmSDMA0_GFX_IB_OFFSET_BASE_IDX 0
#define mmSDMA0_GFX_IB_BASE_LO 0x008d
#define mmSDMA0_GFX_IB_BASE_LO_BASE_IDX 0
#define mmSDMA0_GFX_IB_BASE_HI 0x008e
#define mmSDMA0_GFX_IB_BASE_HI_BASE_IDX 0
#define mmSDMA0_GFX_IB_SIZE 0x008f
#define mmSDMA0_GFX_IB_SIZE_BASE_IDX 0
#define mmSDMA0_GFX_SKIP_CNTL 0x0090
#define mmSDMA0_GFX_SKIP_CNTL_BASE_IDX 0
#define mmSDMA0_GFX_CONTEXT_STATUS 0x0091
#define mmSDMA0_GFX_CONTEXT_STATUS_BASE_IDX 0
#define mmSDMA0_GFX_DOORBELL 0x0092
#define mmSDMA0_GFX_DOORBELL_BASE_IDX 0
#define mmSDMA0_GFX_CONTEXT_CNTL 0x0093
#define mmSDMA0_GFX_CONTEXT_CNTL_BASE_IDX 0
#define mmSDMA0_GFX_STATUS 0x00a8
#define mmSDMA0_GFX_STATUS_BASE_IDX 0
#define mmSDMA0_GFX_DOORBELL_LOG 0x00a9
#define mmSDMA0_GFX_DOORBELL_LOG_BASE_IDX 0
#define mmSDMA0_GFX_WATERMARK 0x00aa
#define mmSDMA0_GFX_WATERMARK_BASE_IDX 0
#define mmSDMA0_GFX_DOORBELL_OFFSET 0x00ab
#define mmSDMA0_GFX_DOORBELL_OFFSET_BASE_IDX 0
#define mmSDMA0_GFX_CSA_ADDR_LO 0x00ac
#define mmSDMA0_GFX_CSA_ADDR_LO_BASE_IDX 0
#define mmSDMA0_GFX_CSA_ADDR_HI 0x00ad
#define mmSDMA0_GFX_CSA_ADDR_HI_BASE_IDX 0
#define mmSDMA0_GFX_IB_SUB_REMAIN 0x00af
#define mmSDMA0_GFX_IB_SUB_REMAIN_BASE_IDX 0
#define mmSDMA0_GFX_PREEMPT 0x00b0
#define mmSDMA0_GFX_PREEMPT_BASE_IDX 0
#define mmSDMA0_GFX_DUMMY_REG 0x00b1
#define mmSDMA0_GFX_DUMMY_REG_BASE_IDX 0
#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
#define mmSDMA0_GFX_RB_AQL_CNTL 0x00b4
#define mmSDMA0_GFX_RB_AQL_CNTL_BASE_IDX 0
#define mmSDMA0_GFX_MINOR_PTR_UPDATE 0x00b5
#define mmSDMA0_GFX_MINOR_PTR_UPDATE_BASE_IDX 0
#define mmSDMA0_GFX_MIDCMD_DATA0 0x00c0
#define mmSDMA0_GFX_MIDCMD_DATA0_BASE_IDX 0
#define mmSDMA0_GFX_MIDCMD_DATA1 0x00c1
#define mmSDMA0_GFX_MIDCMD_DATA1_BASE_IDX 0
#define mmSDMA0_GFX_MIDCMD_DATA2 0x00c2
#define mmSDMA0_GFX_MIDCMD_DATA2_BASE_IDX 0
#define mmSDMA0_GFX_MIDCMD_DATA3 0x00c3
#define mmSDMA0_GFX_MIDCMD_DATA3_BASE_IDX 0
#define mmSDMA0_GFX_MIDCMD_DATA4 0x00c4
#define mmSDMA0_GFX_MIDCMD_DATA4_BASE_IDX 0
#define mmSDMA0_GFX_MIDCMD_DATA5 0x00c5
#define mmSDMA0_GFX_MIDCMD_DATA5_BASE_IDX 0
#define mmSDMA0_GFX_MIDCMD_DATA6 0x00c6
#define mmSDMA0_GFX_MIDCMD_DATA6_BASE_IDX 0
#define mmSDMA0_GFX_MIDCMD_DATA7 0x00c7
#define mmSDMA0_GFX_MIDCMD_DATA7_BASE_IDX 0
#define mmSDMA0_GFX_MIDCMD_DATA8 0x00c8
#define mmSDMA0_GFX_MIDCMD_DATA8_BASE_IDX 0
#define mmSDMA0_GFX_MIDCMD_CNTL 0x00c9
#define mmSDMA0_GFX_MIDCMD_CNTL_BASE_IDX 0
#define mmSDMA0_PAGE_RB_CNTL 0x00d8
#define mmSDMA0_PAGE_RB_CNTL_BASE_IDX 0
#define mmSDMA0_PAGE_RB_BASE 0x00d9
#define mmSDMA0_PAGE_RB_BASE_BASE_IDX 0
#define mmSDMA0_PAGE_RB_BASE_HI 0x00da
#define mmSDMA0_PAGE_RB_BASE_HI_BASE_IDX 0
#define mmSDMA0_PAGE_RB_RPTR 0x00db
#define mmSDMA0_PAGE_RB_RPTR_BASE_IDX 0
#define mmSDMA0_PAGE_RB_RPTR_HI 0x00dc
#define mmSDMA0_PAGE_RB_RPTR_HI_BASE_IDX 0
#define mmSDMA0_PAGE_RB_WPTR 0x00dd
#define mmSDMA0_PAGE_RB_WPTR_BASE_IDX 0
#define mmSDMA0_PAGE_RB_WPTR_HI 0x00de
#define mmSDMA0_PAGE_RB_WPTR_HI_BASE_IDX 0
#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL 0x00df
#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0
#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI 0x00e0
#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0
#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO 0x00e1
#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0
#define mmSDMA0_PAGE_IB_CNTL 0x00e2
#define mmSDMA0_PAGE_IB_CNTL_BASE_IDX 0
#define mmSDMA0_PAGE_IB_RPTR 0x00e3
#define mmSDMA0_PAGE_IB_RPTR_BASE_IDX 0
#define mmSDMA0_PAGE_IB_OFFSET 0x00e4
#define mmSDMA0_PAGE_IB_OFFSET_BASE_IDX 0
#define mmSDMA0_PAGE_IB_BASE_LO 0x00e5
#define mmSDMA0_PAGE_IB_BASE_LO_BASE_IDX 0
#define mmSDMA0_PAGE_IB_BASE_HI 0x00e6
#define mmSDMA0_PAGE_IB_BASE_HI_BASE_IDX 0
#define mmSDMA0_PAGE_IB_SIZE 0x00e7
#define mmSDMA0_PAGE_IB_SIZE_BASE_IDX 0
#define mmSDMA0_PAGE_SKIP_CNTL 0x00e8
#define mmSDMA0_PAGE_SKIP_CNTL_BASE_IDX 0
#define mmSDMA0_PAGE_CONTEXT_STATUS 0x00e9
#define mmSDMA0_PAGE_CONTEXT_STATUS_BASE_IDX 0
#define mmSDMA0_PAGE_DOORBELL 0x00ea
#define mmSDMA0_PAGE_DOORBELL_BASE_IDX 0
#define mmSDMA0_PAGE_STATUS 0x0100
#define mmSDMA0_PAGE_STATUS_BASE_IDX 0
#define mmSDMA0_PAGE_DOORBELL_LOG 0x0101
#define mmSDMA0_PAGE_DOORBELL_LOG_BASE_IDX 0
#define mmSDMA0_PAGE_WATERMARK 0x0102
#define mmSDMA0_PAGE_WATERMARK_BASE_IDX 0
#define mmSDMA0_PAGE_DOORBELL_OFFSET 0x0103
#define mmSDMA0_PAGE_DOORBELL_OFFSET_BASE_IDX 0
#define mmSDMA0_PAGE_CSA_ADDR_LO 0x0104
#define mmSDMA0_PAGE_CSA_ADDR_LO_BASE_IDX 0
#define mmSDMA0_PAGE_CSA_ADDR_HI 0x0105
#define mmSDMA0_PAGE_CSA_ADDR_HI_BASE_IDX 0
#define mmSDMA0_PAGE_IB_SUB_REMAIN 0x0107
#define mmSDMA0_PAGE_IB_SUB_REMAIN_BASE_IDX 0
#define mmSDMA0_PAGE_PREEMPT 0x0108
#define mmSDMA0_PAGE_PREEMPT_BASE_IDX 0
#define mmSDMA0_PAGE_DUMMY_REG 0x0109
#define mmSDMA0_PAGE_DUMMY_REG_BASE_IDX 0
#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a
#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b
#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
#define mmSDMA0_PAGE_RB_AQL_CNTL 0x010c
#define mmSDMA0_PAGE_RB_AQL_CNTL_BASE_IDX 0
#define mmSDMA0_PAGE_MINOR_PTR_UPDATE 0x010d
#define mmSDMA0_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0
#define mmSDMA0_PAGE_MIDCMD_DATA0 0x0118
#define mmSDMA0_PAGE_MIDCMD_DATA0_BASE_IDX 0
#define mmSDMA0_PAGE_MIDCMD_DATA1 0x0119
#define mmSDMA0_PAGE_MIDCMD_DATA1_BASE_IDX 0
#define mmSDMA0_PAGE_MIDCMD_DATA2 0x011a
#define mmSDMA0_PAGE_MIDCMD_DATA2_BASE_IDX 0
#define mmSDMA0_PAGE_MIDCMD_DATA3 0x011b
#define mmSDMA0_PAGE_MIDCMD_DATA3_BASE_IDX 0
#define mmSDMA0_PAGE_MIDCMD_DATA4 0x011c
#define mmSDMA0_PAGE_MIDCMD_DATA4_BASE_IDX 0
#define mmSDMA0_PAGE_MIDCMD_DATA5 0x011d
#define mmSDMA0_PAGE_MIDCMD_DATA5_BASE_IDX 0
#define mmSDMA0_PAGE_MIDCMD_DATA6 0x011e
#define mmSDMA0_PAGE_MIDCMD_DATA6_BASE_IDX 0
#define mmSDMA0_PAGE_MIDCMD_DATA7 0x011f
#define mmSDMA0_PAGE_MIDCMD_DATA7_BASE_IDX 0
#define mmSDMA0_PAGE_MIDCMD_DATA8 0x0120
#define mmSDMA0_PAGE_MIDCMD_DATA8_BASE_IDX 0
#define mmSDMA0_PAGE_MIDCMD_CNTL 0x0121
#define mmSDMA0_PAGE_MIDCMD_CNTL_BASE_IDX 0
#define mmSDMA0_RLC0_RB_CNTL 0x0130
#define mmSDMA0_RLC0_RB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC0_RB_BASE 0x0131
#define mmSDMA0_RLC0_RB_BASE_BASE_IDX 0
#define mmSDMA0_RLC0_RB_BASE_HI 0x0132
#define mmSDMA0_RLC0_RB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC0_RB_RPTR 0x0133
#define mmSDMA0_RLC0_RB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC0_RB_RPTR_HI 0x0134
#define mmSDMA0_RLC0_RB_RPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC0_RB_WPTR 0x0135
#define mmSDMA0_RLC0_RB_WPTR_BASE_IDX 0
#define mmSDMA0_RLC0_RB_WPTR_HI 0x0136
#define mmSDMA0_RLC0_RB_WPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL 0x0137
#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI 0x0138
#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO 0x0139
#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC0_IB_CNTL 0x013a
#define mmSDMA0_RLC0_IB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC0_IB_RPTR 0x013b
#define mmSDMA0_RLC0_IB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC0_IB_OFFSET 0x013c
#define mmSDMA0_RLC0_IB_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC0_IB_BASE_LO 0x013d
#define mmSDMA0_RLC0_IB_BASE_LO_BASE_IDX 0
#define mmSDMA0_RLC0_IB_BASE_HI 0x013e
#define mmSDMA0_RLC0_IB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC0_IB_SIZE 0x013f
#define mmSDMA0_RLC0_IB_SIZE_BASE_IDX 0
#define mmSDMA0_RLC0_SKIP_CNTL 0x0140
#define mmSDMA0_RLC0_SKIP_CNTL_BASE_IDX 0
#define mmSDMA0_RLC0_CONTEXT_STATUS 0x0141
#define mmSDMA0_RLC0_CONTEXT_STATUS_BASE_IDX 0
#define mmSDMA0_RLC0_DOORBELL 0x0142
#define mmSDMA0_RLC0_DOORBELL_BASE_IDX 0
#define mmSDMA0_RLC0_STATUS 0x0158
#define mmSDMA0_RLC0_STATUS_BASE_IDX 0
#define mmSDMA0_RLC0_DOORBELL_LOG 0x0159
#define mmSDMA0_RLC0_DOORBELL_LOG_BASE_IDX 0
#define mmSDMA0_RLC0_WATERMARK 0x015a
#define mmSDMA0_RLC0_WATERMARK_BASE_IDX 0
#define mmSDMA0_RLC0_DOORBELL_OFFSET 0x015b
#define mmSDMA0_RLC0_DOORBELL_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC0_CSA_ADDR_LO 0x015c
#define mmSDMA0_RLC0_CSA_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC0_CSA_ADDR_HI 0x015d
#define mmSDMA0_RLC0_CSA_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC0_IB_SUB_REMAIN 0x015f
#define mmSDMA0_RLC0_IB_SUB_REMAIN_BASE_IDX 0
#define mmSDMA0_RLC0_PREEMPT 0x0160
#define mmSDMA0_RLC0_PREEMPT_BASE_IDX 0
#define mmSDMA0_RLC0_DUMMY_REG 0x0161
#define mmSDMA0_RLC0_DUMMY_REG_BASE_IDX 0
#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162
#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163
#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC0_RB_AQL_CNTL 0x0164
#define mmSDMA0_RLC0_RB_AQL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC0_MINOR_PTR_UPDATE 0x0165
#define mmSDMA0_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0
#define mmSDMA0_RLC0_MIDCMD_DATA0 0x0170
#define mmSDMA0_RLC0_MIDCMD_DATA0_BASE_IDX 0
#define mmSDMA0_RLC0_MIDCMD_DATA1 0x0171
#define mmSDMA0_RLC0_MIDCMD_DATA1_BASE_IDX 0
#define mmSDMA0_RLC0_MIDCMD_DATA2 0x0172
#define mmSDMA0_RLC0_MIDCMD_DATA2_BASE_IDX 0
#define mmSDMA0_RLC0_MIDCMD_DATA3 0x0173
#define mmSDMA0_RLC0_MIDCMD_DATA3_BASE_IDX 0
#define mmSDMA0_RLC0_MIDCMD_DATA4 0x0174
#define mmSDMA0_RLC0_MIDCMD_DATA4_BASE_IDX 0
#define mmSDMA0_RLC0_MIDCMD_DATA5 0x0175
#define mmSDMA0_RLC0_MIDCMD_DATA5_BASE_IDX 0
#define mmSDMA0_RLC0_MIDCMD_DATA6 0x0176
#define mmSDMA0_RLC0_MIDCMD_DATA6_BASE_IDX 0
#define mmSDMA0_RLC0_MIDCMD_DATA7 0x0177
#define mmSDMA0_RLC0_MIDCMD_DATA7_BASE_IDX 0
#define mmSDMA0_RLC0_MIDCMD_DATA8 0x0178
#define mmSDMA0_RLC0_MIDCMD_DATA8_BASE_IDX 0
#define mmSDMA0_RLC0_MIDCMD_CNTL 0x0179
#define mmSDMA0_RLC0_MIDCMD_CNTL_BASE_IDX 0
#define mmSDMA0_RLC1_RB_CNTL 0x0188
#define mmSDMA0_RLC1_RB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC1_RB_BASE 0x0189
#define mmSDMA0_RLC1_RB_BASE_BASE_IDX 0
#define mmSDMA0_RLC1_RB_BASE_HI 0x018a
#define mmSDMA0_RLC1_RB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC1_RB_RPTR 0x018b
#define mmSDMA0_RLC1_RB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC1_RB_RPTR_HI 0x018c
#define mmSDMA0_RLC1_RB_RPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC1_RB_WPTR 0x018d
#define mmSDMA0_RLC1_RB_WPTR_BASE_IDX 0
#define mmSDMA0_RLC1_RB_WPTR_HI 0x018e
#define mmSDMA0_RLC1_RB_WPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL 0x018f
#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI 0x0190
#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO 0x0191
#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC1_IB_CNTL 0x0192
#define mmSDMA0_RLC1_IB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC1_IB_RPTR 0x0193
#define mmSDMA0_RLC1_IB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC1_IB_OFFSET 0x0194
#define mmSDMA0_RLC1_IB_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC1_IB_BASE_LO 0x0195
#define mmSDMA0_RLC1_IB_BASE_LO_BASE_IDX 0
#define mmSDMA0_RLC1_IB_BASE_HI 0x0196
#define mmSDMA0_RLC1_IB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC1_IB_SIZE 0x0197
#define mmSDMA0_RLC1_IB_SIZE_BASE_IDX 0
#define mmSDMA0_RLC1_SKIP_CNTL 0x0198
#define mmSDMA0_RLC1_SKIP_CNTL_BASE_IDX 0
#define mmSDMA0_RLC1_CONTEXT_STATUS 0x0199
#define mmSDMA0_RLC1_CONTEXT_STATUS_BASE_IDX 0
#define mmSDMA0_RLC1_DOORBELL 0x019a
#define mmSDMA0_RLC1_DOORBELL_BASE_IDX 0
#define mmSDMA0_RLC1_STATUS 0x01b0
#define mmSDMA0_RLC1_STATUS_BASE_IDX 0
#define mmSDMA0_RLC1_DOORBELL_LOG 0x01b1
#define mmSDMA0_RLC1_DOORBELL_LOG_BASE_IDX 0
#define mmSDMA0_RLC1_WATERMARK 0x01b2
#define mmSDMA0_RLC1_WATERMARK_BASE_IDX 0
#define mmSDMA0_RLC1_DOORBELL_OFFSET 0x01b3
#define mmSDMA0_RLC1_DOORBELL_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC1_CSA_ADDR_LO 0x01b4
#define mmSDMA0_RLC1_CSA_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC1_CSA_ADDR_HI 0x01b5
#define mmSDMA0_RLC1_CSA_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC1_IB_SUB_REMAIN 0x01b7
#define mmSDMA0_RLC1_IB_SUB_REMAIN_BASE_IDX 0
#define mmSDMA0_RLC1_PREEMPT 0x01b8
#define mmSDMA0_RLC1_PREEMPT_BASE_IDX 0
#define mmSDMA0_RLC1_DUMMY_REG 0x01b9
#define mmSDMA0_RLC1_DUMMY_REG_BASE_IDX 0
#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba
#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb
#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC1_RB_AQL_CNTL 0x01bc
#define mmSDMA0_RLC1_RB_AQL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC1_MINOR_PTR_UPDATE 0x01bd
#define mmSDMA0_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0
#define mmSDMA0_RLC1_MIDCMD_DATA0 0x01c8
#define mmSDMA0_RLC1_MIDCMD_DATA0_BASE_IDX 0
#define mmSDMA0_RLC1_MIDCMD_DATA1 0x01c9
#define mmSDMA0_RLC1_MIDCMD_DATA1_BASE_IDX 0
#define mmSDMA0_RLC1_MIDCMD_DATA2 0x01ca
#define mmSDMA0_RLC1_MIDCMD_DATA2_BASE_IDX 0
#define mmSDMA0_RLC1_MIDCMD_DATA3 0x01cb
#define mmSDMA0_RLC1_MIDCMD_DATA3_BASE_IDX 0
#define mmSDMA0_RLC1_MIDCMD_DATA4 0x01cc
#define mmSDMA0_RLC1_MIDCMD_DATA4_BASE_IDX 0
#define mmSDMA0_RLC1_MIDCMD_DATA5 0x01cd
#define mmSDMA0_RLC1_MIDCMD_DATA5_BASE_IDX 0
#define mmSDMA0_RLC1_MIDCMD_DATA6 0x01ce
#define mmSDMA0_RLC1_MIDCMD_DATA6_BASE_IDX 0
#define mmSDMA0_RLC1_MIDCMD_DATA7 0x01cf
#define mmSDMA0_RLC1_MIDCMD_DATA7_BASE_IDX 0
#define mmSDMA0_RLC1_MIDCMD_DATA8 0x01d0
#define mmSDMA0_RLC1_MIDCMD_DATA8_BASE_IDX 0
#define mmSDMA0_RLC1_MIDCMD_CNTL 0x01d1
#define mmSDMA0_RLC1_MIDCMD_CNTL_BASE_IDX 0
#define mmSDMA0_RLC2_RB_CNTL 0x01e0
#define mmSDMA0_RLC2_RB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC2_RB_BASE 0x01e1
#define mmSDMA0_RLC2_RB_BASE_BASE_IDX 0
#define mmSDMA0_RLC2_RB_BASE_HI 0x01e2
#define mmSDMA0_RLC2_RB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC2_RB_RPTR 0x01e3
#define mmSDMA0_RLC2_RB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC2_RB_RPTR_HI 0x01e4
#define mmSDMA0_RLC2_RB_RPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC2_RB_WPTR 0x01e5
#define mmSDMA0_RLC2_RB_WPTR_BASE_IDX 0
#define mmSDMA0_RLC2_RB_WPTR_HI 0x01e6
#define mmSDMA0_RLC2_RB_WPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC2_RB_WPTR_POLL_CNTL 0x01e7
#define mmSDMA0_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC2_RB_RPTR_ADDR_HI 0x01e8
#define mmSDMA0_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC2_RB_RPTR_ADDR_LO 0x01e9
#define mmSDMA0_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC2_IB_CNTL 0x01ea
#define mmSDMA0_RLC2_IB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC2_IB_RPTR 0x01eb
#define mmSDMA0_RLC2_IB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC2_IB_OFFSET 0x01ec
#define mmSDMA0_RLC2_IB_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC2_IB_BASE_LO 0x01ed
#define mmSDMA0_RLC2_IB_BASE_LO_BASE_IDX 0
#define mmSDMA0_RLC2_IB_BASE_HI 0x01ee
#define mmSDMA0_RLC2_IB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC2_IB_SIZE 0x01ef
#define mmSDMA0_RLC2_IB_SIZE_BASE_IDX 0
#define mmSDMA0_RLC2_SKIP_CNTL 0x01f0
#define mmSDMA0_RLC2_SKIP_CNTL_BASE_IDX 0
#define mmSDMA0_RLC2_CONTEXT_STATUS 0x01f1
#define mmSDMA0_RLC2_CONTEXT_STATUS_BASE_IDX 0
#define mmSDMA0_RLC2_DOORBELL 0x01f2
#define mmSDMA0_RLC2_DOORBELL_BASE_IDX 0
#define mmSDMA0_RLC2_STATUS 0x0208
#define mmSDMA0_RLC2_STATUS_BASE_IDX 0
#define mmSDMA0_RLC2_DOORBELL_LOG 0x0209
#define mmSDMA0_RLC2_DOORBELL_LOG_BASE_IDX 0
#define mmSDMA0_RLC2_WATERMARK 0x020a
#define mmSDMA0_RLC2_WATERMARK_BASE_IDX 0
#define mmSDMA0_RLC2_DOORBELL_OFFSET 0x020b
#define mmSDMA0_RLC2_DOORBELL_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC2_CSA_ADDR_LO 0x020c
#define mmSDMA0_RLC2_CSA_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC2_CSA_ADDR_HI 0x020d
#define mmSDMA0_RLC2_CSA_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC2_IB_SUB_REMAIN 0x020f
#define mmSDMA0_RLC2_IB_SUB_REMAIN_BASE_IDX 0
#define mmSDMA0_RLC2_PREEMPT 0x0210
#define mmSDMA0_RLC2_PREEMPT_BASE_IDX 0
#define mmSDMA0_RLC2_DUMMY_REG 0x0211
#define mmSDMA0_RLC2_DUMMY_REG_BASE_IDX 0
#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212
#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213
#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC2_RB_AQL_CNTL 0x0214
#define mmSDMA0_RLC2_RB_AQL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC2_MINOR_PTR_UPDATE 0x0215
#define mmSDMA0_RLC2_MINOR_PTR_UPDATE_BASE_IDX 0
#define mmSDMA0_RLC2_MIDCMD_DATA0 0x0220
#define mmSDMA0_RLC2_MIDCMD_DATA0_BASE_IDX 0
#define mmSDMA0_RLC2_MIDCMD_DATA1 0x0221
#define mmSDMA0_RLC2_MIDCMD_DATA1_BASE_IDX 0
#define mmSDMA0_RLC2_MIDCMD_DATA2 0x0222
#define mmSDMA0_RLC2_MIDCMD_DATA2_BASE_IDX 0
#define mmSDMA0_RLC2_MIDCMD_DATA3 0x0223
#define mmSDMA0_RLC2_MIDCMD_DATA3_BASE_IDX 0
#define mmSDMA0_RLC2_MIDCMD_DATA4 0x0224
#define mmSDMA0_RLC2_MIDCMD_DATA4_BASE_IDX 0
#define mmSDMA0_RLC2_MIDCMD_DATA5 0x0225
#define mmSDMA0_RLC2_MIDCMD_DATA5_BASE_IDX 0
#define mmSDMA0_RLC2_MIDCMD_DATA6 0x0226
#define mmSDMA0_RLC2_MIDCMD_DATA6_BASE_IDX 0
#define mmSDMA0_RLC2_MIDCMD_DATA7 0x0227
#define mmSDMA0_RLC2_MIDCMD_DATA7_BASE_IDX 0
#define mmSDMA0_RLC2_MIDCMD_DATA8 0x0228
#define mmSDMA0_RLC2_MIDCMD_DATA8_BASE_IDX 0
#define mmSDMA0_RLC2_MIDCMD_CNTL 0x0229
#define mmSDMA0_RLC2_MIDCMD_CNTL_BASE_IDX 0
#define mmSDMA0_RLC3_RB_CNTL 0x0238
#define mmSDMA0_RLC3_RB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC3_RB_BASE 0x0239
#define mmSDMA0_RLC3_RB_BASE_BASE_IDX 0
#define mmSDMA0_RLC3_RB_BASE_HI 0x023a
#define mmSDMA0_RLC3_RB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC3_RB_RPTR 0x023b
#define mmSDMA0_RLC3_RB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC3_RB_RPTR_HI 0x023c
#define mmSDMA0_RLC3_RB_RPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC3_RB_WPTR 0x023d
#define mmSDMA0_RLC3_RB_WPTR_BASE_IDX 0
#define mmSDMA0_RLC3_RB_WPTR_HI 0x023e
#define mmSDMA0_RLC3_RB_WPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC3_RB_WPTR_POLL_CNTL 0x023f
#define mmSDMA0_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC3_RB_RPTR_ADDR_HI 0x0240
#define mmSDMA0_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC3_RB_RPTR_ADDR_LO 0x0241
#define mmSDMA0_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC3_IB_CNTL 0x0242
#define mmSDMA0_RLC3_IB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC3_IB_RPTR 0x0243
#define mmSDMA0_RLC3_IB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC3_IB_OFFSET 0x0244
#define mmSDMA0_RLC3_IB_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC3_IB_BASE_LO 0x0245
#define mmSDMA0_RLC3_IB_BASE_LO_BASE_IDX 0
#define mmSDMA0_RLC3_IB_BASE_HI 0x0246
#define mmSDMA0_RLC3_IB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC3_IB_SIZE 0x0247
#define mmSDMA0_RLC3_IB_SIZE_BASE_IDX 0
#define mmSDMA0_RLC3_SKIP_CNTL 0x0248
#define mmSDMA0_RLC3_SKIP_CNTL_BASE_IDX 0
#define mmSDMA0_RLC3_CONTEXT_STATUS 0x0249
#define mmSDMA0_RLC3_CONTEXT_STATUS_BASE_IDX 0
#define mmSDMA0_RLC3_DOORBELL 0x024a
#define mmSDMA0_RLC3_DOORBELL_BASE_IDX 0
#define mmSDMA0_RLC3_STATUS 0x0260
#define mmSDMA0_RLC3_STATUS_BASE_IDX 0
#define mmSDMA0_RLC3_DOORBELL_LOG 0x0261
#define mmSDMA0_RLC3_DOORBELL_LOG_BASE_IDX 0
#define mmSDMA0_RLC3_WATERMARK 0x0262
#define mmSDMA0_RLC3_WATERMARK_BASE_IDX 0
#define mmSDMA0_RLC3_DOORBELL_OFFSET 0x0263
#define mmSDMA0_RLC3_DOORBELL_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC3_CSA_ADDR_LO 0x0264
#define mmSDMA0_RLC3_CSA_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC3_CSA_ADDR_HI 0x0265
#define mmSDMA0_RLC3_CSA_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC3_IB_SUB_REMAIN 0x0267
#define mmSDMA0_RLC3_IB_SUB_REMAIN_BASE_IDX 0
#define mmSDMA0_RLC3_PREEMPT 0x0268
#define mmSDMA0_RLC3_PREEMPT_BASE_IDX 0
#define mmSDMA0_RLC3_DUMMY_REG 0x0269
#define mmSDMA0_RLC3_DUMMY_REG_BASE_IDX 0
#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a
#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b
#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC3_RB_AQL_CNTL 0x026c
#define mmSDMA0_RLC3_RB_AQL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC3_MINOR_PTR_UPDATE 0x026d
#define mmSDMA0_RLC3_MINOR_PTR_UPDATE_BASE_IDX 0
#define mmSDMA0_RLC3_MIDCMD_DATA0 0x0278
#define mmSDMA0_RLC3_MIDCMD_DATA0_BASE_IDX 0
#define mmSDMA0_RLC3_MIDCMD_DATA1 0x0279
#define mmSDMA0_RLC3_MIDCMD_DATA1_BASE_IDX 0
#define mmSDMA0_RLC3_MIDCMD_DATA2 0x027a
#define mmSDMA0_RLC3_MIDCMD_DATA2_BASE_IDX 0
#define mmSDMA0_RLC3_MIDCMD_DATA3 0x027b
#define mmSDMA0_RLC3_MIDCMD_DATA3_BASE_IDX 0
#define mmSDMA0_RLC3_MIDCMD_DATA4 0x027c
#define mmSDMA0_RLC3_MIDCMD_DATA4_BASE_IDX 0
#define mmSDMA0_RLC3_MIDCMD_DATA5 0x027d
#define mmSDMA0_RLC3_MIDCMD_DATA5_BASE_IDX 0
#define mmSDMA0_RLC3_MIDCMD_DATA6 0x027e
#define mmSDMA0_RLC3_MIDCMD_DATA6_BASE_IDX 0
#define mmSDMA0_RLC3_MIDCMD_DATA7 0x027f
#define mmSDMA0_RLC3_MIDCMD_DATA7_BASE_IDX 0
#define mmSDMA0_RLC3_MIDCMD_DATA8 0x0280
#define mmSDMA0_RLC3_MIDCMD_DATA8_BASE_IDX 0
#define mmSDMA0_RLC3_MIDCMD_CNTL 0x0281
#define mmSDMA0_RLC3_MIDCMD_CNTL_BASE_IDX 0
#define mmSDMA0_RLC4_RB_CNTL 0x0290
#define mmSDMA0_RLC4_RB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC4_RB_BASE 0x0291
#define mmSDMA0_RLC4_RB_BASE_BASE_IDX 0
#define mmSDMA0_RLC4_RB_BASE_HI 0x0292
#define mmSDMA0_RLC4_RB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC4_RB_RPTR 0x0293
#define mmSDMA0_RLC4_RB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC4_RB_RPTR_HI 0x0294
#define mmSDMA0_RLC4_RB_RPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC4_RB_WPTR 0x0295
#define mmSDMA0_RLC4_RB_WPTR_BASE_IDX 0
#define mmSDMA0_RLC4_RB_WPTR_HI 0x0296
#define mmSDMA0_RLC4_RB_WPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC4_RB_WPTR_POLL_CNTL 0x0297
#define mmSDMA0_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC4_RB_RPTR_ADDR_HI 0x0298
#define mmSDMA0_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC4_RB_RPTR_ADDR_LO 0x0299
#define mmSDMA0_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC4_IB_CNTL 0x029a
#define mmSDMA0_RLC4_IB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC4_IB_RPTR 0x029b
#define mmSDMA0_RLC4_IB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC4_IB_OFFSET 0x029c
#define mmSDMA0_RLC4_IB_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC4_IB_BASE_LO 0x029d
#define mmSDMA0_RLC4_IB_BASE_LO_BASE_IDX 0
#define mmSDMA0_RLC4_IB_BASE_HI 0x029e
#define mmSDMA0_RLC4_IB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC4_IB_SIZE 0x029f
#define mmSDMA0_RLC4_IB_SIZE_BASE_IDX 0
#define mmSDMA0_RLC4_SKIP_CNTL 0x02a0
#define mmSDMA0_RLC4_SKIP_CNTL_BASE_IDX 0
#define mmSDMA0_RLC4_CONTEXT_STATUS 0x02a1
#define mmSDMA0_RLC4_CONTEXT_STATUS_BASE_IDX 0
#define mmSDMA0_RLC4_DOORBELL 0x02a2
#define mmSDMA0_RLC4_DOORBELL_BASE_IDX 0
#define mmSDMA0_RLC4_STATUS 0x02b8
#define mmSDMA0_RLC4_STATUS_BASE_IDX 0
#define mmSDMA0_RLC4_DOORBELL_LOG 0x02b9
#define mmSDMA0_RLC4_DOORBELL_LOG_BASE_IDX 0
#define mmSDMA0_RLC4_WATERMARK 0x02ba
#define mmSDMA0_RLC4_WATERMARK_BASE_IDX 0
#define mmSDMA0_RLC4_DOORBELL_OFFSET 0x02bb
#define mmSDMA0_RLC4_DOORBELL_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC4_CSA_ADDR_LO 0x02bc
#define mmSDMA0_RLC4_CSA_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC4_CSA_ADDR_HI 0x02bd
#define mmSDMA0_RLC4_CSA_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC4_IB_SUB_REMAIN 0x02bf
#define mmSDMA0_RLC4_IB_SUB_REMAIN_BASE_IDX 0
#define mmSDMA0_RLC4_PREEMPT 0x02c0
#define mmSDMA0_RLC4_PREEMPT_BASE_IDX 0
#define mmSDMA0_RLC4_DUMMY_REG 0x02c1
#define mmSDMA0_RLC4_DUMMY_REG_BASE_IDX 0
#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2
#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3
#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC4_RB_AQL_CNTL 0x02c4
#define mmSDMA0_RLC4_RB_AQL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC4_MINOR_PTR_UPDATE 0x02c5
#define mmSDMA0_RLC4_MINOR_PTR_UPDATE_BASE_IDX 0
#define mmSDMA0_RLC4_MIDCMD_DATA0 0x02d0
#define mmSDMA0_RLC4_MIDCMD_DATA0_BASE_IDX 0
#define mmSDMA0_RLC4_MIDCMD_DATA1 0x02d1
#define mmSDMA0_RLC4_MIDCMD_DATA1_BASE_IDX 0
#define mmSDMA0_RLC4_MIDCMD_DATA2 0x02d2
#define mmSDMA0_RLC4_MIDCMD_DATA2_BASE_IDX 0
#define mmSDMA0_RLC4_MIDCMD_DATA3 0x02d3
#define mmSDMA0_RLC4_MIDCMD_DATA3_BASE_IDX 0
#define mmSDMA0_RLC4_MIDCMD_DATA4 0x02d4
#define mmSDMA0_RLC4_MIDCMD_DATA4_BASE_IDX 0
#define mmSDMA0_RLC4_MIDCMD_DATA5 0x02d5
#define mmSDMA0_RLC4_MIDCMD_DATA5_BASE_IDX 0
#define mmSDMA0_RLC4_MIDCMD_DATA6 0x02d6
#define mmSDMA0_RLC4_MIDCMD_DATA6_BASE_IDX 0
#define mmSDMA0_RLC4_MIDCMD_DATA7 0x02d7
#define mmSDMA0_RLC4_MIDCMD_DATA7_BASE_IDX 0
#define mmSDMA0_RLC4_MIDCMD_DATA8 0x02d8
#define mmSDMA0_RLC4_MIDCMD_DATA8_BASE_IDX 0
#define mmSDMA0_RLC4_MIDCMD_CNTL 0x02d9
#define mmSDMA0_RLC4_MIDCMD_CNTL_BASE_IDX 0
#define mmSDMA0_RLC5_RB_CNTL 0x02e8
#define mmSDMA0_RLC5_RB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC5_RB_BASE 0x02e9
#define mmSDMA0_RLC5_RB_BASE_BASE_IDX 0
#define mmSDMA0_RLC5_RB_BASE_HI 0x02ea
#define mmSDMA0_RLC5_RB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC5_RB_RPTR 0x02eb
#define mmSDMA0_RLC5_RB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC5_RB_RPTR_HI 0x02ec
#define mmSDMA0_RLC5_RB_RPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC5_RB_WPTR 0x02ed
#define mmSDMA0_RLC5_RB_WPTR_BASE_IDX 0
#define mmSDMA0_RLC5_RB_WPTR_HI 0x02ee
#define mmSDMA0_RLC5_RB_WPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC5_RB_WPTR_POLL_CNTL 0x02ef
#define mmSDMA0_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC5_RB_RPTR_ADDR_HI 0x02f0
#define mmSDMA0_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC5_RB_RPTR_ADDR_LO 0x02f1
#define mmSDMA0_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC5_IB_CNTL 0x02f2
#define mmSDMA0_RLC5_IB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC5_IB_RPTR 0x02f3
#define mmSDMA0_RLC5_IB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC5_IB_OFFSET 0x02f4
#define mmSDMA0_RLC5_IB_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC5_IB_BASE_LO 0x02f5
#define mmSDMA0_RLC5_IB_BASE_LO_BASE_IDX 0
#define mmSDMA0_RLC5_IB_BASE_HI 0x02f6
#define mmSDMA0_RLC5_IB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC5_IB_SIZE 0x02f7
#define mmSDMA0_RLC5_IB_SIZE_BASE_IDX 0
#define mmSDMA0_RLC5_SKIP_CNTL 0x02f8
#define mmSDMA0_RLC5_SKIP_CNTL_BASE_IDX 0
#define mmSDMA0_RLC5_CONTEXT_STATUS 0x02f9
#define mmSDMA0_RLC5_CONTEXT_STATUS_BASE_IDX 0
#define mmSDMA0_RLC5_DOORBELL 0x02fa
#define mmSDMA0_RLC5_DOORBELL_BASE_IDX 0
#define mmSDMA0_RLC5_STATUS 0x0310
#define mmSDMA0_RLC5_STATUS_BASE_IDX 0
#define mmSDMA0_RLC5_DOORBELL_LOG 0x0311
#define mmSDMA0_RLC5_DOORBELL_LOG_BASE_IDX 0
#define mmSDMA0_RLC5_WATERMARK 0x0312
#define mmSDMA0_RLC5_WATERMARK_BASE_IDX 0
#define mmSDMA0_RLC5_DOORBELL_OFFSET 0x0313
#define mmSDMA0_RLC5_DOORBELL_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC5_CSA_ADDR_LO 0x0314
#define mmSDMA0_RLC5_CSA_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC5_CSA_ADDR_HI 0x0315
#define mmSDMA0_RLC5_CSA_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC5_IB_SUB_REMAIN 0x0317
#define mmSDMA0_RLC5_IB_SUB_REMAIN_BASE_IDX 0
#define mmSDMA0_RLC5_PREEMPT 0x0318
#define mmSDMA0_RLC5_PREEMPT_BASE_IDX 0
#define mmSDMA0_RLC5_DUMMY_REG 0x0319
#define mmSDMA0_RLC5_DUMMY_REG_BASE_IDX 0
#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a
#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b
#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC5_RB_AQL_CNTL 0x031c
#define mmSDMA0_RLC5_RB_AQL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC5_MINOR_PTR_UPDATE 0x031d
#define mmSDMA0_RLC5_MINOR_PTR_UPDATE_BASE_IDX 0
#define mmSDMA0_RLC5_MIDCMD_DATA0 0x0328
#define mmSDMA0_RLC5_MIDCMD_DATA0_BASE_IDX 0
#define mmSDMA0_RLC5_MIDCMD_DATA1 0x0329
#define mmSDMA0_RLC5_MIDCMD_DATA1_BASE_IDX 0
#define mmSDMA0_RLC5_MIDCMD_DATA2 0x032a
#define mmSDMA0_RLC5_MIDCMD_DATA2_BASE_IDX 0
#define mmSDMA0_RLC5_MIDCMD_DATA3 0x032b
#define mmSDMA0_RLC5_MIDCMD_DATA3_BASE_IDX 0
#define mmSDMA0_RLC5_MIDCMD_DATA4 0x032c
#define mmSDMA0_RLC5_MIDCMD_DATA4_BASE_IDX 0
#define mmSDMA0_RLC5_MIDCMD_DATA5 0x032d
#define mmSDMA0_RLC5_MIDCMD_DATA5_BASE_IDX 0
#define mmSDMA0_RLC5_MIDCMD_DATA6 0x032e
#define mmSDMA0_RLC5_MIDCMD_DATA6_BASE_IDX 0
#define mmSDMA0_RLC5_MIDCMD_DATA7 0x032f
#define mmSDMA0_RLC5_MIDCMD_DATA7_BASE_IDX 0
#define mmSDMA0_RLC5_MIDCMD_DATA8 0x0330
#define mmSDMA0_RLC5_MIDCMD_DATA8_BASE_IDX 0
#define mmSDMA0_RLC5_MIDCMD_CNTL 0x0331
#define mmSDMA0_RLC5_MIDCMD_CNTL_BASE_IDX 0
#define mmSDMA0_RLC6_RB_CNTL 0x0340
#define mmSDMA0_RLC6_RB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC6_RB_BASE 0x0341
#define mmSDMA0_RLC6_RB_BASE_BASE_IDX 0
#define mmSDMA0_RLC6_RB_BASE_HI 0x0342
#define mmSDMA0_RLC6_RB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC6_RB_RPTR 0x0343
#define mmSDMA0_RLC6_RB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC6_RB_RPTR_HI 0x0344
#define mmSDMA0_RLC6_RB_RPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC6_RB_WPTR 0x0345
#define mmSDMA0_RLC6_RB_WPTR_BASE_IDX 0
#define mmSDMA0_RLC6_RB_WPTR_HI 0x0346
#define mmSDMA0_RLC6_RB_WPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC6_RB_WPTR_POLL_CNTL 0x0347
#define mmSDMA0_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC6_RB_RPTR_ADDR_HI 0x0348
#define mmSDMA0_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC6_RB_RPTR_ADDR_LO 0x0349
#define mmSDMA0_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC6_IB_CNTL 0x034a
#define mmSDMA0_RLC6_IB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC6_IB_RPTR 0x034b
#define mmSDMA0_RLC6_IB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC6_IB_OFFSET 0x034c
#define mmSDMA0_RLC6_IB_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC6_IB_BASE_LO 0x034d
#define mmSDMA0_RLC6_IB_BASE_LO_BASE_IDX 0
#define mmSDMA0_RLC6_IB_BASE_HI 0x034e
#define mmSDMA0_RLC6_IB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC6_IB_SIZE 0x034f
#define mmSDMA0_RLC6_IB_SIZE_BASE_IDX 0
#define mmSDMA0_RLC6_SKIP_CNTL 0x0350
#define mmSDMA0_RLC6_SKIP_CNTL_BASE_IDX 0
#define mmSDMA0_RLC6_CONTEXT_STATUS 0x0351
#define mmSDMA0_RLC6_CONTEXT_STATUS_BASE_IDX 0
#define mmSDMA0_RLC6_DOORBELL 0x0352
#define mmSDMA0_RLC6_DOORBELL_BASE_IDX 0
#define mmSDMA0_RLC6_STATUS 0x0368
#define mmSDMA0_RLC6_STATUS_BASE_IDX 0
#define mmSDMA0_RLC6_DOORBELL_LOG 0x0369
#define mmSDMA0_RLC6_DOORBELL_LOG_BASE_IDX 0
#define mmSDMA0_RLC6_WATERMARK 0x036a
#define mmSDMA0_RLC6_WATERMARK_BASE_IDX 0
#define mmSDMA0_RLC6_DOORBELL_OFFSET 0x036b
#define mmSDMA0_RLC6_DOORBELL_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC6_CSA_ADDR_LO 0x036c
#define mmSDMA0_RLC6_CSA_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC6_CSA_ADDR_HI 0x036d
#define mmSDMA0_RLC6_CSA_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC6_IB_SUB_REMAIN 0x036f
#define mmSDMA0_RLC6_IB_SUB_REMAIN_BASE_IDX 0
#define mmSDMA0_RLC6_PREEMPT 0x0370
#define mmSDMA0_RLC6_PREEMPT_BASE_IDX 0
#define mmSDMA0_RLC6_DUMMY_REG 0x0371
#define mmSDMA0_RLC6_DUMMY_REG_BASE_IDX 0
#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372
#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373
#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC6_RB_AQL_CNTL 0x0374
#define mmSDMA0_RLC6_RB_AQL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC6_MINOR_PTR_UPDATE 0x0375
#define mmSDMA0_RLC6_MINOR_PTR_UPDATE_BASE_IDX 0
#define mmSDMA0_RLC6_MIDCMD_DATA0 0x0380
#define mmSDMA0_RLC6_MIDCMD_DATA0_BASE_IDX 0
#define mmSDMA0_RLC6_MIDCMD_DATA1 0x0381
#define mmSDMA0_RLC6_MIDCMD_DATA1_BASE_IDX 0
#define mmSDMA0_RLC6_MIDCMD_DATA2 0x0382
#define mmSDMA0_RLC6_MIDCMD_DATA2_BASE_IDX 0
#define mmSDMA0_RLC6_MIDCMD_DATA3 0x0383
#define mmSDMA0_RLC6_MIDCMD_DATA3_BASE_IDX 0
#define mmSDMA0_RLC6_MIDCMD_DATA4 0x0384
#define mmSDMA0_RLC6_MIDCMD_DATA4_BASE_IDX 0
#define mmSDMA0_RLC6_MIDCMD_DATA5 0x0385
#define mmSDMA0_RLC6_MIDCMD_DATA5_BASE_IDX 0
#define mmSDMA0_RLC6_MIDCMD_DATA6 0x0386
#define mmSDMA0_RLC6_MIDCMD_DATA6_BASE_IDX 0
#define mmSDMA0_RLC6_MIDCMD_DATA7 0x0387
#define mmSDMA0_RLC6_MIDCMD_DATA7_BASE_IDX 0
#define mmSDMA0_RLC6_MIDCMD_DATA8 0x0388
#define mmSDMA0_RLC6_MIDCMD_DATA8_BASE_IDX 0
#define mmSDMA0_RLC6_MIDCMD_CNTL 0x0389
#define mmSDMA0_RLC6_MIDCMD_CNTL_BASE_IDX 0
#define mmSDMA0_RLC7_RB_CNTL 0x0398
#define mmSDMA0_RLC7_RB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC7_RB_BASE 0x0399
#define mmSDMA0_RLC7_RB_BASE_BASE_IDX 0
#define mmSDMA0_RLC7_RB_BASE_HI 0x039a
#define mmSDMA0_RLC7_RB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC7_RB_RPTR 0x039b
#define mmSDMA0_RLC7_RB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC7_RB_RPTR_HI 0x039c
#define mmSDMA0_RLC7_RB_RPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC7_RB_WPTR 0x039d
#define mmSDMA0_RLC7_RB_WPTR_BASE_IDX 0
#define mmSDMA0_RLC7_RB_WPTR_HI 0x039e
#define mmSDMA0_RLC7_RB_WPTR_HI_BASE_IDX 0
#define mmSDMA0_RLC7_RB_WPTR_POLL_CNTL 0x039f
#define mmSDMA0_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC7_RB_RPTR_ADDR_HI 0x03a0
#define mmSDMA0_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC7_RB_RPTR_ADDR_LO 0x03a1
#define mmSDMA0_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC7_IB_CNTL 0x03a2
#define mmSDMA0_RLC7_IB_CNTL_BASE_IDX 0
#define mmSDMA0_RLC7_IB_RPTR 0x03a3
#define mmSDMA0_RLC7_IB_RPTR_BASE_IDX 0
#define mmSDMA0_RLC7_IB_OFFSET 0x03a4
#define mmSDMA0_RLC7_IB_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC7_IB_BASE_LO 0x03a5
#define mmSDMA0_RLC7_IB_BASE_LO_BASE_IDX 0
#define mmSDMA0_RLC7_IB_BASE_HI 0x03a6
#define mmSDMA0_RLC7_IB_BASE_HI_BASE_IDX 0
#define mmSDMA0_RLC7_IB_SIZE 0x03a7
#define mmSDMA0_RLC7_IB_SIZE_BASE_IDX 0
#define mmSDMA0_RLC7_SKIP_CNTL 0x03a8
#define mmSDMA0_RLC7_SKIP_CNTL_BASE_IDX 0
#define mmSDMA0_RLC7_CONTEXT_STATUS 0x03a9
#define mmSDMA0_RLC7_CONTEXT_STATUS_BASE_IDX 0
#define mmSDMA0_RLC7_DOORBELL 0x03aa
#define mmSDMA0_RLC7_DOORBELL_BASE_IDX 0
#define mmSDMA0_RLC7_STATUS 0x03c0
#define mmSDMA0_RLC7_STATUS_BASE_IDX 0
#define mmSDMA0_RLC7_DOORBELL_LOG 0x03c1
#define mmSDMA0_RLC7_DOORBELL_LOG_BASE_IDX 0
#define mmSDMA0_RLC7_WATERMARK 0x03c2
#define mmSDMA0_RLC7_WATERMARK_BASE_IDX 0
#define mmSDMA0_RLC7_DOORBELL_OFFSET 0x03c3
#define mmSDMA0_RLC7_DOORBELL_OFFSET_BASE_IDX 0
#define mmSDMA0_RLC7_CSA_ADDR_LO 0x03c4
#define mmSDMA0_RLC7_CSA_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC7_CSA_ADDR_HI 0x03c5
#define mmSDMA0_RLC7_CSA_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC7_IB_SUB_REMAIN 0x03c7
#define mmSDMA0_RLC7_IB_SUB_REMAIN_BASE_IDX 0
#define mmSDMA0_RLC7_PREEMPT 0x03c8
#define mmSDMA0_RLC7_PREEMPT_BASE_IDX 0
#define mmSDMA0_RLC7_DUMMY_REG 0x03c9
#define mmSDMA0_RLC7_DUMMY_REG_BASE_IDX 0
#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca
#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb
#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
#define mmSDMA0_RLC7_RB_AQL_CNTL 0x03cc
#define mmSDMA0_RLC7_RB_AQL_CNTL_BASE_IDX 0
#define mmSDMA0_RLC7_MINOR_PTR_UPDATE 0x03cd
#define mmSDMA0_RLC7_MINOR_PTR_UPDATE_BASE_IDX 0
#define mmSDMA0_RLC7_MIDCMD_DATA0 0x03d8
#define mmSDMA0_RLC7_MIDCMD_DATA0_BASE_IDX 0
#define mmSDMA0_RLC7_MIDCMD_DATA1 0x03d9
#define mmSDMA0_RLC7_MIDCMD_DATA1_BASE_IDX 0
#define mmSDMA0_RLC7_MIDCMD_DATA2 0x03da
#define mmSDMA0_RLC7_MIDCMD_DATA2_BASE_IDX 0
#define mmSDMA0_RLC7_MIDCMD_DATA3 0x03db
#define mmSDMA0_RLC7_MIDCMD_DATA3_BASE_IDX 0
#define mmSDMA0_RLC7_MIDCMD_DATA4 0x03dc
#define mmSDMA0_RLC7_MIDCMD_DATA4_BASE_IDX 0
#define mmSDMA0_RLC7_MIDCMD_DATA5 0x03dd
#define mmSDMA0_RLC7_MIDCMD_DATA5_BASE_IDX 0
#define mmSDMA0_RLC7_MIDCMD_DATA6 0x03de
#define mmSDMA0_RLC7_MIDCMD_DATA6_BASE_IDX 0
#define mmSDMA0_RLC7_MIDCMD_DATA7 0x03df
#define mmSDMA0_RLC7_MIDCMD_DATA7_BASE_IDX 0
#define mmSDMA0_RLC7_MIDCMD_DATA8 0x03e0
#define mmSDMA0_RLC7_MIDCMD_DATA8_BASE_IDX 0
#define mmSDMA0_RLC7_MIDCMD_CNTL 0x03e1
#define mmSDMA0_RLC7_MIDCMD_CNTL_BASE_IDX 0
#endif
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Linaro
* Author: Christoffer Dall <[email protected]>
*/
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/seq_file.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"
/*
* Structure to control looping through the entire vgic state. We start at
* zero for each field and move upwards. So, if dist_id is 0 we print the
* distributor info. When dist_id is 1, we have already printed it and move
* on.
*
* When vcpu_id < nr_cpus we print the vcpu info until vcpu_id == nr_cpus and
* so on.
*/
struct vgic_state_iter {
int nr_cpus;
int nr_spis;
int nr_lpis;
int dist_id;
int vcpu_id;
unsigned long intid;
int lpi_idx;
};
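/*
 * Editor's sketch of the resulting visit order (assuming 2 vcpus and
 * 32 SPIs): dist_id 0 prints the distributor; then intid 0..31 (SGIs
 * and PPIs) for vcpu 0 and again for vcpu 1; then the shared SPIs,
 * intid 32..63; finally each marked LPI from the xarray, until
 * end_of_vgic() reports the iterator exhausted.
 */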
static void iter_next(struct kvm *kvm, struct vgic_state_iter *iter)
{
struct vgic_dist *dist = &kvm->arch.vgic;
if (iter->dist_id == 0) {
iter->dist_id++;
return;
}
/*
* Let the xarray drive the iterator after the last SPI, as the iterator
* has exhausted the sequentially-allocated INTID space.
*/
if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS - 1) &&
iter->nr_lpis) {
if (iter->lpi_idx < iter->nr_lpis)
xa_find_after(&dist->lpi_xa, &iter->intid,
VGIC_LPI_MAX_INTID,
LPI_XA_MARK_DEBUG_ITER);
iter->lpi_idx++;
return;
}
iter->intid++;
if (iter->intid == VGIC_NR_PRIVATE_IRQS &&
++iter->vcpu_id < iter->nr_cpus)
iter->intid = 0;
}
static int iter_mark_lpis(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq;
unsigned long intid;
int nr_lpis = 0;
xa_for_each(&dist->lpi_xa, intid, irq) {
if (!vgic_try_get_irq_kref(irq))
continue;
xa_set_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
nr_lpis++;
}
return nr_lpis;
}
static void iter_unmark_lpis(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq;
unsigned long intid;
xa_for_each_marked(&dist->lpi_xa, intid, irq, LPI_XA_MARK_DEBUG_ITER) {
xa_clear_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
vgic_put_irq(kvm, irq);
}
}
static void iter_init(struct kvm *kvm, struct vgic_state_iter *iter,
loff_t pos)
{
int nr_cpus = atomic_read(&kvm->online_vcpus);
memset(iter, 0, sizeof(*iter));
iter->nr_cpus = nr_cpus;
iter->nr_spis = kvm->arch.vgic.nr_spis;
if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
iter->nr_lpis = iter_mark_lpis(kvm);
/* Fast forward to the right position if needed */
while (pos--)
iter_next(kvm, iter);
}
static bool end_of_vgic(struct vgic_state_iter *iter)
{
return iter->dist_id > 0 &&
iter->vcpu_id == iter->nr_cpus &&
iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS) &&
(!iter->nr_lpis || iter->lpi_idx > iter->nr_lpis);
}
static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
{
struct kvm *kvm = s->private;
struct vgic_state_iter *iter;
mutex_lock(&kvm->arch.config_lock);
iter = kvm->arch.vgic.iter;
if (iter) {
iter = ERR_PTR(-EBUSY);
goto out;
}
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter) {
iter = ERR_PTR(-ENOMEM);
goto out;
}
iter_init(kvm, iter, *pos);
kvm->arch.vgic.iter = iter;
if (end_of_vgic(iter))
iter = NULL;
out:
mutex_unlock(&kvm->arch.config_lock);
return iter;
}
static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
{
struct kvm *kvm = s->private;
struct vgic_state_iter *iter = kvm->arch.vgic.iter;
++*pos;
iter_next(kvm, iter);
if (end_of_vgic(iter))
iter = NULL;
return iter;
}
static void vgic_debug_stop(struct seq_file *s, void *v)
{
struct kvm *kvm = s->private;
struct vgic_state_iter *iter;
/*
* If the seq file wasn't properly opened, there's nothing to clean
* up.
*/
if (IS_ERR(v))
return;
mutex_lock(&kvm->arch.config_lock);
iter = kvm->arch.vgic.iter;
iter_unmark_lpis(kvm);
kfree(iter);
kvm->arch.vgic.iter = NULL;
mutex_unlock(&kvm->arch.config_lock);
}
static void print_dist_state(struct seq_file *s, struct vgic_dist *dist,
struct vgic_state_iter *iter)
{
bool v3 = dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3;
seq_printf(s, "Distributor\n");
seq_printf(s, "===========\n");
seq_printf(s, "vgic_model:\t%s\n", v3 ? "GICv3" : "GICv2");
seq_printf(s, "nr_spis:\t%d\n", dist->nr_spis);
if (v3)
seq_printf(s, "nr_lpis:\t%d\n", iter->nr_lpis);
seq_printf(s, "enabled:\t%d\n", dist->enabled);
seq_printf(s, "\n");
seq_printf(s, "P=pending_latch, L=line_level, A=active\n");
seq_printf(s, "E=enabled, H=hw, C=config (level=1, edge=0)\n");
seq_printf(s, "G=group\n");
}
static void print_header(struct seq_file *s, struct vgic_irq *irq,
struct kvm_vcpu *vcpu)
{
int id = 0;
char *hdr = "SPI ";
if (vcpu) {
hdr = "VCPU";
id = vcpu->vcpu_idx;
}
seq_printf(s, "\n");
seq_printf(s, "%s%2d TYP ID TGT_ID PLAEHCG HWID TARGET SRC PRI VCPU_ID\n", hdr, id);
seq_printf(s, "----------------------------------------------------------------\n");
}
static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
struct kvm_vcpu *vcpu)
{
char *type;
bool pending;
if (irq->intid < VGIC_NR_SGIS)
type = "SGI";
else if (irq->intid < VGIC_NR_PRIVATE_IRQS)
type = "PPI";
else if (irq->intid < VGIC_MAX_SPI)
type = "SPI";
else
type = "LPI";
if (irq->intid == 0 || irq->intid == VGIC_NR_PRIVATE_IRQS)
print_header(s, irq, vcpu);
pending = irq->pending_latch;
if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
int err;
err = irq_get_irqchip_state(irq->host_irq,
IRQCHIP_STATE_PENDING,
&pending);
WARN_ON_ONCE(err);
}
seq_printf(s, " %s %4d "
" %2d "
"%d%d%d%d%d%d%d "
"%8d "
"%8x "
" %2x "
"%3d "
" %2d "
"\n",
type, irq->intid,
(irq->target_vcpu) ? irq->target_vcpu->vcpu_idx : -1,
pending,
irq->line_level,
irq->active,
irq->enabled,
irq->hw,
irq->config == VGIC_CONFIG_LEVEL,
irq->group,
irq->hwintid,
irq->mpidr,
irq->source,
irq->priority,
(irq->vcpu) ? irq->vcpu->vcpu_idx : -1);
}
static int vgic_debug_show(struct seq_file *s, void *v)
{
struct kvm *kvm = s->private;
struct vgic_state_iter *iter = v;
struct vgic_irq *irq;
struct kvm_vcpu *vcpu = NULL;
unsigned long flags;
if (iter->dist_id == 0) {
print_dist_state(s, &kvm->arch.vgic, iter);
return 0;
}
if (!kvm->arch.vgic.initialized)
return 0;
if (iter->vcpu_id < iter->nr_cpus)
vcpu = kvm_get_vcpu(kvm, iter->vcpu_id);
/*
* Expect this to succeed, as iter_mark_lpis() takes a reference on
* every LPI to be visited.
*/
if (iter->intid < VGIC_NR_PRIVATE_IRQS)
irq = vgic_get_vcpu_irq(vcpu, iter->intid);
else
irq = vgic_get_irq(kvm, iter->intid);
if (WARN_ON_ONCE(!irq))
return -EINVAL;
raw_spin_lock_irqsave(&irq->irq_lock, flags);
print_irq_state(s, irq, vcpu);
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(kvm, irq);
return 0;
}
static const struct seq_operations vgic_debug_sops = {
.start = vgic_debug_start,
.next = vgic_debug_next,
.stop = vgic_debug_stop,
.show = vgic_debug_show
};
DEFINE_SEQ_ATTRIBUTE(vgic_debug);
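/*
 * Editor's note: DEFINE_SEQ_ATTRIBUTE(vgic_debug) generates a
 * vgic_debug_open() helper wrapping seq_open() with &vgic_debug_sops,
 * plus the vgic_debug_fops file_operations that vgic_debug_init()
 * registers below; the kvm pointer passed to debugfs_create_file()
 * becomes s->private for the seq callbacks above.
 */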
void vgic_debug_init(struct kvm *kvm)
{
debugfs_create_file("vgic-state", 0444, kvm->debugfs_dentry, kvm,
&vgic_debug_fops);
}
void vgic_debug_destroy(struct kvm *kvm)
{
}
|
/* $Id: capiutil.c,v 1.13.6.4 2001/09/23 22:24:33 kai Exp $
*
* CAPI 2.0 convert capi message to capi message struct
*
* From CAPI 2.0 Development Kit AVM 1995 (msg.c)
* Rewritten for Linux 1996 by Carsten Paeth <[email protected]>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/isdn/capiutil.h>
#include <linux/slab.h>
#include "kcapi.h"
/* from CAPI2.0 DDK AVM Berlin GmbH */
typedef struct {
int typ;
size_t off;
} _cdef;
#define _CBYTE 1
#define _CWORD 2
#define _CDWORD 3
#define _CSTRUCT 4
#define _CMSTRUCT 5
#define _CEND 6
static _cdef cdef[] =
{
/*00 */
{_CEND},
/*01 */
{_CEND},
/*02 */
{_CEND},
/*03 */
{_CDWORD, offsetof(_cmsg, adr.adrController)},
/*04 */
{_CMSTRUCT, offsetof(_cmsg, AdditionalInfo)},
/*05 */
{_CSTRUCT, offsetof(_cmsg, B1configuration)},
/*06 */
{_CWORD, offsetof(_cmsg, B1protocol)},
/*07 */
{_CSTRUCT, offsetof(_cmsg, B2configuration)},
/*08 */
{_CWORD, offsetof(_cmsg, B2protocol)},
/*09 */
{_CSTRUCT, offsetof(_cmsg, B3configuration)},
/*0a */
{_CWORD, offsetof(_cmsg, B3protocol)},
/*0b */
{_CSTRUCT, offsetof(_cmsg, BC)},
/*0c */
{_CSTRUCT, offsetof(_cmsg, BChannelinformation)},
/*0d */
{_CMSTRUCT, offsetof(_cmsg, BProtocol)},
/*0e */
{_CSTRUCT, offsetof(_cmsg, CalledPartyNumber)},
/*0f */
{_CSTRUCT, offsetof(_cmsg, CalledPartySubaddress)},
/*10 */
{_CSTRUCT, offsetof(_cmsg, CallingPartyNumber)},
/*11 */
{_CSTRUCT, offsetof(_cmsg, CallingPartySubaddress)},
/*12 */
{_CDWORD, offsetof(_cmsg, CIPmask)},
/*13 */
{_CDWORD, offsetof(_cmsg, CIPmask2)},
/*14 */
{_CWORD, offsetof(_cmsg, CIPValue)},
/*15 */
{_CDWORD, offsetof(_cmsg, Class)},
/*16 */
{_CSTRUCT, offsetof(_cmsg, ConnectedNumber)},
/*17 */
{_CSTRUCT, offsetof(_cmsg, ConnectedSubaddress)},
/*18 */
{_CDWORD, offsetof(_cmsg, Data)},
/*19 */
{_CWORD, offsetof(_cmsg, DataHandle)},
/*1a */
{_CWORD, offsetof(_cmsg, DataLength)},
/*1b */
{_CSTRUCT, offsetof(_cmsg, FacilityConfirmationParameter)},
/*1c */
{_CSTRUCT, offsetof(_cmsg, Facilitydataarray)},
/*1d */
{_CSTRUCT, offsetof(_cmsg, FacilityIndicationParameter)},
/*1e */
{_CSTRUCT, offsetof(_cmsg, FacilityRequestParameter)},
/*1f */
{_CWORD, offsetof(_cmsg, FacilitySelector)},
/*20 */
{_CWORD, offsetof(_cmsg, Flags)},
/*21 */
{_CDWORD, offsetof(_cmsg, Function)},
/*22 */
{_CSTRUCT, offsetof(_cmsg, HLC)},
/*23 */
{_CWORD, offsetof(_cmsg, Info)},
/*24 */
{_CSTRUCT, offsetof(_cmsg, InfoElement)},
/*25 */
{_CDWORD, offsetof(_cmsg, InfoMask)},
/*26 */
{_CWORD, offsetof(_cmsg, InfoNumber)},
/*27 */
{_CSTRUCT, offsetof(_cmsg, Keypadfacility)},
/*28 */
{_CSTRUCT, offsetof(_cmsg, LLC)},
/*29 */
{_CSTRUCT, offsetof(_cmsg, ManuData)},
/*2a */
{_CDWORD, offsetof(_cmsg, ManuID)},
/*2b */
{_CSTRUCT, offsetof(_cmsg, NCPI)},
/*2c */
{_CWORD, offsetof(_cmsg, Reason)},
/*2d */
{_CWORD, offsetof(_cmsg, Reason_B3)},
/*2e */
{_CWORD, offsetof(_cmsg, Reject)},
/*2f */
{_CSTRUCT, offsetof(_cmsg, Useruserdata)}
};
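/*
 * Each cdef[] entry maps a one-byte parameter code (its index in this
 * array) to the parameter's wire type and its offset inside struct
 * _cmsg, so the generic walkers below can locate a field from the
 * code alone.
 */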
static unsigned char *cpars[] =
{
/* ALERT_REQ */ [0x01] = "\x03\x04\x0c\x27\x2f\x1c\x01\x01",
/* CONNECT_REQ */ [0x02] = "\x03\x14\x0e\x10\x0f\x11\x0d\x06\x08\x0a\x05\x07\x09\x01\x0b\x28\x22\x04\x0c\x27\x2f\x1c\x01\x01",
/* DISCONNECT_REQ */ [0x04] = "\x03\x04\x0c\x27\x2f\x1c\x01\x01",
/* LISTEN_REQ */ [0x05] = "\x03\x25\x12\x13\x10\x11\x01",
/* INFO_REQ */ [0x08] = "\x03\x0e\x04\x0c\x27\x2f\x1c\x01\x01",
/* FACILITY_REQ */ [0x09] = "\x03\x1f\x1e\x01",
/* SELECT_B_PROTOCOL_REQ */ [0x0a] = "\x03\x0d\x06\x08\x0a\x05\x07\x09\x01\x01",
/* CONNECT_B3_REQ */ [0x0b] = "\x03\x2b\x01",
/* DISCONNECT_B3_REQ */ [0x0d] = "\x03\x2b\x01",
/* DATA_B3_REQ */ [0x0f] = "\x03\x18\x1a\x19\x20\x01",
/* RESET_B3_REQ */ [0x10] = "\x03\x2b\x01",
/* ALERT_CONF */ [0x13] = "\x03\x23\x01",
/* CONNECT_CONF */ [0x14] = "\x03\x23\x01",
/* DISCONNECT_CONF */ [0x16] = "\x03\x23\x01",
/* LISTEN_CONF */ [0x17] = "\x03\x23\x01",
/* MANUFACTURER_REQ */ [0x18] = "\x03\x2a\x15\x21\x29\x01",
/* INFO_CONF */ [0x1a] = "\x03\x23\x01",
/* FACILITY_CONF */ [0x1b] = "\x03\x23\x1f\x1b\x01",
/* SELECT_B_PROTOCOL_CONF */ [0x1c] = "\x03\x23\x01",
/* CONNECT_B3_CONF */ [0x1d] = "\x03\x23\x01",
/* DISCONNECT_B3_CONF */ [0x1f] = "\x03\x23\x01",
/* DATA_B3_CONF */ [0x21] = "\x03\x19\x23\x01",
/* RESET_B3_CONF */ [0x22] = "\x03\x23\x01",
/* CONNECT_IND */ [0x26] = "\x03\x14\x0e\x10\x0f\x11\x0b\x28\x22\x04\x0c\x27\x2f\x1c\x01\x01",
/* CONNECT_ACTIVE_IND */ [0x27] = "\x03\x16\x17\x28\x01",
/* DISCONNECT_IND */ [0x28] = "\x03\x2c\x01",
/* MANUFACTURER_CONF */ [0x2a] = "\x03\x2a\x15\x21\x29\x01",
/* INFO_IND */ [0x2c] = "\x03\x26\x24\x01",
/* FACILITY_IND */ [0x2d] = "\x03\x1f\x1d\x01",
/* CONNECT_B3_IND */ [0x2f] = "\x03\x2b\x01",
/* CONNECT_B3_ACTIVE_IND */ [0x30] = "\x03\x2b\x01",
/* DISCONNECT_B3_IND */ [0x31] = "\x03\x2d\x2b\x01",
/* DATA_B3_IND */ [0x33] = "\x03\x18\x1a\x19\x20\x01",
/* RESET_B3_IND */ [0x34] = "\x03\x2b\x01",
/* CONNECT_B3_T90_ACTIVE_IND */ [0x35] = "\x03\x2b\x01",
/* CONNECT_RESP */ [0x38] = "\x03\x2e\x0d\x06\x08\x0a\x05\x07\x09\x01\x16\x17\x28\x04\x0c\x27\x2f\x1c\x01\x01",
/* CONNECT_ACTIVE_RESP */ [0x39] = "\x03\x01",
/* DISCONNECT_RESP */ [0x3a] = "\x03\x01",
/* MANUFACTURER_IND */ [0x3c] = "\x03\x2a\x15\x21\x29\x01",
/* INFO_RESP */ [0x3e] = "\x03\x01",
/* FACILITY_RESP */ [0x3f] = "\x03\x1f\x01",
/* CONNECT_B3_RESP */ [0x41] = "\x03\x2e\x2b\x01",
/* CONNECT_B3_ACTIVE_RESP */ [0x42] = "\x03\x01",
/* DISCONNECT_B3_RESP */ [0x43] = "\x03\x01",
/* DATA_B3_RESP */ [0x45] = "\x03\x19\x01",
/* RESET_B3_RESP */ [0x46] = "\x03\x01",
/* CONNECT_B3_T90_ACTIVE_RESP */ [0x47] = "\x03\x01",
/* MANUFACTURER_RESP */ [0x4e] = "\x03\x2a\x15\x21\x29\x01",
};
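/*
 * Editor's note: each cpars[] template lists a message's parameters as
 * one-byte indices into cdef[], with \x01 (_CEND) closing the innermost
 * _CMSTRUCT or the message itself.  ALERT_REQ, for instance, decodes as
 * adr.adrController, then AdditionalInfo opening a sub-struct holding
 * BChannelinformation, Keypadfacility, Useruserdata and
 * Facilitydataarray; the first \x01 closes the sub-struct and the
 * second closes the message.
 */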
/*-------------------------------------------------------*/
#define byteTLcpy(x, y) *(u8 *)(x) = *(u8 *)(y);
#define wordTLcpy(x, y) *(u16 *)(x) = *(u16 *)(y);
#define dwordTLcpy(x, y) memcpy(x, y, 4);
#define structTLcpy(x, y, l) memcpy(x, y, l)
#define structTLcpyovl(x, y, l) memmove(x, y, l)
#define byteTRcpy(x, y) *(u8 *)(y) = *(u8 *)(x);
#define wordTRcpy(x, y) *(u16 *)(y) = *(u16 *)(x);
#define dwordTRcpy(x, y) memcpy(y, x, 4);
#define structTRcpy(x, y, l) memcpy(y, x, l)
#define structTRcpyovl(x, y, l) memmove(y, x, l)
/*-------------------------------------------------------*/
static unsigned command_2_index(u8 c, u8 sc)
{
if (c & 0x80)
c = 0x9 + (c & 0x0f);
else if (c == 0x41)
c = 0x9 + 0x1;
if (c > 0x18)
c = 0x00;
return (sc & 3) * (0x9 + 0x9) + c;
}
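/*
 * Worked example (editor's sketch, assuming the CAPI 2.0 subcommand
 * values REQ=0x80, CONF=0x81, IND=0x82, RESP=0x83): (sc & 3) selects
 * one of four banks of 0x12 entries; B3 commands 0x80..0x88 fold to
 * 0x9..0x11, SELECT_B_PROTOCOL (0x41) to 0xa, and MANUFACTURER (0xff)
 * to 0x18.  DATA_B3_CONF (cmd 0x86, subcmd 0x81) thus indexes
 * 1 * 0x12 + 0xf = 0x21, matching mnames[0x21] and cpars[0x21].
 */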
/**
* capi_cmd2par() - find parameter string for CAPI 2.0 command/subcommand
* @cmd: command number
* @subcmd: subcommand number
*
* Return value: static string, NULL if command/subcommand unknown
*/
static unsigned char *capi_cmd2par(u8 cmd, u8 subcmd)
{
return cpars[command_2_index(cmd, subcmd)];
}
/*-------------------------------------------------------*/
#define TYP (cdef[cmsg->par[cmsg->p]].typ)
#define OFF (((u8 *)cmsg) + cdef[cmsg->par[cmsg->p]].off)
static void jumpcstruct(_cmsg *cmsg)
{
unsigned layer;
for (cmsg->p++, layer = 1; layer;) {
/* $$$$$ assert (cmsg->p); */
cmsg->p++;
switch (TYP) {
case _CMSTRUCT:
layer++;
break;
case _CEND:
layer--;
break;
}
}
}
/*-------------------------------------------------------*/
static char *mnames[] =
{
[0x01] = "ALERT_REQ",
[0x02] = "CONNECT_REQ",
[0x04] = "DISCONNECT_REQ",
[0x05] = "LISTEN_REQ",
[0x08] = "INFO_REQ",
[0x09] = "FACILITY_REQ",
[0x0a] = "SELECT_B_PROTOCOL_REQ",
[0x0b] = "CONNECT_B3_REQ",
[0x0d] = "DISCONNECT_B3_REQ",
[0x0f] = "DATA_B3_REQ",
[0x10] = "RESET_B3_REQ",
[0x13] = "ALERT_CONF",
[0x14] = "CONNECT_CONF",
[0x16] = "DISCONNECT_CONF",
[0x17] = "LISTEN_CONF",
[0x18] = "MANUFACTURER_REQ",
[0x1a] = "INFO_CONF",
[0x1b] = "FACILITY_CONF",
[0x1c] = "SELECT_B_PROTOCOL_CONF",
[0x1d] = "CONNECT_B3_CONF",
[0x1f] = "DISCONNECT_B3_CONF",
[0x21] = "DATA_B3_CONF",
[0x22] = "RESET_B3_CONF",
[0x26] = "CONNECT_IND",
[0x27] = "CONNECT_ACTIVE_IND",
[0x28] = "DISCONNECT_IND",
[0x2a] = "MANUFACTURER_CONF",
[0x2c] = "INFO_IND",
[0x2d] = "FACILITY_IND",
[0x2f] = "CONNECT_B3_IND",
[0x30] = "CONNECT_B3_ACTIVE_IND",
[0x31] = "DISCONNECT_B3_IND",
[0x33] = "DATA_B3_IND",
[0x34] = "RESET_B3_IND",
[0x35] = "CONNECT_B3_T90_ACTIVE_IND",
[0x38] = "CONNECT_RESP",
[0x39] = "CONNECT_ACTIVE_RESP",
[0x3a] = "DISCONNECT_RESP",
[0x3c] = "MANUFACTURER_IND",
[0x3e] = "INFO_RESP",
[0x3f] = "FACILITY_RESP",
[0x41] = "CONNECT_B3_RESP",
[0x42] = "CONNECT_B3_ACTIVE_RESP",
[0x43] = "DISCONNECT_B3_RESP",
[0x45] = "DATA_B3_RESP",
[0x46] = "RESET_B3_RESP",
[0x47] = "CONNECT_B3_T90_ACTIVE_RESP",
[0x4e] = "MANUFACTURER_RESP"
};
/**
* capi_cmd2str() - convert CAPI 2.0 command/subcommand number to name
* @cmd: command number
* @subcmd: subcommand number
*
* Return value: static string
*/
char *capi_cmd2str(u8 cmd, u8 subcmd)
{
char *result;
result = mnames[command_2_index(cmd, subcmd)];
if (result == NULL)
result = "INVALID_COMMAND";
return result;
}
/*-------------------------------------------------------*/
#ifdef CONFIG_CAPI_TRACE
/*-------------------------------------------------------*/
static char *pnames[] =
{
/*00 */ NULL,
/*01 */ NULL,
/*02 */ NULL,
/*03 */ "Controller/PLCI/NCCI",
/*04 */ "AdditionalInfo",
/*05 */ "B1configuration",
/*06 */ "B1protocol",
/*07 */ "B2configuration",
/*08 */ "B2protocol",
/*09 */ "B3configuration",
/*0a */ "B3protocol",
/*0b */ "BC",
/*0c */ "BChannelinformation",
/*0d */ "BProtocol",
/*0e */ "CalledPartyNumber",
/*0f */ "CalledPartySubaddress",
/*10 */ "CallingPartyNumber",
/*11 */ "CallingPartySubaddress",
/*12 */ "CIPmask",
/*13 */ "CIPmask2",
/*14 */ "CIPValue",
/*15 */ "Class",
/*16 */ "ConnectedNumber",
/*17 */ "ConnectedSubaddress",
/*18 */ "Data32",
/*19 */ "DataHandle",
/*1a */ "DataLength",
/*1b */ "FacilityConfirmationParameter",
/*1c */ "Facilitydataarray",
/*1d */ "FacilityIndicationParameter",
/*1e */ "FacilityRequestParameter",
/*1f */ "FacilitySelector",
/*20 */ "Flags",
/*21 */ "Function",
/*22 */ "HLC",
/*23 */ "Info",
/*24 */ "InfoElement",
/*25 */ "InfoMask",
/*26 */ "InfoNumber",
/*27 */ "Keypadfacility",
/*28 */ "LLC",
/*29 */ "ManuData",
/*2a */ "ManuID",
/*2b */ "NCPI",
/*2c */ "Reason",
/*2d */ "Reason_B3",
/*2e */ "Reject",
/*2f */ "Useruserdata"
};
#include <linux/stdarg.h>
/*-------------------------------------------------------*/
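/*
 * bufprint(): printf into a growable buffer.  vsnprintf() is tried
 * once; on truncation the buffer is doubled until the result fits and
 * the vsnprintf() is repeated.  An allocation failure frees the buffer
 * and returns NULL, which every later bufprint() call then passes
 * through unchanged.
 */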
static _cdebbuf *bufprint(_cdebbuf *cdb, char *fmt, ...)
{
va_list f;
size_t n, r;
if (!cdb)
return NULL;
va_start(f, fmt);
r = cdb->size - cdb->pos;
n = vsnprintf(cdb->p, r, fmt, f);
va_end(f);
if (n >= r) {
/* truncated, need bigger buffer */
size_t ns = 2 * cdb->size;
u_char *nb;
while ((ns - cdb->pos) <= n)
ns *= 2;
nb = kmalloc(ns, GFP_ATOMIC);
if (!nb) {
cdebbuf_free(cdb);
return NULL;
}
memcpy(nb, cdb->buf, cdb->pos);
kfree(cdb->buf);
nb[cdb->pos] = 0;
cdb->buf = nb;
cdb->p = cdb->buf + cdb->pos;
cdb->size = ns;
va_start(f, fmt);
r = cdb->size - cdb->pos;
n = vsnprintf(cdb->p, r, fmt, f);
va_end(f);
}
cdb->p += n;
cdb->pos += n;
return cdb;
}
static _cdebbuf *printstructlen(_cdebbuf *cdb, u8 *m, unsigned len)
{
unsigned hex = 0;
if (!cdb)
return NULL;
for (; len; len--, m++)
if (isalnum(*m) || *m == ' ') {
if (hex)
cdb = bufprint(cdb, ">");
cdb = bufprint(cdb, "%c", *m);
hex = 0;
} else {
if (!hex)
cdb = bufprint(cdb, "<%02x", *m);
else
cdb = bufprint(cdb, " %02x", *m);
hex = 1;
}
if (hex)
cdb = bufprint(cdb, ">");
return cdb;
}
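/*
 * Editor's note: CAPI 2.0 structs are length-prefixed.  A first byte
 * other than 0xff is the length itself; 0xff escapes to a 16-bit
 * length held in the next two bytes.  printstruct() below and the
 * _CSTRUCT handling in protocol_message_2_pars() both decode this
 * framing.
 */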
static _cdebbuf *printstruct(_cdebbuf *cdb, u8 *m)
{
unsigned len;
if (m[0] != 0xff) {
len = m[0];
m += 1;
} else {
len = ((u16 *) (m + 1))[0];
m += 3;
}
cdb = printstructlen(cdb, m, len);
return cdb;
}
/*-------------------------------------------------------*/
#define NAME (pnames[cmsg->par[cmsg->p]])
static _cdebbuf *protocol_message_2_pars(_cdebbuf *cdb, _cmsg *cmsg, int level)
{
if (!cmsg->par)
return NULL; /* invalid command/subcommand */
for (; TYP != _CEND; cmsg->p++) {
int slen = 29 + 3 - level;
int i;
if (!cdb)
return NULL;
cdb = bufprint(cdb, " ");
for (i = 0; i < level - 1; i++)
cdb = bufprint(cdb, " ");
switch (TYP) {
case _CBYTE:
cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u8 *) (cmsg->m + cmsg->l));
cmsg->l++;
break;
case _CWORD:
cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u16 *) (cmsg->m + cmsg->l));
cmsg->l += 2;
break;
case _CDWORD:
cdb = bufprint(cdb, "%-*s = 0x%lx\n", slen, NAME, *(u32 *) (cmsg->m + cmsg->l));
cmsg->l += 4;
break;
case _CSTRUCT:
cdb = bufprint(cdb, "%-*s = ", slen, NAME);
if (cmsg->m[cmsg->l] == '\0')
cdb = bufprint(cdb, "default");
else
cdb = printstruct(cdb, cmsg->m + cmsg->l);
cdb = bufprint(cdb, "\n");
if (cmsg->m[cmsg->l] != 0xff)
cmsg->l += 1 + cmsg->m[cmsg->l];
else
cmsg->l += 3 + *(u16 *) (cmsg->m + cmsg->l + 1);
break;
case _CMSTRUCT:
/*----- meta structure 0 -----*/
if (cmsg->m[cmsg->l] == '\0') {
cdb = bufprint(cdb, "%-*s = default\n", slen, NAME);
cmsg->l++;
jumpcstruct(cmsg);
} else {
char *name = NAME;
unsigned _l = cmsg->l;
cdb = bufprint(cdb, "%-*s\n", slen, name);
cmsg->l = (cmsg->m + _l)[0] == 255 ? cmsg->l + 3 : cmsg->l + 1;
cmsg->p++;
cdb = protocol_message_2_pars(cdb, cmsg, level + 1);
}
break;
}
}
return cdb;
}
/*-------------------------------------------------------*/
static _cdebbuf *g_debbuf;
static u_long g_debbuf_lock;
static _cmsg *g_cmsg;
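/*
 * A single static buffer/_cmsg pair covers the common case: bit 1 of
 * g_debbuf_lock serves as a try-lock for it, and contended callers
 * fall back to GFP_ATOMIC allocations that cdebbuf_free() and
 * capi_message2str() later release.
 */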
static _cdebbuf *cdebbuf_alloc(void)
{
_cdebbuf *cdb;
if (likely(!test_and_set_bit(1, &g_debbuf_lock))) {
cdb = g_debbuf;
goto init;
} else
cdb = kmalloc(sizeof(_cdebbuf), GFP_ATOMIC);
if (!cdb)
return NULL;
cdb->buf = kmalloc(CDEBUG_SIZE, GFP_ATOMIC);
if (!cdb->buf) {
kfree(cdb);
return NULL;
}
cdb->size = CDEBUG_SIZE;
init:
cdb->buf[0] = 0;
cdb->p = cdb->buf;
cdb->pos = 0;
return cdb;
}
/**
* cdebbuf_free() - free CAPI debug buffer
* @cdb: buffer to free
*/
void cdebbuf_free(_cdebbuf *cdb)
{
if (likely(cdb == g_debbuf)) {
test_and_clear_bit(1, &g_debbuf_lock);
return;
}
if (likely(cdb))
kfree(cdb->buf);
kfree(cdb);
}
/**
* capi_message2str() - format CAPI 2.0 message for printing
* @msg: CAPI 2.0 message
*
* Allocates a CAPI debug buffer and fills it with a printable representation
* of the CAPI 2.0 message in @msg.
* Return value: allocated debug buffer, NULL on error
* The returned buffer should be freed by a call to cdebbuf_free() after use.
*/
_cdebbuf *capi_message2str(u8 *msg)
{
_cdebbuf *cdb;
_cmsg *cmsg;
cdb = cdebbuf_alloc();
if (unlikely(!cdb))
return NULL;
if (likely(cdb == g_debbuf))
cmsg = g_cmsg;
else
cmsg = kmalloc(sizeof(_cmsg), GFP_ATOMIC);
if (unlikely(!cmsg)) {
cdebbuf_free(cdb);
return NULL;
}
cmsg->m = msg;
cmsg->l = 8;
cmsg->p = 0;
byteTRcpy(cmsg->m + 4, &cmsg->Command);
byteTRcpy(cmsg->m + 5, &cmsg->Subcommand);
cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand);
cdb = bufprint(cdb, "%-26s ID=%03d #0x%04x LEN=%04d\n",
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
((unsigned short *) msg)[1],
((unsigned short *) msg)[3],
((unsigned short *) msg)[0]);
cdb = protocol_message_2_pars(cdb, cmsg, 1);
if (unlikely(cmsg != g_cmsg))
kfree(cmsg);
return cdb;
}
int __init cdebug_init(void)
{
g_cmsg = kmalloc(sizeof(_cmsg), GFP_KERNEL);
if (!g_cmsg)
return -ENOMEM;
g_debbuf = kmalloc(sizeof(_cdebbuf), GFP_KERNEL);
if (!g_debbuf) {
kfree(g_cmsg);
return -ENOMEM;
}
g_debbuf->buf = kmalloc(CDEBUG_GSIZE, GFP_KERNEL);
if (!g_debbuf->buf) {
kfree(g_cmsg);
kfree(g_debbuf);
return -ENOMEM;
}
g_debbuf->size = CDEBUG_GSIZE;
g_debbuf->buf[0] = 0;
g_debbuf->p = g_debbuf->buf;
g_debbuf->pos = 0;
return 0;
}
void cdebug_exit(void)
{
if (g_debbuf)
kfree(g_debbuf->buf);
kfree(g_debbuf);
kfree(g_cmsg);
}
#else /* !CONFIG_CAPI_TRACE */
static _cdebbuf g_debbuf = {"CONFIG_CAPI_TRACE not enabled", NULL, 0, 0};
_cdebbuf *capi_message2str(u8 *msg)
{
return &g_debbuf;
}
_cdebbuf *capi_cmsg2str(_cmsg *cmsg)
{
return &g_debbuf;
}
void cdebbuf_free(_cdebbuf *cdb)
{
}
int __init cdebug_init(void)
{
return 0;
}
void cdebug_exit(void)
{
}
#endif
|
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2023 Cirrus Logic, Inc. and
* Cirrus Logic International Semiconductor Ltd.
*/
#ifndef CIRRUS_SCODEC_H
#define CIRRUS_SCODEC_H
int cirrus_scodec_get_speaker_id(struct device *dev, int amp_index,
int num_amps, int fixed_gpio_id);
#endif /* CIRRUS_SCODEC_H */
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* LP855x Backlight Driver
*
* Copyright (C) 2011 Texas Instruments
*/
#ifndef _LP855X_H
#define _LP855X_H
#define BL_CTL_SHFT (0)
#define BRT_MODE_SHFT (1)
#define BRT_MODE_MASK (0x06)
/* Enable backlight. Only valid when BRT_MODE=10 (I2C only) */
#define ENABLE_BL (1)
#define DISABLE_BL (0)
#define I2C_CONFIG(id) id ## _I2C_CONFIG
#define PWM_CONFIG(id) id ## _PWM_CONFIG
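/*
 * Token-pasting helpers: I2C_CONFIG(LP8556), for example, expands to
 * LP8556_I2C_CONFIG, selecting a chip's DEVICE CONTROL value by its
 * chip ID at compile time.
 */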
/* DEVICE CONTROL register - LP8550 */
#define LP8550_PWM_CONFIG (LP8550_PWM_ONLY << BRT_MODE_SHFT)
#define LP8550_I2C_CONFIG ((ENABLE_BL << BL_CTL_SHFT) | \
(LP8550_I2C_ONLY << BRT_MODE_SHFT))
/* DEVICE CONTROL register - LP8551 */
#define LP8551_PWM_CONFIG LP8550_PWM_CONFIG
#define LP8551_I2C_CONFIG LP8550_I2C_CONFIG
/* DEVICE CONTROL register - LP8552 */
#define LP8552_PWM_CONFIG LP8550_PWM_CONFIG
#define LP8552_I2C_CONFIG LP8550_I2C_CONFIG
/* DEVICE CONTROL register - LP8553 */
#define LP8553_PWM_CONFIG LP8550_PWM_CONFIG
#define LP8553_I2C_CONFIG LP8550_I2C_CONFIG
/* CONFIG register - LP8555 */
#define LP8555_PWM_STANDBY BIT(7)
#define LP8555_PWM_FILTER BIT(6)
#define LP8555_RELOAD_EPROM BIT(3) /* use it if EPROMs should be reset
when the backlight turns on */
#define LP8555_OFF_OPENLEDS BIT(2)
#define LP8555_PWM_CONFIG LP8555_PWM_ONLY
#define LP8555_I2C_CONFIG LP8555_I2C_ONLY
#define LP8555_COMB1_CONFIG LP8555_COMBINED1
#define LP8555_COMB2_CONFIG LP8555_COMBINED2
/* DEVICE CONTROL register - LP8556 */
#define LP8556_PWM_CONFIG (LP8556_PWM_ONLY << BRT_MODE_SHFT)
#define LP8556_COMB1_CONFIG (LP8556_COMBINED1 << BRT_MODE_SHFT)
#define LP8556_I2C_CONFIG ((ENABLE_BL << BL_CTL_SHFT) | \
(LP8556_I2C_ONLY << BRT_MODE_SHFT))
#define LP8556_COMB2_CONFIG (LP8556_COMBINED2 << BRT_MODE_SHFT)
#define LP8556_FAST_CONFIG BIT(7) /* use it if EPROMs should be maintained
when exiting the low power mode */
/* CONFIG register - LP8557 */
#define LP8557_PWM_STANDBY BIT(7)
#define LP8557_PWM_FILTER BIT(6)
#define LP8557_RELOAD_EPROM BIT(3) /* use it if EPROMs should be reset
when the backlight turns on */
#define LP8557_OFF_OPENLEDS BIT(2)
#define LP8557_PWM_CONFIG LP8557_PWM_ONLY
#define LP8557_I2C_CONFIG LP8557_I2C_ONLY
#define LP8557_COMB1_CONFIG LP8557_COMBINED1
#define LP8557_COMB2_CONFIG LP8557_COMBINED2
enum lp855x_chip_id {
LP8550,
LP8551,
LP8552,
LP8553,
LP8555,
LP8556,
LP8557,
};
enum lp8550_brightness_source {
LP8550_PWM_ONLY,
LP8550_I2C_ONLY = 2,
};
enum lp8551_brightness_source {
LP8551_PWM_ONLY = LP8550_PWM_ONLY,
LP8551_I2C_ONLY = LP8550_I2C_ONLY,
};
enum lp8552_brightness_source {
LP8552_PWM_ONLY = LP8550_PWM_ONLY,
LP8552_I2C_ONLY = LP8550_I2C_ONLY,
};
enum lp8553_brightness_source {
LP8553_PWM_ONLY = LP8550_PWM_ONLY,
LP8553_I2C_ONLY = LP8550_I2C_ONLY,
};
enum lp8555_brightness_source {
LP8555_PWM_ONLY,
LP8555_I2C_ONLY,
LP8555_COMBINED1, /* Brightness register with shaped PWM */
LP8555_COMBINED2, /* PWM with shaped brightness register */
};
enum lp8556_brightness_source {
LP8556_PWM_ONLY,
LP8556_COMBINED1, /* pwm + i2c before the shaper block */
LP8556_I2C_ONLY,
LP8556_COMBINED2, /* pwm + i2c after the shaper block */
};
enum lp8557_brightness_source {
LP8557_PWM_ONLY,
LP8557_I2C_ONLY,
LP8557_COMBINED1, /* pwm + i2c after the shaper block */
LP8557_COMBINED2, /* pwm + i2c before the shaper block */
};
struct lp855x_rom_data {
u8 addr;
u8 val;
};
/**
* struct lp855x_platform_data
 * @name : Backlight driver name. If not defined, a default name is used.
* @device_control : value of DEVICE CONTROL register
* @initial_brightness : initial value of backlight brightness
 * @period_ns : platform-specific PWM period value, in nanoseconds.
 *		Only valid when the mode is PWM_BASED.
* @size_program : total size of lp855x_rom_data
* @rom_data : list of new eeprom/eprom registers
*/
struct lp855x_platform_data {
const char *name;
u8 device_control;
u8 initial_brightness;
unsigned int period_ns;
int size_program;
struct lp855x_rom_data *rom_data;
};
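/*
 * Illustrative sketch (not part of the original header): board code might
 * describe an LP8556 controlled purely over I2C like this. The variable
 * names and ROM register values below are assumptions for the example.
 */
#if 0
static struct lp855x_rom_data example_rom_data[] = {
	{ .addr = 0xa5, .val = 0x4d },
};

static struct lp855x_platform_data example_lp8556_pdata = {
	.name = "lcd-backlight",
	.device_control = LP8556_I2C_CONFIG,
	.initial_brightness = 128,
	.size_program = ARRAY_SIZE(example_rom_data),
	.rom_data = example_rom_data,
};
#endif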
#endif
|
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/objagg.h>
struct tokey {
unsigned int id;
};
#define NUM_KEYS 32
static int key_id_index(unsigned int key_id)
{
if (key_id >= NUM_KEYS) {
WARN_ON(1);
return 0;
}
return key_id;
}
#define BUF_LEN 128
struct world {
unsigned int root_count;
unsigned int delta_count;
char next_root_buf[BUF_LEN];
struct objagg_obj *objagg_objs[NUM_KEYS];
unsigned int key_refs[NUM_KEYS];
};
struct root {
struct tokey key;
char buf[BUF_LEN];
};
struct delta {
unsigned int key_id_diff;
};
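/*
 * Added note: a delta object stores only the key id difference from its
 * root; the full key is reconstructed as root key id + key_id_diff (see
 * obj_to_key_id() below).
 */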
static struct objagg_obj *world_obj_get(struct world *world,
struct objagg *objagg,
unsigned int key_id)
{
struct objagg_obj *objagg_obj;
struct tokey key;
int err;
key.id = key_id;
objagg_obj = objagg_obj_get(objagg, &key);
if (IS_ERR(objagg_obj)) {
pr_err("Key %u: Failed to get object.\n", key_id);
return objagg_obj;
}
if (!world->key_refs[key_id_index(key_id)]) {
world->objagg_objs[key_id_index(key_id)] = objagg_obj;
} else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) {
pr_err("Key %u: Got another object for the same key.\n",
key_id);
err = -EINVAL;
goto err_key_id_check;
}
world->key_refs[key_id_index(key_id)]++;
return objagg_obj;
err_key_id_check:
objagg_obj_put(objagg, objagg_obj);
return ERR_PTR(err);
}
static void world_obj_put(struct world *world, struct objagg *objagg,
unsigned int key_id)
{
struct objagg_obj *objagg_obj;
if (!world->key_refs[key_id_index(key_id)])
return;
objagg_obj = world->objagg_objs[key_id_index(key_id)];
objagg_obj_put(objagg, objagg_obj);
world->key_refs[key_id_index(key_id)]--;
}
#define MAX_KEY_ID_DIFF 5
static bool delta_check(void *priv, const void *parent_obj, const void *obj)
{
const struct tokey *parent_key = parent_obj;
const struct tokey *key = obj;
int diff = key->id - parent_key->id;
return diff >= 0 && diff <= MAX_KEY_ID_DIFF;
}
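/*
 * Added note: with MAX_KEY_ID_DIFF of 5, key 3 can be represented as a
 * delta of +2 over root key 1, while key 7 falls outside that range and
 * must become a root of its own (compare the action_items table below).
 */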
static void *delta_create(void *priv, void *parent_obj, void *obj)
{
struct tokey *parent_key = parent_obj;
struct world *world = priv;
struct tokey *key = obj;
int diff = key->id - parent_key->id;
struct delta *delta;
if (!delta_check(priv, parent_obj, obj))
return ERR_PTR(-EINVAL);
delta = kzalloc(sizeof(*delta), GFP_KERNEL);
if (!delta)
return ERR_PTR(-ENOMEM);
delta->key_id_diff = diff;
world->delta_count++;
return delta;
}
static void delta_destroy(void *priv, void *delta_priv)
{
struct delta *delta = delta_priv;
struct world *world = priv;
world->delta_count--;
kfree(delta);
}
static void *root_create(void *priv, void *obj, unsigned int id)
{
struct world *world = priv;
struct tokey *key = obj;
struct root *root;
root = kzalloc(sizeof(*root), GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);
memcpy(&root->key, key, sizeof(root->key));
memcpy(root->buf, world->next_root_buf, sizeof(root->buf));
world->root_count++;
return root;
}
static void root_destroy(void *priv, void *root_priv)
{
struct root *root = root_priv;
struct world *world = priv;
world->root_count--;
kfree(root);
}
static int test_nodelta_obj_get(struct world *world, struct objagg *objagg,
unsigned int key_id, bool should_create_root)
{
unsigned int orig_root_count = world->root_count;
struct objagg_obj *objagg_obj;
const struct root *root;
int err;
if (should_create_root)
get_random_bytes(world->next_root_buf,
sizeof(world->next_root_buf));
objagg_obj = world_obj_get(world, objagg, key_id);
if (IS_ERR(objagg_obj)) {
pr_err("Key %u: Failed to get object.\n", key_id);
return PTR_ERR(objagg_obj);
}
if (should_create_root) {
if (world->root_count != orig_root_count + 1) {
pr_err("Key %u: Root was not created\n", key_id);
err = -EINVAL;
goto err_check_root_count;
}
} else {
if (world->root_count != orig_root_count) {
pr_err("Key %u: Root was incorrectly created\n",
key_id);
err = -EINVAL;
goto err_check_root_count;
}
}
root = objagg_obj_root_priv(objagg_obj);
if (root->key.id != key_id) {
pr_err("Key %u: Root has unexpected key id\n", key_id);
err = -EINVAL;
goto err_check_key_id;
}
if (should_create_root &&
memcmp(world->next_root_buf, root->buf, sizeof(root->buf))) {
pr_err("Key %u: Buffer does not match the expected content\n",
key_id);
err = -EINVAL;
goto err_check_buf;
}
return 0;
err_check_buf:
err_check_key_id:
err_check_root_count:
objagg_obj_put(objagg, objagg_obj);
return err;
}
static int test_nodelta_obj_put(struct world *world, struct objagg *objagg,
unsigned int key_id, bool should_destroy_root)
{
unsigned int orig_root_count = world->root_count;
world_obj_put(world, objagg, key_id);
if (should_destroy_root) {
if (world->root_count != orig_root_count - 1) {
pr_err("Key %u: Root was not destroyed\n", key_id);
return -EINVAL;
}
} else {
if (world->root_count != orig_root_count) {
pr_err("Key %u: Root was incorrectly destroyed\n",
key_id);
return -EINVAL;
}
}
return 0;
}
static int check_stats_zero(struct objagg *objagg)
{
const struct objagg_stats *stats;
int err = 0;
stats = objagg_stats_get(objagg);
if (IS_ERR(stats))
return PTR_ERR(stats);
if (stats->stats_info_count != 0) {
pr_err("Stats: Object count is not zero while it should be\n");
err = -EINVAL;
}
objagg_stats_put(stats);
return err;
}
static int check_stats_nodelta(struct objagg *objagg)
{
const struct objagg_stats *stats;
int i;
int err;
stats = objagg_stats_get(objagg);
if (IS_ERR(stats))
return PTR_ERR(stats);
if (stats->stats_info_count != NUM_KEYS) {
pr_err("Stats: Unexpected object count (%u expected, %u returned)\n",
NUM_KEYS, stats->stats_info_count);
err = -EINVAL;
goto stats_put;
}
for (i = 0; i < stats->stats_info_count; i++) {
if (stats->stats_info[i].stats.user_count != 2) {
pr_err("Stats: incorrect user count\n");
err = -EINVAL;
goto stats_put;
}
if (stats->stats_info[i].stats.delta_user_count != 2) {
pr_err("Stats: incorrect delta user count\n");
err = -EINVAL;
goto stats_put;
}
}
err = 0;
stats_put:
objagg_stats_put(stats);
return err;
}
static bool delta_check_dummy(void *priv, const void *parent_obj,
const void *obj)
{
return false;
}
static void *delta_create_dummy(void *priv, void *parent_obj, void *obj)
{
return ERR_PTR(-EOPNOTSUPP);
}
static void delta_destroy_dummy(void *priv, void *delta_priv)
{
}
static const struct objagg_ops nodelta_ops = {
.obj_size = sizeof(struct tokey),
.delta_check = delta_check_dummy,
.delta_create = delta_create_dummy,
.delta_destroy = delta_destroy_dummy,
.root_create = root_create,
.root_destroy = root_destroy,
};
static int test_nodelta(void)
{
struct world world = {};
struct objagg *objagg;
int i;
int err;
objagg = objagg_create(&nodelta_ops, NULL, &world);
if (IS_ERR(objagg))
return PTR_ERR(objagg);
err = check_stats_zero(objagg);
if (err)
goto err_stats_first_zero;
/* First round of gets, the root objects should be created */
for (i = 0; i < NUM_KEYS; i++) {
err = test_nodelta_obj_get(&world, objagg, i, true);
if (err)
goto err_obj_first_get;
}
/* Do the second round of gets, all roots are already created,
* make sure that no new root is created
*/
for (i = 0; i < NUM_KEYS; i++) {
err = test_nodelta_obj_get(&world, objagg, i, false);
if (err)
goto err_obj_second_get;
}
err = check_stats_nodelta(objagg);
if (err)
goto err_stats_nodelta;
for (i = NUM_KEYS - 1; i >= 0; i--) {
err = test_nodelta_obj_put(&world, objagg, i, false);
if (err)
goto err_obj_first_put;
}
for (i = NUM_KEYS - 1; i >= 0; i--) {
err = test_nodelta_obj_put(&world, objagg, i, true);
if (err)
goto err_obj_second_put;
}
err = check_stats_zero(objagg);
if (err)
goto err_stats_second_zero;
objagg_destroy(objagg);
return 0;
err_stats_nodelta:
err_obj_first_put:
err_obj_second_get:
for (i--; i >= 0; i--)
world_obj_put(&world, objagg, i);
i = NUM_KEYS;
err_obj_first_get:
err_obj_second_put:
for (i--; i >= 0; i--)
world_obj_put(&world, objagg, i);
err_stats_first_zero:
err_stats_second_zero:
objagg_destroy(objagg);
return err;
}
static const struct objagg_ops delta_ops = {
.obj_size = sizeof(struct tokey),
.delta_check = delta_check,
.delta_create = delta_create,
.delta_destroy = delta_destroy,
.root_create = root_create,
.root_destroy = root_destroy,
};
enum action {
ACTION_GET,
ACTION_PUT,
};
enum expect_delta {
EXPECT_DELTA_SAME,
EXPECT_DELTA_INC,
EXPECT_DELTA_DEC,
};
enum expect_root {
EXPECT_ROOT_SAME,
EXPECT_ROOT_INC,
EXPECT_ROOT_DEC,
};
struct expect_stats_info {
struct objagg_obj_stats stats;
bool is_root;
unsigned int key_id;
};
struct expect_stats {
unsigned int info_count;
struct expect_stats_info info[NUM_KEYS];
};
struct action_item {
unsigned int key_id;
enum action action;
enum expect_delta expect_delta;
enum expect_root expect_root;
struct expect_stats expect_stats;
};
#define EXPECT_STATS(count, ...) \
{ \
.info_count = count, \
.info = { __VA_ARGS__ } \
}
#define ROOT(key_id, user_count, delta_user_count) \
{{user_count, delta_user_count}, true, key_id}
#define DELTA(key_id, user_count) \
{{user_count, user_count}, false, key_id}
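/*
 * Added note: in the state comments below, "r:" lists the currently live
 * root key ids (with multiplicity) and "d: X^Y" denotes a delta object
 * for key X hanging off root key Y.
 */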
static const struct action_item action_items[] = {
{
1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
EXPECT_STATS(1, ROOT(1, 1, 1)),
}, /* r: 1 d: */
{
7, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
EXPECT_STATS(2, ROOT(1, 1, 1), ROOT(7, 1, 1)),
}, /* r: 1, 7 d: */
{
3, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(3, ROOT(1, 1, 2), ROOT(7, 1, 1),
DELTA(3, 1)),
}, /* r: 1, 7 d: 3^1 */
{
5, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(4, ROOT(1, 1, 3), ROOT(7, 1, 1),
DELTA(3, 1), DELTA(5, 1)),
}, /* r: 1, 7 d: 3^1, 5^1 */
{
3, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(4, ROOT(1, 1, 4), ROOT(7, 1, 1),
DELTA(3, 2), DELTA(5, 1)),
}, /* r: 1, 7 d: 3^1, 3^1, 5^1 */
{
1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(4, ROOT(1, 2, 5), ROOT(7, 1, 1),
DELTA(3, 2), DELTA(5, 1)),
}, /* r: 1, 1, 7 d: 3^1, 3^1, 5^1 */
{
30, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
EXPECT_STATS(5, ROOT(1, 2, 5), ROOT(7, 1, 1), ROOT(30, 1, 1),
DELTA(3, 2), DELTA(5, 1)),
}, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1 */
{
8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 2), ROOT(30, 1, 1),
DELTA(3, 2), DELTA(5, 1), DELTA(8, 1)),
}, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7 */
{
8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 3), ROOT(30, 1, 1),
DELTA(3, 2), DELTA(8, 2), DELTA(5, 1)),
}, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7, 8^7 */
{
3, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(6, ROOT(1, 2, 4), ROOT(7, 1, 3), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(3, 1), DELTA(5, 1)),
}, /* r: 1, 1, 7, 30 d: 3^1, 5^1, 8^7, 8^7 */
{
3, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(1, 2, 3), ROOT(7, 1, 3), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(5, 1)),
}, /* r: 1, 1, 7, 30 d: 5^1, 8^7, 8^7 */
{
1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(1, 1, 2), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(5, 1)),
}, /* r: 1, 7, 30 d: 5^1, 8^7, 8^7 */
{
1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(1, 0, 1),
DELTA(8, 2), DELTA(5, 1)),
}, /* r: 7, 30 d: 5^1, 8^7, 8^7 */
{
5, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
EXPECT_STATS(3, ROOT(7, 1, 3), ROOT(30, 1, 1),
DELTA(8, 2)),
}, /* r: 7, 30 d: 8^7, 8^7 */
{
5, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
EXPECT_STATS(4, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(5, 1, 1),
DELTA(8, 2)),
}, /* r: 7, 30, 5 d: 8^7, 8^7 */
{
6, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
{
8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 4), ROOT(5, 1, 2), ROOT(30, 1, 1),
DELTA(8, 3), DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 8^7, 8^7, 8^7, 6^5 */
{
8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
{
8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 2), ROOT(5, 1, 2), ROOT(30, 1, 1),
DELTA(8, 1), DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 8^7, 6^5 */
{
8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
EXPECT_STATS(4, ROOT(5, 1, 2), ROOT(7, 1, 1), ROOT(30, 1, 1),
DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 6^5 */
{
8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(5, 1, 3), ROOT(7, 1, 1), ROOT(30, 1, 1),
DELTA(6, 1), DELTA(8, 1)),
}, /* r: 7, 30, 5 d: 6^5, 8^5 */
{
7, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
EXPECT_STATS(4, ROOT(5, 1, 3), ROOT(30, 1, 1),
DELTA(6, 1), DELTA(8, 1)),
}, /* r: 30, 5 d: 6^5, 8^5 */
{
30, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
EXPECT_STATS(3, ROOT(5, 1, 3),
DELTA(6, 1), DELTA(8, 1)),
}, /* r: 5 d: 6^5, 8^5 */
{
5, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(3, ROOT(5, 0, 2),
DELTA(6, 1), DELTA(8, 1)),
}, /* r: d: 6^5, 8^5 */
{
6, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
EXPECT_STATS(2, ROOT(5, 0, 1),
DELTA(8, 1)),
}, /* r: d: 6^5 */
{
8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
EXPECT_STATS(0, ),
}, /* r: d: */
};
static int check_expect(struct world *world,
const struct action_item *action_item,
unsigned int orig_delta_count,
unsigned int orig_root_count)
{
unsigned int key_id = action_item->key_id;
switch (action_item->expect_delta) {
case EXPECT_DELTA_SAME:
if (orig_delta_count != world->delta_count) {
pr_err("Key %u: Delta count changed while expected to remain the same.\n",
key_id);
return -EINVAL;
}
break;
case EXPECT_DELTA_INC:
if (WARN_ON(action_item->action == ACTION_PUT))
return -EINVAL;
if (orig_delta_count + 1 != world->delta_count) {
pr_err("Key %u: Delta count was not incremented.\n",
key_id);
return -EINVAL;
}
break;
case EXPECT_DELTA_DEC:
if (WARN_ON(action_item->action == ACTION_GET))
return -EINVAL;
if (orig_delta_count - 1 != world->delta_count) {
pr_err("Key %u: Delta count was not decremented.\n",
key_id);
return -EINVAL;
}
break;
}
switch (action_item->expect_root) {
case EXPECT_ROOT_SAME:
if (orig_root_count != world->root_count) {
pr_err("Key %u: Root count changed while expected to remain the same.\n",
key_id);
return -EINVAL;
}
break;
case EXPECT_ROOT_INC:
if (WARN_ON(action_item->action == ACTION_PUT))
return -EINVAL;
if (orig_root_count + 1 != world->root_count) {
pr_err("Key %u: Root count was not incremented.\n",
key_id);
return -EINVAL;
}
break;
case EXPECT_ROOT_DEC:
if (WARN_ON(action_item->action == ACTION_GET))
return -EINVAL;
if (orig_root_count - 1 != world->root_count) {
pr_err("Key %u: Root count was not decremented.\n",
key_id);
return -EINVAL;
}
}
return 0;
}
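/*
 * Added note: obj_to_key_id() below reverses the aggregation, e.g. a
 * delta with key_id_diff 3 over a root with key id 5 maps back to key 8.
 */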
static unsigned int obj_to_key_id(struct objagg_obj *objagg_obj)
{
const struct tokey *root_key;
const struct delta *delta;
unsigned int key_id;
root_key = objagg_obj_root_priv(objagg_obj);
key_id = root_key->id;
delta = objagg_obj_delta_priv(objagg_obj);
if (delta)
key_id += delta->key_id_diff;
return key_id;
}
static int
check_expect_stats_nums(const struct objagg_obj_stats_info *stats_info,
const struct expect_stats_info *expect_stats_info,
const char **errmsg)
{
if (stats_info->is_root != expect_stats_info->is_root) {
if (errmsg)
*errmsg = "Incorrect root/delta indication";
return -EINVAL;
}
if (stats_info->stats.user_count !=
expect_stats_info->stats.user_count) {
if (errmsg)
*errmsg = "Incorrect user count";
return -EINVAL;
}
if (stats_info->stats.delta_user_count !=
expect_stats_info->stats.delta_user_count) {
if (errmsg)
*errmsg = "Incorrect delta user count";
return -EINVAL;
}
return 0;
}
static int
check_expect_stats_key_id(const struct objagg_obj_stats_info *stats_info,
const struct expect_stats_info *expect_stats_info,
const char **errmsg)
{
if (obj_to_key_id(stats_info->objagg_obj) !=
expect_stats_info->key_id) {
if (errmsg)
*errmsg = "incorrect key id";
return -EINVAL;
}
return 0;
}
static int check_expect_stats_neigh(const struct objagg_stats *stats,
const struct expect_stats *expect_stats,
int pos)
{
int i;
int err;
for (i = pos - 1; i >= 0; i--) {
err = check_expect_stats_nums(&stats->stats_info[i],
&expect_stats->info[pos], NULL);
if (err)
break;
err = check_expect_stats_key_id(&stats->stats_info[i],
&expect_stats->info[pos], NULL);
if (!err)
return 0;
}
for (i = pos + 1; i < stats->stats_info_count; i++) {
err = check_expect_stats_nums(&stats->stats_info[i],
&expect_stats->info[pos], NULL);
if (err)
break;
err = check_expect_stats_key_id(&stats->stats_info[i],
&expect_stats->info[pos], NULL);
if (!err)
return 0;
}
return -EINVAL;
}
static int __check_expect_stats(const struct objagg_stats *stats,
const struct expect_stats *expect_stats,
const char **errmsg)
{
int i;
int err;
if (stats->stats_info_count != expect_stats->info_count) {
*errmsg = "Unexpected object count";
return -EINVAL;
}
for (i = 0; i < stats->stats_info_count; i++) {
err = check_expect_stats_nums(&stats->stats_info[i],
&expect_stats->info[i], errmsg);
if (err)
return err;
err = check_expect_stats_key_id(&stats->stats_info[i],
&expect_stats->info[i], errmsg);
if (err) {
			/* It is possible that one of the neighbor stats with
			 * the same numbers has the correct key id, so check
			 * those as well.
			 */
err = check_expect_stats_neigh(stats, expect_stats, i);
if (err)
return err;
}
}
return 0;
}
static int check_expect_stats(struct objagg *objagg,
const struct expect_stats *expect_stats,
const char **errmsg)
{
const struct objagg_stats *stats;
int err;
stats = objagg_stats_get(objagg);
if (IS_ERR(stats)) {
*errmsg = "objagg_stats_get() failed.";
return PTR_ERR(stats);
}
err = __check_expect_stats(stats, expect_stats, errmsg);
objagg_stats_put(stats);
return err;
}
static int test_delta_action_item(struct world *world,
struct objagg *objagg,
const struct action_item *action_item,
bool inverse)
{
unsigned int orig_delta_count = world->delta_count;
unsigned int orig_root_count = world->root_count;
unsigned int key_id = action_item->key_id;
enum action action = action_item->action;
struct objagg_obj *objagg_obj;
const char *errmsg;
int err;
if (inverse)
action = action == ACTION_GET ? ACTION_PUT : ACTION_GET;
switch (action) {
case ACTION_GET:
objagg_obj = world_obj_get(world, objagg, key_id);
if (IS_ERR(objagg_obj))
return PTR_ERR(objagg_obj);
break;
case ACTION_PUT:
world_obj_put(world, objagg, key_id);
break;
}
if (inverse)
return 0;
err = check_expect(world, action_item,
orig_delta_count, orig_root_count);
if (err)
goto errout;
err = check_expect_stats(objagg, &action_item->expect_stats, &errmsg);
if (err) {
pr_err("Key %u: Stats: %s\n", action_item->key_id, errmsg);
goto errout;
}
return 0;
errout:
	/* This can only happen when the action is not inverted.
	 * So in case of an error, clean up by doing the inverse action.
	 */
test_delta_action_item(world, objagg, action_item, true);
return err;
}
static int test_delta(void)
{
struct world world = {};
struct objagg *objagg;
int i;
int err;
objagg = objagg_create(&delta_ops, NULL, &world);
if (IS_ERR(objagg))
return PTR_ERR(objagg);
for (i = 0; i < ARRAY_SIZE(action_items); i++) {
err = test_delta_action_item(&world, objagg,
&action_items[i], false);
if (err)
goto err_do_action_item;
}
objagg_destroy(objagg);
return 0;
err_do_action_item:
for (i--; i >= 0; i--)
test_delta_action_item(&world, objagg, &action_items[i], true);
objagg_destroy(objagg);
return err;
}
struct hints_case {
const unsigned int *key_ids;
size_t key_ids_count;
struct expect_stats expect_stats;
struct expect_stats expect_stats_hints;
};
static const unsigned int hints_case_key_ids[] = {
1, 7, 3, 5, 3, 1, 30, 8, 8, 5, 6, 8,
};
static const struct hints_case hints_case = {
.key_ids = hints_case_key_ids,
.key_ids_count = ARRAY_SIZE(hints_case_key_ids),
.expect_stats =
EXPECT_STATS(7, ROOT(1, 2, 7), ROOT(7, 1, 4), ROOT(30, 1, 1),
DELTA(8, 3), DELTA(3, 2),
DELTA(5, 2), DELTA(6, 1)),
.expect_stats_hints =
EXPECT_STATS(7, ROOT(3, 2, 9), ROOT(1, 2, 2), ROOT(30, 1, 1),
DELTA(8, 3), DELTA(5, 2),
DELTA(6, 1), DELTA(7, 1)),
};
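/*
 * Added note: the hinted expectation differs from the unhinted one
 * because OBJAGG_OPT_ALGO_SIMPLE_GREEDY can pick key 3 as a root, so
 * keys 5, 6, 7 and 8 all fit within MAX_KEY_ID_DIFF of it, leaving only
 * 1 and 30 as additional roots.
 */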
static void __pr_debug_stats(const struct objagg_stats *stats)
{
int i;
for (i = 0; i < stats->stats_info_count; i++)
pr_debug("Stat index %d key %u: u %d, d %d, %s\n", i,
obj_to_key_id(stats->stats_info[i].objagg_obj),
stats->stats_info[i].stats.user_count,
stats->stats_info[i].stats.delta_user_count,
stats->stats_info[i].is_root ? "root" : "noroot");
}
static void pr_debug_stats(struct objagg *objagg)
{
const struct objagg_stats *stats;
stats = objagg_stats_get(objagg);
if (IS_ERR(stats))
return;
__pr_debug_stats(stats);
objagg_stats_put(stats);
}
static void pr_debug_hints_stats(struct objagg_hints *objagg_hints)
{
const struct objagg_stats *stats;
stats = objagg_hints_stats_get(objagg_hints);
if (IS_ERR(stats))
return;
__pr_debug_stats(stats);
objagg_stats_put(stats);
}
static int check_expect_hints_stats(struct objagg_hints *objagg_hints,
const struct expect_stats *expect_stats,
const char **errmsg)
{
const struct objagg_stats *stats;
int err;
stats = objagg_hints_stats_get(objagg_hints);
if (IS_ERR(stats))
return PTR_ERR(stats);
err = __check_expect_stats(stats, expect_stats, errmsg);
objagg_stats_put(stats);
return err;
}
static int test_hints_case(const struct hints_case *hints_case)
{
struct objagg_obj *objagg_obj;
struct objagg_hints *hints;
struct world world2 = {};
struct world world = {};
struct objagg *objagg2;
struct objagg *objagg;
const char *errmsg;
int i;
int err;
objagg = objagg_create(&delta_ops, NULL, &world);
if (IS_ERR(objagg))
return PTR_ERR(objagg);
for (i = 0; i < hints_case->key_ids_count; i++) {
objagg_obj = world_obj_get(&world, objagg,
hints_case->key_ids[i]);
if (IS_ERR(objagg_obj)) {
err = PTR_ERR(objagg_obj);
goto err_world_obj_get;
}
}
pr_debug_stats(objagg);
err = check_expect_stats(objagg, &hints_case->expect_stats, &errmsg);
if (err) {
pr_err("Stats: %s\n", errmsg);
goto err_check_expect_stats;
}
hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
if (IS_ERR(hints)) {
err = PTR_ERR(hints);
goto err_hints_get;
}
pr_debug_hints_stats(hints);
err = check_expect_hints_stats(hints, &hints_case->expect_stats_hints,
&errmsg);
if (err) {
pr_err("Hints stats: %s\n", errmsg);
goto err_check_expect_hints_stats;
}
	objagg2 = objagg_create(&delta_ops, hints, &world2);
	if (IS_ERR(objagg2)) {
		err = PTR_ERR(objagg2);
		goto err_check_expect_hints_stats;
	}
for (i = 0; i < hints_case->key_ids_count; i++) {
objagg_obj = world_obj_get(&world2, objagg2,
hints_case->key_ids[i]);
if (IS_ERR(objagg_obj)) {
err = PTR_ERR(objagg_obj);
goto err_world2_obj_get;
}
}
pr_debug_stats(objagg2);
err = check_expect_stats(objagg2, &hints_case->expect_stats_hints,
&errmsg);
if (err) {
pr_err("Stats2: %s\n", errmsg);
goto err_check_expect_stats2;
}
err = 0;
err_check_expect_stats2:
err_world2_obj_get:
for (i--; i >= 0; i--)
		world_obj_put(&world2, objagg2, hints_case->key_ids[i]);
i = hints_case->key_ids_count;
objagg_destroy(objagg2);
err_check_expect_hints_stats:
objagg_hints_put(hints);
err_hints_get:
err_check_expect_stats:
err_world_obj_get:
for (i--; i >= 0; i--)
world_obj_put(&world, objagg, hints_case->key_ids[i]);
objagg_destroy(objagg);
return err;
}
static int test_hints(void)
{
return test_hints_case(&hints_case);
}
static int __init test_objagg_init(void)
{
int err;
err = test_nodelta();
if (err)
return err;
err = test_delta();
if (err)
return err;
return test_hints();
}
static void __exit test_objagg_exit(void)
{
}
module_init(test_objagg_init);
module_exit(test_objagg_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <[email protected]>");
MODULE_DESCRIPTION("Test module for objagg");
|
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <test_progs.h>
#include "test_deny_namespace.skel.h"
#include <sched.h>
#include "cap_helpers.h"
#include <stdio.h>
static int wait_for_pid(pid_t pid)
{
int status, ret;
again:
ret = waitpid(pid, &status, 0);
if (ret == -1) {
if (errno == EINTR)
goto again;
return -1;
}
if (!WIFEXITED(status))
return -1;
return WEXITSTATUS(status);
}
/* negative return value -> some internal error
* positive return value -> userns creation failed
* 0 -> userns creation succeeded
*/
static int create_user_ns(void)
{
pid_t pid;
pid = fork();
if (pid < 0)
return -1;
if (pid == 0) {
if (unshare(CLONE_NEWUSER))
_exit(EXIT_FAILURE);
_exit(EXIT_SUCCESS);
}
return wait_for_pid(pid);
}
static void test_userns_create_bpf(void)
{
	__u64 cap_mask = 1ULL << CAP_SYS_ADMIN;
__u64 old_caps = 0;
cap_enable_effective(cap_mask, &old_caps);
ASSERT_OK(create_user_ns(), "priv new user ns");
cap_disable_effective(cap_mask, &old_caps);
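	/*
	 * Note: create_user_ns() reports a failed unshare() via
	 * _exit(EXIT_FAILURE); on Linux both EXIT_FAILURE and EPERM equal 1,
	 * which is what makes this comparison hold.
	 */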
ASSERT_EQ(create_user_ns(), EPERM, "unpriv new user ns");
if (cap_mask & old_caps)
cap_enable_effective(cap_mask, NULL);
}
static void test_unpriv_userns_create_no_bpf(void)
{
	__u64 cap_mask = 1ULL << CAP_SYS_ADMIN;
__u64 old_caps = 0;
cap_disable_effective(cap_mask, &old_caps);
ASSERT_OK(create_user_ns(), "no-bpf unpriv new user ns");
if (cap_mask & old_caps)
cap_enable_effective(cap_mask, NULL);
}
void test_deny_namespace(void)
{
struct test_deny_namespace *skel = NULL;
int err;
if (test__start_subtest("unpriv_userns_create_no_bpf"))
test_unpriv_userns_create_no_bpf();
skel = test_deny_namespace__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel load"))
goto close_prog;
err = test_deny_namespace__attach(skel);
if (!ASSERT_OK(err, "attach"))
goto close_prog;
if (test__start_subtest("userns_create_bpf"))
test_userns_create_bpf();
test_deny_namespace__detach(skel);
close_prog:
test_deny_namespace__destroy(skel);
}
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2021 Rockchip Electronics Co., Ltd.
*/
#include <dt-bindings/pinctrl/rockchip.h>
#include "rockchip-pinconf.dtsi"
/*
 * This file is auto-generated by the pin2dts tool. Please keep the
 * generated code intact and add any changes at the end of this file.
*/
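/*
 * Illustrative usage (not part of the original file): a board dts can
 * pull in one of the pin groups below by referencing its label, e.g.:
 *
 *	&i2c0 {
 *		pinctrl-names = "default";
 *		pinctrl-0 = <&i2c0_xfer>;
 *	};
 */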
&pinctrl {
acodec {
/omit-if-no-ref/
acodec_pins: acodec-pins {
rockchip,pins =
/* acodec_adc_sync */
<1 RK_PB1 5 &pcfg_pull_none>,
/* acodec_adcclk */
<1 RK_PA1 5 &pcfg_pull_none>,
/* acodec_adcdata */
<1 RK_PA0 5 &pcfg_pull_none>,
/* acodec_dac_datal */
<1 RK_PA7 5 &pcfg_pull_none>,
/* acodec_dac_datar */
<1 RK_PB0 5 &pcfg_pull_none>,
/* acodec_dacclk */
<1 RK_PA3 5 &pcfg_pull_none>,
/* acodec_dacsync */
<1 RK_PA5 5 &pcfg_pull_none>;
};
};
audiopwm {
/omit-if-no-ref/
audiopwm_lout: audiopwm-lout {
rockchip,pins =
/* audiopwm_lout */
<1 RK_PA0 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
audiopwm_loutn: audiopwm-loutn {
rockchip,pins =
/* audiopwm_loutn */
<1 RK_PA1 6 &pcfg_pull_none>;
};
/omit-if-no-ref/
audiopwm_loutp: audiopwm-loutp {
rockchip,pins =
/* audiopwm_loutp */
<1 RK_PA0 6 &pcfg_pull_none>;
};
/omit-if-no-ref/
audiopwm_rout: audiopwm-rout {
rockchip,pins =
/* audiopwm_rout */
<1 RK_PA1 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
audiopwm_routn: audiopwm-routn {
rockchip,pins =
/* audiopwm_routn */
<1 RK_PA7 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
audiopwm_routp: audiopwm-routp {
rockchip,pins =
/* audiopwm_routp */
<1 RK_PA6 4 &pcfg_pull_none>;
};
};
bt656 {
/omit-if-no-ref/
bt656m0_pins: bt656m0-pins {
rockchip,pins =
/* bt656_clkm0 */
<3 RK_PA0 2 &pcfg_pull_none>,
/* bt656_d0m0 */
<2 RK_PD0 2 &pcfg_pull_none>,
/* bt656_d1m0 */
<2 RK_PD1 2 &pcfg_pull_none>,
/* bt656_d2m0 */
<2 RK_PD2 2 &pcfg_pull_none>,
/* bt656_d3m0 */
<2 RK_PD3 2 &pcfg_pull_none>,
/* bt656_d4m0 */
<2 RK_PD4 2 &pcfg_pull_none>,
/* bt656_d5m0 */
<2 RK_PD5 2 &pcfg_pull_none>,
/* bt656_d6m0 */
<2 RK_PD6 2 &pcfg_pull_none>,
/* bt656_d7m0 */
<2 RK_PD7 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
bt656m1_pins: bt656m1-pins {
rockchip,pins =
/* bt656_clkm1 */
<4 RK_PB4 5 &pcfg_pull_none>,
/* bt656_d0m1 */
<3 RK_PC6 5 &pcfg_pull_none>,
/* bt656_d1m1 */
<3 RK_PC7 5 &pcfg_pull_none>,
/* bt656_d2m1 */
<3 RK_PD0 5 &pcfg_pull_none>,
/* bt656_d3m1 */
<3 RK_PD1 5 &pcfg_pull_none>,
/* bt656_d4m1 */
<3 RK_PD2 5 &pcfg_pull_none>,
/* bt656_d5m1 */
<3 RK_PD3 5 &pcfg_pull_none>,
/* bt656_d6m1 */
<3 RK_PD4 5 &pcfg_pull_none>,
/* bt656_d7m1 */
<3 RK_PD5 5 &pcfg_pull_none>;
};
};
bt1120 {
/omit-if-no-ref/
bt1120_pins: bt1120-pins {
rockchip,pins =
/* bt1120_clk */
<3 RK_PA6 2 &pcfg_pull_none>,
/* bt1120_d0 */
<3 RK_PA1 2 &pcfg_pull_none>,
/* bt1120_d1 */
<3 RK_PA2 2 &pcfg_pull_none>,
/* bt1120_d2 */
<3 RK_PA3 2 &pcfg_pull_none>,
/* bt1120_d3 */
<3 RK_PA4 2 &pcfg_pull_none>,
/* bt1120_d4 */
<3 RK_PA5 2 &pcfg_pull_none>,
/* bt1120_d5 */
<3 RK_PA7 2 &pcfg_pull_none>,
/* bt1120_d6 */
<3 RK_PB0 2 &pcfg_pull_none>,
/* bt1120_d7 */
<3 RK_PB1 2 &pcfg_pull_none>,
/* bt1120_d8 */
<3 RK_PB2 2 &pcfg_pull_none>,
/* bt1120_d9 */
<3 RK_PB3 2 &pcfg_pull_none>,
/* bt1120_d10 */
<3 RK_PB4 2 &pcfg_pull_none>,
/* bt1120_d11 */
<3 RK_PB5 2 &pcfg_pull_none>,
/* bt1120_d12 */
<3 RK_PB6 2 &pcfg_pull_none>,
/* bt1120_d13 */
<3 RK_PC1 2 &pcfg_pull_none>,
/* bt1120_d14 */
<3 RK_PC2 2 &pcfg_pull_none>,
/* bt1120_d15 */
<3 RK_PC3 2 &pcfg_pull_none>;
};
};
cam {
/omit-if-no-ref/
cam_clkout0: cam-clkout0 {
rockchip,pins =
/* cam_clkout0 */
<4 RK_PA7 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
cam_clkout1: cam-clkout1 {
rockchip,pins =
/* cam_clkout1 */
<4 RK_PB0 1 &pcfg_pull_none>;
};
};
can0 {
/omit-if-no-ref/
can0m0_pins: can0m0-pins {
rockchip,pins =
/* can0_rxm0 */
<0 RK_PB4 2 &pcfg_pull_none>,
/* can0_txm0 */
<0 RK_PB3 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
can0m1_pins: can0m1-pins {
rockchip,pins =
/* can0_rxm1 */
<2 RK_PA2 4 &pcfg_pull_none>,
/* can0_txm1 */
<2 RK_PA1 4 &pcfg_pull_none>;
};
};
can1 {
/omit-if-no-ref/
can1m0_pins: can1m0-pins {
rockchip,pins =
/* can1_rxm0 */
<1 RK_PA0 3 &pcfg_pull_none>,
/* can1_txm0 */
<1 RK_PA1 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
can1m1_pins: can1m1-pins {
rockchip,pins =
/* can1_rxm1 */
<4 RK_PC2 3 &pcfg_pull_none>,
/* can1_txm1 */
<4 RK_PC3 3 &pcfg_pull_none>;
};
};
can2 {
/omit-if-no-ref/
can2m0_pins: can2m0-pins {
rockchip,pins =
/* can2_rxm0 */
<4 RK_PB4 3 &pcfg_pull_none>,
/* can2_txm0 */
<4 RK_PB5 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
can2m1_pins: can2m1-pins {
rockchip,pins =
/* can2_rxm1 */
<2 RK_PB1 4 &pcfg_pull_none>,
/* can2_txm1 */
<2 RK_PB2 4 &pcfg_pull_none>;
};
};
cif {
/omit-if-no-ref/
cif_clk: cif-clk {
rockchip,pins =
/* cif_clkout */
<4 RK_PC0 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
cif_dvp_clk: cif-dvp-clk {
rockchip,pins =
/* cif_clkin */
<4 RK_PC1 1 &pcfg_pull_none>,
/* cif_href */
<4 RK_PB6 1 &pcfg_pull_none>,
/* cif_vsync */
<4 RK_PB7 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
cif_dvp_bus16: cif-dvp-bus16 {
rockchip,pins =
/* cif_d8 */
<3 RK_PD6 1 &pcfg_pull_none>,
/* cif_d9 */
<3 RK_PD7 1 &pcfg_pull_none>,
/* cif_d10 */
<4 RK_PA0 1 &pcfg_pull_none>,
/* cif_d11 */
<4 RK_PA1 1 &pcfg_pull_none>,
/* cif_d12 */
<4 RK_PA2 1 &pcfg_pull_none>,
/* cif_d13 */
<4 RK_PA3 1 &pcfg_pull_none>,
/* cif_d14 */
<4 RK_PA4 1 &pcfg_pull_none>,
/* cif_d15 */
<4 RK_PA5 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
cif_dvp_bus8: cif-dvp-bus8 {
rockchip,pins =
/* cif_d0 */
<3 RK_PC6 1 &pcfg_pull_none>,
/* cif_d1 */
<3 RK_PC7 1 &pcfg_pull_none>,
/* cif_d2 */
<3 RK_PD0 1 &pcfg_pull_none>,
/* cif_d3 */
<3 RK_PD1 1 &pcfg_pull_none>,
/* cif_d4 */
<3 RK_PD2 1 &pcfg_pull_none>,
/* cif_d5 */
<3 RK_PD3 1 &pcfg_pull_none>,
/* cif_d6 */
<3 RK_PD4 1 &pcfg_pull_none>,
/* cif_d7 */
<3 RK_PD5 1 &pcfg_pull_none>;
};
};
clk32k {
/omit-if-no-ref/
clk32k_in: clk32k-in {
rockchip,pins =
/* clk32k_in */
<0 RK_PB0 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
clk32k_out0: clk32k-out0 {
rockchip,pins =
/* clk32k_out0 */
<0 RK_PB0 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
clk32k_out1: clk32k-out1 {
rockchip,pins =
/* clk32k_out1 */
<2 RK_PC6 1 &pcfg_pull_none>;
};
};
cpu {
/omit-if-no-ref/
cpu_pins: cpu-pins {
rockchip,pins =
/* cpu_avs */
<0 RK_PB7 2 &pcfg_pull_none>;
};
};
ebc {
/omit-if-no-ref/
ebc_extern: ebc-extern {
rockchip,pins =
/* ebc_sdce1 */
<4 RK_PA7 2 &pcfg_pull_none>,
/* ebc_sdce2 */
<4 RK_PB0 2 &pcfg_pull_none>,
/* ebc_sdce3 */
<4 RK_PB1 2 &pcfg_pull_none>,
/* ebc_sdshr */
<4 RK_PB5 2 &pcfg_pull_none>,
/* ebc_vcom */
<4 RK_PB2 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
ebc_pins: ebc-pins {
rockchip,pins =
/* ebc_gdclk */
<4 RK_PC0 2 &pcfg_pull_none>,
/* ebc_gdoe */
<4 RK_PB3 2 &pcfg_pull_none>,
/* ebc_gdsp */
<4 RK_PB4 2 &pcfg_pull_none>,
/* ebc_sdce0 */
<4 RK_PA6 2 &pcfg_pull_none>,
/* ebc_sdclk */
<4 RK_PC1 2 &pcfg_pull_none>,
/* ebc_sddo0 */
<3 RK_PC6 2 &pcfg_pull_none>,
/* ebc_sddo1 */
<3 RK_PC7 2 &pcfg_pull_none>,
/* ebc_sddo2 */
<3 RK_PD0 2 &pcfg_pull_none>,
/* ebc_sddo3 */
<3 RK_PD1 2 &pcfg_pull_none>,
/* ebc_sddo4 */
<3 RK_PD2 2 &pcfg_pull_none>,
/* ebc_sddo5 */
<3 RK_PD3 2 &pcfg_pull_none>,
/* ebc_sddo6 */
<3 RK_PD4 2 &pcfg_pull_none>,
/* ebc_sddo7 */
<3 RK_PD5 2 &pcfg_pull_none>,
/* ebc_sddo8 */
<3 RK_PD6 2 &pcfg_pull_none>,
/* ebc_sddo9 */
<3 RK_PD7 2 &pcfg_pull_none>,
/* ebc_sddo10 */
<4 RK_PA0 2 &pcfg_pull_none>,
/* ebc_sddo11 */
<4 RK_PA1 2 &pcfg_pull_none>,
/* ebc_sddo12 */
<4 RK_PA2 2 &pcfg_pull_none>,
/* ebc_sddo13 */
<4 RK_PA3 2 &pcfg_pull_none>,
/* ebc_sddo14 */
<4 RK_PA4 2 &pcfg_pull_none>,
/* ebc_sddo15 */
<4 RK_PA5 2 &pcfg_pull_none>,
/* ebc_sdle */
<4 RK_PB6 2 &pcfg_pull_none>,
/* ebc_sdoe */
<4 RK_PB7 2 &pcfg_pull_none>;
};
};
edpdp {
/omit-if-no-ref/
edpdpm0_pins: edpdpm0-pins {
rockchip,pins =
/* edpdp_hpdinm0 */
<4 RK_PC4 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
edpdpm1_pins: edpdpm1-pins {
rockchip,pins =
/* edpdp_hpdinm1 */
<0 RK_PC2 2 &pcfg_pull_none>;
};
};
emmc {
/omit-if-no-ref/
emmc_rstnout: emmc-rstnout {
rockchip,pins =
/* emmc_rstn */
<1 RK_PC7 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
emmc_bus8: emmc-bus8 {
rockchip,pins =
/* emmc_d0 */
<1 RK_PB4 1 &pcfg_pull_up_drv_level_2>,
/* emmc_d1 */
<1 RK_PB5 1 &pcfg_pull_up_drv_level_2>,
/* emmc_d2 */
<1 RK_PB6 1 &pcfg_pull_up_drv_level_2>,
/* emmc_d3 */
<1 RK_PB7 1 &pcfg_pull_up_drv_level_2>,
/* emmc_d4 */
<1 RK_PC0 1 &pcfg_pull_up_drv_level_2>,
/* emmc_d5 */
<1 RK_PC1 1 &pcfg_pull_up_drv_level_2>,
/* emmc_d6 */
<1 RK_PC2 1 &pcfg_pull_up_drv_level_2>,
/* emmc_d7 */
<1 RK_PC3 1 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
emmc_clk: emmc-clk {
rockchip,pins =
/* emmc_clkout */
<1 RK_PC5 1 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
emmc_cmd: emmc-cmd {
rockchip,pins =
/* emmc_cmd */
<1 RK_PC4 1 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
emmc_datastrobe: emmc-datastrobe {
rockchip,pins =
/* emmc_datastrobe */
<1 RK_PC6 1 &pcfg_pull_none>;
};
};
eth0 {
/omit-if-no-ref/
eth0_pins: eth0-pins {
rockchip,pins =
/* eth0_refclko25m */
<2 RK_PC1 2 &pcfg_pull_none>;
};
};
eth1 {
/omit-if-no-ref/
eth1m0_pins: eth1m0-pins {
rockchip,pins =
/* eth1_refclko25mm0 */
<3 RK_PB0 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
eth1m1_pins: eth1m1-pins {
rockchip,pins =
/* eth1_refclko25mm1 */
<4 RK_PB3 3 &pcfg_pull_none>;
};
};
flash {
/omit-if-no-ref/
flash_pins: flash-pins {
rockchip,pins =
/* flash_ale */
<1 RK_PD0 2 &pcfg_pull_none>,
/* flash_cle */
<1 RK_PC6 3 &pcfg_pull_none>,
/* flash_cs0n */
<1 RK_PD3 2 &pcfg_pull_none>,
/* flash_cs1n */
<1 RK_PD4 2 &pcfg_pull_none>,
/* flash_d0 */
<1 RK_PB4 2 &pcfg_pull_none>,
/* flash_d1 */
<1 RK_PB5 2 &pcfg_pull_none>,
/* flash_d2 */
<1 RK_PB6 2 &pcfg_pull_none>,
/* flash_d3 */
<1 RK_PB7 2 &pcfg_pull_none>,
/* flash_d4 */
<1 RK_PC0 2 &pcfg_pull_none>,
/* flash_d5 */
<1 RK_PC1 2 &pcfg_pull_none>,
/* flash_d6 */
<1 RK_PC2 2 &pcfg_pull_none>,
/* flash_d7 */
<1 RK_PC3 2 &pcfg_pull_none>,
/* flash_dqs */
<1 RK_PC5 2 &pcfg_pull_none>,
/* flash_rdn */
<1 RK_PD2 2 &pcfg_pull_none>,
/* flash_rdy */
<1 RK_PD1 2 &pcfg_pull_none>,
/* flash_volsel */
<0 RK_PA7 1 &pcfg_pull_none>,
/* flash_wpn */
<1 RK_PC7 3 &pcfg_pull_none>,
/* flash_wrn */
<1 RK_PC4 2 &pcfg_pull_none>;
};
};
fspi {
/omit-if-no-ref/
fspi_pins: fspi-pins {
rockchip,pins =
/* fspi_clk */
<1 RK_PD0 1 &pcfg_pull_none>,
/* fspi_cs0n */
<1 RK_PD3 1 &pcfg_pull_none>,
/* fspi_d0 */
<1 RK_PD1 1 &pcfg_pull_none>,
/* fspi_d1 */
<1 RK_PD2 1 &pcfg_pull_none>,
/* fspi_d2 */
<1 RK_PC7 2 &pcfg_pull_none>,
/* fspi_d3 */
<1 RK_PD4 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
fspi_cs1: fspi-cs1 {
rockchip,pins =
/* fspi_cs1n */
<1 RK_PC6 2 &pcfg_pull_up>;
};
};
gmac0 {
/omit-if-no-ref/
gmac0_miim: gmac0-miim {
rockchip,pins =
/* gmac0_mdc */
<2 RK_PC3 2 &pcfg_pull_none>,
/* gmac0_mdio */
<2 RK_PC4 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac0_clkinout: gmac0-clkinout {
rockchip,pins =
/* gmac0_mclkinout */
<2 RK_PC2 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac0_rx_er: gmac0-rx-er {
rockchip,pins =
/* gmac0_rxer */
<2 RK_PC5 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac0_rx_bus2: gmac0-rx-bus2 {
rockchip,pins =
/* gmac0_rxd0 */
<2 RK_PB6 1 &pcfg_pull_none>,
/* gmac0_rxd1 */
<2 RK_PB7 2 &pcfg_pull_none>,
/* gmac0_rxdvcrs */
<2 RK_PC0 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac0_tx_bus2: gmac0-tx-bus2 {
rockchip,pins =
/* gmac0_txd0 */
<2 RK_PB3 1 &pcfg_pull_none_drv_level_2>,
/* gmac0_txd1 */
<2 RK_PB4 1 &pcfg_pull_none_drv_level_2>,
/* gmac0_txen */
<2 RK_PB5 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac0_rgmii_clk: gmac0-rgmii-clk {
rockchip,pins =
/* gmac0_rxclk */
<2 RK_PA5 2 &pcfg_pull_none>,
/* gmac0_txclk */
<2 RK_PB0 2 &pcfg_pull_none_drv_level_1>;
};
/omit-if-no-ref/
gmac0_rgmii_bus: gmac0-rgmii-bus {
rockchip,pins =
/* gmac0_rxd2 */
<2 RK_PA3 2 &pcfg_pull_none>,
/* gmac0_rxd3 */
<2 RK_PA4 2 &pcfg_pull_none>,
/* gmac0_txd2 */
<2 RK_PA6 2 &pcfg_pull_none_drv_level_2>,
/* gmac0_txd3 */
<2 RK_PA7 2 &pcfg_pull_none_drv_level_2>;
};
};
gmac1 {
/omit-if-no-ref/
gmac1m0_miim: gmac1m0-miim {
rockchip,pins =
/* gmac1_mdcm0 */
<3 RK_PC4 3 &pcfg_pull_none>,
/* gmac1_mdiom0 */
<3 RK_PC5 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac1m0_clkinout: gmac1m0-clkinout {
rockchip,pins =
/* gmac1_mclkinoutm0 */
<3 RK_PC0 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac1m0_rx_er: gmac1m0-rx-er {
rockchip,pins =
/* gmac1_rxerm0 */
<3 RK_PB4 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac1m0_rx_bus2: gmac1m0-rx-bus2 {
rockchip,pins =
/* gmac1_rxd0m0 */
<3 RK_PB1 3 &pcfg_pull_none>,
/* gmac1_rxd1m0 */
<3 RK_PB2 3 &pcfg_pull_none>,
/* gmac1_rxdvcrsm0 */
<3 RK_PB3 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac1m0_tx_bus2: gmac1m0-tx-bus2 {
rockchip,pins =
/* gmac1_txd0m0 */
<3 RK_PB5 3 &pcfg_pull_none_drv_level_2>,
/* gmac1_txd1m0 */
<3 RK_PB6 3 &pcfg_pull_none_drv_level_2>,
/* gmac1_txenm0 */
<3 RK_PB7 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac1m0_rgmii_clk: gmac1m0-rgmii-clk {
rockchip,pins =
/* gmac1_rxclkm0 */
<3 RK_PA7 3 &pcfg_pull_none>,
/* gmac1_txclkm0 */
<3 RK_PA6 3 &pcfg_pull_none_drv_level_1>;
};
/omit-if-no-ref/
gmac1m0_rgmii_bus: gmac1m0-rgmii-bus {
rockchip,pins =
/* gmac1_rxd2m0 */
<3 RK_PA4 3 &pcfg_pull_none>,
/* gmac1_rxd3m0 */
<3 RK_PA5 3 &pcfg_pull_none>,
/* gmac1_txd2m0 */
<3 RK_PA2 3 &pcfg_pull_none_drv_level_2>,
/* gmac1_txd3m0 */
<3 RK_PA3 3 &pcfg_pull_none_drv_level_2>;
};
/omit-if-no-ref/
gmac1m1_miim: gmac1m1-miim {
rockchip,pins =
/* gmac1_mdcm1 */
<4 RK_PB6 3 &pcfg_pull_none>,
/* gmac1_mdiom1 */
<4 RK_PB7 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac1m1_clkinout: gmac1m1-clkinout {
rockchip,pins =
/* gmac1_mclkinoutm1 */
<4 RK_PC1 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac1m1_rx_er: gmac1m1-rx-er {
rockchip,pins =
/* gmac1_rxerm1 */
<4 RK_PB2 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac1m1_rx_bus2: gmac1m1-rx-bus2 {
rockchip,pins =
/* gmac1_rxd0m1 */
<4 RK_PA7 3 &pcfg_pull_none>,
/* gmac1_rxd1m1 */
<4 RK_PB0 3 &pcfg_pull_none>,
/* gmac1_rxdvcrsm1 */
<4 RK_PB1 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac1m1_tx_bus2: gmac1m1-tx-bus2 {
rockchip,pins =
/* gmac1_txd0m1 */
<4 RK_PA4 3 &pcfg_pull_none_drv_level_2>,
/* gmac1_txd1m1 */
<4 RK_PA5 3 &pcfg_pull_none_drv_level_2>,
/* gmac1_txenm1 */
<4 RK_PA6 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac1m1_rgmii_clk: gmac1m1-rgmii-clk {
rockchip,pins =
/* gmac1_rxclkm1 */
<4 RK_PA3 3 &pcfg_pull_none>,
/* gmac1_txclkm1 */
<4 RK_PA0 3 &pcfg_pull_none_drv_level_1>;
};
/omit-if-no-ref/
gmac1m1_rgmii_bus: gmac1m1-rgmii-bus {
rockchip,pins =
/* gmac1_rxd2m1 */
<4 RK_PA1 3 &pcfg_pull_none>,
/* gmac1_rxd3m1 */
<4 RK_PA2 3 &pcfg_pull_none>,
/* gmac1_txd2m1 */
<3 RK_PD6 3 &pcfg_pull_none_drv_level_2>,
/* gmac1_txd3m1 */
<3 RK_PD7 3 &pcfg_pull_none_drv_level_2>;
};
};
gpu {
/omit-if-no-ref/
gpu_pins: gpu-pins {
rockchip,pins =
/* gpu_avs */
<0 RK_PC0 2 &pcfg_pull_none>,
/* gpu_pwren */
<0 RK_PA6 4 &pcfg_pull_none>;
};
};
hdmitx {
/omit-if-no-ref/
hdmitxm0_cec: hdmitxm0-cec {
rockchip,pins =
/* hdmitxm0_cec */
<4 RK_PD1 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
hdmitxm1_cec: hdmitxm1-cec {
rockchip,pins =
/* hdmitxm1_cec */
<0 RK_PC7 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
hdmitx_scl: hdmitx-scl {
rockchip,pins =
/* hdmitx_scl */
<4 RK_PC7 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
hdmitx_sda: hdmitx-sda {
rockchip,pins =
/* hdmitx_sda */
<4 RK_PD0 1 &pcfg_pull_none>;
};
};
i2c0 {
/omit-if-no-ref/
i2c0_xfer: i2c0-xfer {
rockchip,pins =
/* i2c0_scl */
<0 RK_PB1 1 &pcfg_pull_none_smt>,
/* i2c0_sda */
<0 RK_PB2 1 &pcfg_pull_none_smt>;
};
};
i2c1 {
/omit-if-no-ref/
i2c1_xfer: i2c1-xfer {
rockchip,pins =
/* i2c1_scl */
<0 RK_PB3 1 &pcfg_pull_none_smt>,
/* i2c1_sda */
<0 RK_PB4 1 &pcfg_pull_none_smt>;
};
};
i2c2 {
/omit-if-no-ref/
i2c2m0_xfer: i2c2m0-xfer {
rockchip,pins =
/* i2c2_sclm0 */
<0 RK_PB5 1 &pcfg_pull_none_smt>,
/* i2c2_sdam0 */
<0 RK_PB6 1 &pcfg_pull_none_smt>;
};
/omit-if-no-ref/
i2c2m1_xfer: i2c2m1-xfer {
rockchip,pins =
/* i2c2_sclm1 */
<4 RK_PB5 1 &pcfg_pull_none_smt>,
/* i2c2_sdam1 */
<4 RK_PB4 1 &pcfg_pull_none_smt>;
};
};
i2c3 {
/omit-if-no-ref/
i2c3m0_xfer: i2c3m0-xfer {
rockchip,pins =
/* i2c3_sclm0 */
<1 RK_PA1 1 &pcfg_pull_none_smt>,
/* i2c3_sdam0 */
<1 RK_PA0 1 &pcfg_pull_none_smt>;
};
/omit-if-no-ref/
i2c3m1_xfer: i2c3m1-xfer {
rockchip,pins =
/* i2c3_sclm1 */
<3 RK_PB5 4 &pcfg_pull_none_smt>,
/* i2c3_sdam1 */
<3 RK_PB6 4 &pcfg_pull_none_smt>;
};
};
i2c4 {
/omit-if-no-ref/
i2c4m0_xfer: i2c4m0-xfer {
rockchip,pins =
/* i2c4_sclm0 */
<4 RK_PB3 1 &pcfg_pull_none_smt>,
/* i2c4_sdam0 */
<4 RK_PB2 1 &pcfg_pull_none_smt>;
};
/omit-if-no-ref/
i2c4m1_xfer: i2c4m1-xfer {
rockchip,pins =
/* i2c4_sclm1 */
<2 RK_PB2 2 &pcfg_pull_none_smt>,
/* i2c4_sdam1 */
<2 RK_PB1 2 &pcfg_pull_none_smt>;
};
};
i2c5 {
/omit-if-no-ref/
i2c5m0_xfer: i2c5m0-xfer {
rockchip,pins =
/* i2c5_sclm0 */
<3 RK_PB3 4 &pcfg_pull_none_smt>,
/* i2c5_sdam0 */
<3 RK_PB4 4 &pcfg_pull_none_smt>;
};
/omit-if-no-ref/
i2c5m1_xfer: i2c5m1-xfer {
rockchip,pins =
/* i2c5_sclm1 */
<4 RK_PC7 2 &pcfg_pull_none_smt>,
/* i2c5_sdam1 */
<4 RK_PD0 2 &pcfg_pull_none_smt>;
};
};
i2s1 {
/omit-if-no-ref/
i2s1m0_lrckrx: i2s1m0-lrckrx {
rockchip,pins =
/* i2s1m0_lrckrx */
<1 RK_PA6 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m0_lrcktx: i2s1m0-lrcktx {
rockchip,pins =
/* i2s1m0_lrcktx */
<1 RK_PA5 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m0_mclk: i2s1m0-mclk {
rockchip,pins =
/* i2s1m0_mclk */
<1 RK_PA2 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m0_sclkrx: i2s1m0-sclkrx {
rockchip,pins =
/* i2s1m0_sclkrx */
<1 RK_PA4 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m0_sclktx: i2s1m0-sclktx {
rockchip,pins =
/* i2s1m0_sclktx */
<1 RK_PA3 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m0_sdi0: i2s1m0-sdi0 {
rockchip,pins =
/* i2s1m0_sdi0 */
<1 RK_PB3 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m0_sdi1: i2s1m0-sdi1 {
rockchip,pins =
/* i2s1m0_sdi1 */
<1 RK_PB2 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m0_sdi2: i2s1m0-sdi2 {
rockchip,pins =
/* i2s1m0_sdi2 */
<1 RK_PB1 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m0_sdi3: i2s1m0-sdi3 {
rockchip,pins =
/* i2s1m0_sdi3 */
<1 RK_PB0 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m0_sdo0: i2s1m0-sdo0 {
rockchip,pins =
/* i2s1m0_sdo0 */
<1 RK_PA7 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m0_sdo1: i2s1m0-sdo1 {
rockchip,pins =
/* i2s1m0_sdo1 */
<1 RK_PB0 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m0_sdo2: i2s1m0-sdo2 {
rockchip,pins =
/* i2s1m0_sdo2 */
<1 RK_PB1 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m0_sdo3: i2s1m0-sdo3 {
rockchip,pins =
/* i2s1m0_sdo3 */
<1 RK_PB2 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m1_lrckrx: i2s1m1-lrckrx {
rockchip,pins =
/* i2s1m1_lrckrx */
<4 RK_PA7 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m1_lrcktx: i2s1m1-lrcktx {
rockchip,pins =
/* i2s1m1_lrcktx */
<3 RK_PD0 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m1_mclk: i2s1m1-mclk {
rockchip,pins =
/* i2s1m1_mclk */
<3 RK_PC6 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m1_sclkrx: i2s1m1-sclkrx {
rockchip,pins =
/* i2s1m1_sclkrx */
<4 RK_PA6 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m1_sclktx: i2s1m1-sclktx {
rockchip,pins =
/* i2s1m1_sclktx */
<3 RK_PC7 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m1_sdi0: i2s1m1-sdi0 {
rockchip,pins =
/* i2s1m1_sdi0 */
<3 RK_PD2 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m1_sdi1: i2s1m1-sdi1 {
rockchip,pins =
/* i2s1m1_sdi1 */
<3 RK_PD3 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m1_sdi2: i2s1m1-sdi2 {
rockchip,pins =
/* i2s1m1_sdi2 */
<3 RK_PD4 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m1_sdi3: i2s1m1-sdi3 {
rockchip,pins =
/* i2s1m1_sdi3 */
<3 RK_PD5 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m1_sdo0: i2s1m1-sdo0 {
rockchip,pins =
/* i2s1m1_sdo0 */
<3 RK_PD1 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m1_sdo1: i2s1m1-sdo1 {
rockchip,pins =
/* i2s1m1_sdo1 */
<4 RK_PB0 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m1_sdo2: i2s1m1-sdo2 {
rockchip,pins =
/* i2s1m1_sdo2 */
<4 RK_PB1 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m1_sdo3: i2s1m1-sdo3 {
rockchip,pins =
/* i2s1m1_sdo3 */
<4 RK_PB5 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m2_lrckrx: i2s1m2-lrckrx {
rockchip,pins =
/* i2s1m2_lrckrx */
<3 RK_PC5 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m2_lrcktx: i2s1m2-lrcktx {
rockchip,pins =
/* i2s1m2_lrcktx */
<2 RK_PD2 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m2_mclk: i2s1m2-mclk {
rockchip,pins =
/* i2s1m2_mclk */
<2 RK_PD0 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m2_sclkrx: i2s1m2-sclkrx {
rockchip,pins =
/* i2s1m2_sclkrx */
<3 RK_PC3 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m2_sclktx: i2s1m2-sclktx {
rockchip,pins =
/* i2s1m2_sclktx */
<2 RK_PD1 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m2_sdi0: i2s1m2-sdi0 {
rockchip,pins =
/* i2s1m2_sdi0 */
<2 RK_PD3 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m2_sdi1: i2s1m2-sdi1 {
rockchip,pins =
/* i2s1m2_sdi1 */
<2 RK_PD4 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m2_sdi2: i2s1m2-sdi2 {
rockchip,pins =
/* i2s1m2_sdi2 */
<2 RK_PD5 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m2_sdi3: i2s1m2-sdi3 {
rockchip,pins =
/* i2s1m2_sdi3 */
<2 RK_PD6 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m2_sdo0: i2s1m2-sdo0 {
rockchip,pins =
/* i2s1m2_sdo0 */
<2 RK_PD7 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m2_sdo1: i2s1m2-sdo1 {
rockchip,pins =
/* i2s1m2_sdo1 */
<3 RK_PA0 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m2_sdo2: i2s1m2-sdo2 {
rockchip,pins =
/* i2s1m2_sdo2 */
<3 RK_PC1 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s1m2_sdo3: i2s1m2-sdo3 {
rockchip,pins =
/* i2s1m2_sdo3 */
<3 RK_PC2 5 &pcfg_pull_none>;
};
};
i2s2 {
/omit-if-no-ref/
i2s2m0_lrckrx: i2s2m0-lrckrx {
rockchip,pins =
/* i2s2m0_lrckrx */
<2 RK_PC0 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s2m0_lrcktx: i2s2m0-lrcktx {
rockchip,pins =
/* i2s2m0_lrcktx */
<2 RK_PC3 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s2m0_mclk: i2s2m0-mclk {
rockchip,pins =
/* i2s2m0_mclk */
<2 RK_PC1 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s2m0_sclkrx: i2s2m0-sclkrx {
rockchip,pins =
/* i2s2m0_sclkrx */
<2 RK_PB7 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s2m0_sclktx: i2s2m0-sclktx {
rockchip,pins =
/* i2s2m0_sclktx */
<2 RK_PC2 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s2m0_sdi: i2s2m0-sdi {
rockchip,pins =
/* i2s2m0_sdi */
<2 RK_PC5 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s2m0_sdo: i2s2m0-sdo {
rockchip,pins =
/* i2s2m0_sdo */
<2 RK_PC4 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s2m1_lrckrx: i2s2m1-lrckrx {
rockchip,pins =
/* i2s2m1_lrckrx */
<4 RK_PA5 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s2m1_lrcktx: i2s2m1-lrcktx {
rockchip,pins =
/* i2s2m1_lrcktx */
<4 RK_PA4 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s2m1_mclk: i2s2m1-mclk {
rockchip,pins =
/* i2s2m1_mclk */
<4 RK_PB6 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s2m1_sclkrx: i2s2m1-sclkrx {
rockchip,pins =
/* i2s2m1_sclkrx */
<4 RK_PC1 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s2m1_sclktx: i2s2m1-sclktx {
rockchip,pins =
/* i2s2m1_sclktx */
<4 RK_PB7 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s2m1_sdi: i2s2m1-sdi {
rockchip,pins =
/* i2s2m1_sdi */
<4 RK_PB2 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s2m1_sdo: i2s2m1-sdo {
rockchip,pins =
/* i2s2m1_sdo */
<4 RK_PB3 5 &pcfg_pull_none>;
};
};
i2s3 {
/omit-if-no-ref/
i2s3m0_lrck: i2s3m0-lrck {
rockchip,pins =
/* i2s3m0_lrck */
<3 RK_PA4 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s3m0_mclk: i2s3m0-mclk {
rockchip,pins =
/* i2s3m0_mclk */
<3 RK_PA2 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s3m0_sclk: i2s3m0-sclk {
rockchip,pins =
/* i2s3m0_sclk */
<3 RK_PA3 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s3m0_sdi: i2s3m0-sdi {
rockchip,pins =
/* i2s3m0_sdi */
<3 RK_PA6 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s3m0_sdo: i2s3m0-sdo {
rockchip,pins =
/* i2s3m0_sdo */
<3 RK_PA5 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s3m1_lrck: i2s3m1-lrck {
rockchip,pins =
/* i2s3m1_lrck */
<4 RK_PC4 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s3m1_mclk: i2s3m1-mclk {
rockchip,pins =
/* i2s3m1_mclk */
<4 RK_PC2 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s3m1_sclk: i2s3m1-sclk {
rockchip,pins =
/* i2s3m1_sclk */
<4 RK_PC3 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s3m1_sdi: i2s3m1-sdi {
rockchip,pins =
/* i2s3m1_sdi */
<4 RK_PC6 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
i2s3m1_sdo: i2s3m1-sdo {
rockchip,pins =
/* i2s3m1_sdo */
<4 RK_PC5 5 &pcfg_pull_none>;
};
};
isp {
/omit-if-no-ref/
isp_pins: isp-pins {
rockchip,pins =
/* isp_flashtrigin */
<4 RK_PB4 4 &pcfg_pull_none>,
/* isp_flashtrigout */
<4 RK_PA6 1 &pcfg_pull_none>,
/* isp_prelighttrig */
<4 RK_PB1 1 &pcfg_pull_none>;
};
};
jtag {
/omit-if-no-ref/
jtag_pins: jtag-pins {
rockchip,pins =
/* jtag_tck */
<1 RK_PD7 2 &pcfg_pull_none>,
/* jtag_tms */
<2 RK_PA0 2 &pcfg_pull_none>;
};
};
lcdc {
/omit-if-no-ref/
lcdc_ctl: lcdc-ctl {
rockchip,pins =
/* lcdc_clk */
<3 RK_PA0 1 &pcfg_pull_none>,
/* lcdc_d0 */
<2 RK_PD0 1 &pcfg_pull_none>,
/* lcdc_d1 */
<2 RK_PD1 1 &pcfg_pull_none>,
/* lcdc_d2 */
<2 RK_PD2 1 &pcfg_pull_none>,
/* lcdc_d3 */
<2 RK_PD3 1 &pcfg_pull_none>,
/* lcdc_d4 */
<2 RK_PD4 1 &pcfg_pull_none>,
/* lcdc_d5 */
<2 RK_PD5 1 &pcfg_pull_none>,
/* lcdc_d6 */
<2 RK_PD6 1 &pcfg_pull_none>,
/* lcdc_d7 */
<2 RK_PD7 1 &pcfg_pull_none>,
/* lcdc_d8 */
<3 RK_PA1 1 &pcfg_pull_none>,
/* lcdc_d9 */
<3 RK_PA2 1 &pcfg_pull_none>,
/* lcdc_d10 */
<3 RK_PA3 1 &pcfg_pull_none>,
/* lcdc_d11 */
<3 RK_PA4 1 &pcfg_pull_none>,
/* lcdc_d12 */
<3 RK_PA5 1 &pcfg_pull_none>,
/* lcdc_d13 */
<3 RK_PA6 1 &pcfg_pull_none>,
/* lcdc_d14 */
<3 RK_PA7 1 &pcfg_pull_none>,
/* lcdc_d15 */
<3 RK_PB0 1 &pcfg_pull_none>,
/* lcdc_d16 */
<3 RK_PB1 1 &pcfg_pull_none>,
/* lcdc_d17 */
<3 RK_PB2 1 &pcfg_pull_none>,
/* lcdc_d18 */
<3 RK_PB3 1 &pcfg_pull_none>,
/* lcdc_d19 */
<3 RK_PB4 1 &pcfg_pull_none>,
/* lcdc_d20 */
<3 RK_PB5 1 &pcfg_pull_none>,
/* lcdc_d21 */
<3 RK_PB6 1 &pcfg_pull_none>,
/* lcdc_d22 */
<3 RK_PB7 1 &pcfg_pull_none>,
/* lcdc_d23 */
<3 RK_PC0 1 &pcfg_pull_none>,
/* lcdc_den */
<3 RK_PC3 1 &pcfg_pull_none>,
/* lcdc_hsync */
<3 RK_PC1 1 &pcfg_pull_none>,
/* lcdc_vsync */
<3 RK_PC2 1 &pcfg_pull_none>;
};
};
mcu {
/omit-if-no-ref/
mcu_pins: mcu-pins {
rockchip,pins =
/* mcu_jtagtck */
<0 RK_PB4 4 &pcfg_pull_none>,
/* mcu_jtagtdi */
<0 RK_PC1 4 &pcfg_pull_none>,
/* mcu_jtagtdo */
<0 RK_PB3 4 &pcfg_pull_none>,
/* mcu_jtagtms */
<0 RK_PC2 4 &pcfg_pull_none>,
/* mcu_jtagtrstn */
<0 RK_PC3 4 &pcfg_pull_none>;
};
};
npu {
/omit-if-no-ref/
npu_pins: npu-pins {
rockchip,pins =
/* npu_avs */
<0 RK_PC1 2 &pcfg_pull_none>;
};
};
pcie20 {
/omit-if-no-ref/
pcie20m0_pins: pcie20m0-pins {
rockchip,pins =
/* pcie20_clkreqnm0 */
<0 RK_PA5 3 &pcfg_pull_none>,
/* pcie20_perstnm0 */
<0 RK_PB6 3 &pcfg_pull_none>,
/* pcie20_wakenm0 */
<0 RK_PB5 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
pcie20m1_pins: pcie20m1-pins {
rockchip,pins =
/* pcie20_clkreqnm1 */
<2 RK_PD0 4 &pcfg_pull_none>,
/* pcie20_perstnm1 */
<3 RK_PC1 4 &pcfg_pull_none>,
/* pcie20_wakenm1 */
<2 RK_PD1 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
pcie20m2_pins: pcie20m2-pins {
rockchip,pins =
/* pcie20_clkreqnm2 */
<1 RK_PB0 4 &pcfg_pull_none>,
/* pcie20_perstnm2 */
<1 RK_PB2 4 &pcfg_pull_none>,
/* pcie20_wakenm2 */
<1 RK_PB1 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
pcie20_buttonrstn: pcie20-buttonrstn {
rockchip,pins =
/* pcie20_buttonrstn */
<0 RK_PB4 3 &pcfg_pull_none>;
};
};
pcie30x1 {
/omit-if-no-ref/
pcie30x1m0_pins: pcie30x1m0-pins {
rockchip,pins =
/* pcie30x1_clkreqnm0 */
<0 RK_PA4 3 &pcfg_pull_none>,
/* pcie30x1_perstnm0 */
<0 RK_PC3 3 &pcfg_pull_none>,
/* pcie30x1_wakenm0 */
<0 RK_PC2 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
pcie30x1m1_pins: pcie30x1m1-pins {
rockchip,pins =
/* pcie30x1_clkreqnm1 */
<2 RK_PD2 4 &pcfg_pull_none>,
/* pcie30x1_perstnm1 */
<3 RK_PA1 4 &pcfg_pull_none>,
/* pcie30x1_wakenm1 */
<2 RK_PD3 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
pcie30x1m2_pins: pcie30x1m2-pins {
rockchip,pins =
/* pcie30x1_clkreqnm2 */
<1 RK_PA5 4 &pcfg_pull_none>,
/* pcie30x1_perstnm2 */
<1 RK_PA2 4 &pcfg_pull_none>,
/* pcie30x1_wakenm2 */
<1 RK_PA3 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
pcie30x1_buttonrstn: pcie30x1-buttonrstn {
rockchip,pins =
/* pcie30x1_buttonrstn */
<0 RK_PB3 3 &pcfg_pull_none>;
};
};
pcie30x2 {
/omit-if-no-ref/
pcie30x2m0_pins: pcie30x2m0-pins {
rockchip,pins =
/* pcie30x2_clkreqnm0 */
<0 RK_PA6 2 &pcfg_pull_none>,
/* pcie30x2_perstnm0 */
<0 RK_PC6 3 &pcfg_pull_none>,
/* pcie30x2_wakenm0 */
<0 RK_PC5 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
pcie30x2m1_pins: pcie30x2m1-pins {
rockchip,pins =
/* pcie30x2_clkreqnm1 */
<2 RK_PD4 4 &pcfg_pull_none>,
/* pcie30x2_perstnm1 */
<2 RK_PD6 4 &pcfg_pull_none>,
/* pcie30x2_wakenm1 */
<2 RK_PD5 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
pcie30x2m2_pins: pcie30x2m2-pins {
rockchip,pins =
/* pcie30x2_clkreqnm2 */
<4 RK_PC2 4 &pcfg_pull_none>,
/* pcie30x2_perstnm2 */
<4 RK_PC4 4 &pcfg_pull_none>,
/* pcie30x2_wakenm2 */
<4 RK_PC3 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
pcie30x2_buttonrstn: pcie30x2-buttonrstn {
rockchip,pins =
/* pcie30x2_buttonrstn */
<0 RK_PB0 3 &pcfg_pull_none>;
};
};
pdm {
/omit-if-no-ref/
pdmm0_clk: pdmm0-clk {
rockchip,pins =
/* pdm_clk0m0 */
<1 RK_PA6 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm0_clk1: pdmm0-clk1 {
rockchip,pins =
/* pdmm0_clk1 */
<1 RK_PA4 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm0_sdi0: pdmm0-sdi0 {
rockchip,pins =
/* pdmm0_sdi0 */
<1 RK_PB3 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm0_sdi1: pdmm0-sdi1 {
rockchip,pins =
/* pdmm0_sdi1 */
<1 RK_PB2 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm0_sdi2: pdmm0-sdi2 {
rockchip,pins =
/* pdmm0_sdi2 */
<1 RK_PB1 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm0_sdi3: pdmm0-sdi3 {
rockchip,pins =
/* pdmm0_sdi3 */
<1 RK_PB0 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm1_clk: pdmm1-clk {
rockchip,pins =
/* pdm_clk0m1 */
<3 RK_PD6 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm1_clk1: pdmm1-clk1 {
rockchip,pins =
/* pdmm1_clk1 */
<4 RK_PA0 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm1_sdi0: pdmm1-sdi0 {
rockchip,pins =
/* pdmm1_sdi0 */
<3 RK_PD7 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm1_sdi1: pdmm1-sdi1 {
rockchip,pins =
/* pdmm1_sdi1 */
<4 RK_PA1 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm1_sdi2: pdmm1-sdi2 {
rockchip,pins =
/* pdmm1_sdi2 */
<4 RK_PA2 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm1_sdi3: pdmm1-sdi3 {
rockchip,pins =
/* pdmm1_sdi3 */
<4 RK_PA3 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm2_clk1: pdmm2-clk1 {
rockchip,pins =
/* pdmm2_clk1 */
<3 RK_PC4 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm2_sdi0: pdmm2-sdi0 {
rockchip,pins =
/* pdmm2_sdi0 */
<3 RK_PB3 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm2_sdi1: pdmm2-sdi1 {
rockchip,pins =
/* pdmm2_sdi1 */
<3 RK_PB4 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm2_sdi2: pdmm2-sdi2 {
rockchip,pins =
/* pdmm2_sdi2 */
<3 RK_PB7 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
pdmm2_sdi3: pdmm2-sdi3 {
rockchip,pins =
/* pdmm2_sdi3 */
<3 RK_PC0 5 &pcfg_pull_none>;
};
};
pmic {
/omit-if-no-ref/
pmic_pins: pmic-pins {
rockchip,pins =
/* pmic_sleep */
<0 RK_PA2 1 &pcfg_pull_none>;
};
};
pmu {
/omit-if-no-ref/
pmu_pins: pmu-pins {
rockchip,pins =
/* pmu_debug0 */
<0 RK_PA5 4 &pcfg_pull_none>,
/* pmu_debug1 */
<0 RK_PA6 3 &pcfg_pull_none>,
/* pmu_debug2 */
<0 RK_PC4 4 &pcfg_pull_none>,
/* pmu_debug3 */
<0 RK_PC5 4 &pcfg_pull_none>,
/* pmu_debug4 */
<0 RK_PC6 4 &pcfg_pull_none>,
/* pmu_debug5 */
<0 RK_PC7 4 &pcfg_pull_none>;
};
};
pwm0 {
/omit-if-no-ref/
pwm0m0_pins: pwm0m0-pins {
rockchip,pins =
/* pwm0_m0 */
<0 RK_PB7 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
pwm0m1_pins: pwm0m1-pins {
rockchip,pins =
/* pwm0_m1 */
<0 RK_PC7 2 &pcfg_pull_none>;
};
};
pwm1 {
/omit-if-no-ref/
pwm1m0_pins: pwm1m0-pins {
rockchip,pins =
/* pwm1_m0 */
<0 RK_PC0 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
pwm1m1_pins: pwm1m1-pins {
rockchip,pins =
/* pwm1_m1 */
<0 RK_PB5 4 &pcfg_pull_none>;
};
};
pwm2 {
/omit-if-no-ref/
pwm2m0_pins: pwm2m0-pins {
rockchip,pins =
/* pwm2_m0 */
<0 RK_PC1 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
pwm2m1_pins: pwm2m1-pins {
rockchip,pins =
/* pwm2_m1 */
<0 RK_PB6 4 &pcfg_pull_none>;
};
};
pwm3 {
/omit-if-no-ref/
pwm3_pins: pwm3-pins {
rockchip,pins =
/* pwm3_ir */
<0 RK_PC2 1 &pcfg_pull_none>;
};
};
pwm4 {
/omit-if-no-ref/
pwm4_pins: pwm4-pins {
rockchip,pins =
/* pwm4 */
<0 RK_PC3 1 &pcfg_pull_none>;
};
};
pwm5 {
/omit-if-no-ref/
pwm5_pins: pwm5-pins {
rockchip,pins =
/* pwm5 */
<0 RK_PC4 1 &pcfg_pull_none>;
};
};
pwm6 {
/omit-if-no-ref/
pwm6_pins: pwm6-pins {
rockchip,pins =
/* pwm6 */
<0 RK_PC5 1 &pcfg_pull_none>;
};
};
pwm7 {
/omit-if-no-ref/
pwm7_pins: pwm7-pins {
rockchip,pins =
/* pwm7_ir */
<0 RK_PC6 1 &pcfg_pull_none>;
};
};
pwm8 {
/omit-if-no-ref/
pwm8m0_pins: pwm8m0-pins {
rockchip,pins =
/* pwm8_m0 */
<3 RK_PB1 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
pwm8m1_pins: pwm8m1-pins {
rockchip,pins =
/* pwm8_m1 */
<1 RK_PD5 4 &pcfg_pull_none>;
};
};
pwm9 {
/omit-if-no-ref/
pwm9m0_pins: pwm9m0-pins {
rockchip,pins =
/* pwm9_m0 */
<3 RK_PB2 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
pwm9m1_pins: pwm9m1-pins {
rockchip,pins =
/* pwm9_m1 */
<1 RK_PD6 4 &pcfg_pull_none>;
};
};
pwm10 {
/omit-if-no-ref/
pwm10m0_pins: pwm10m0-pins {
rockchip,pins =
/* pwm10_m0 */
<3 RK_PB5 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
pwm10m1_pins: pwm10m1-pins {
rockchip,pins =
/* pwm10_m1 */
<2 RK_PA1 2 &pcfg_pull_none>;
};
};
pwm11 {
/omit-if-no-ref/
pwm11m0_pins: pwm11m0-pins {
rockchip,pins =
/* pwm11_irm0 */
<3 RK_PB6 5 &pcfg_pull_none>;
};
/omit-if-no-ref/
pwm11m1_pins: pwm11m1-pins {
rockchip,pins =
/* pwm11_irm1 */
<4 RK_PC0 3 &pcfg_pull_none>;
};
};
pwm12 {
/omit-if-no-ref/
pwm12m0_pins: pwm12m0-pins {
rockchip,pins =
/* pwm12_m0 */
<3 RK_PB7 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
pwm12m1_pins: pwm12m1-pins {
rockchip,pins =
/* pwm12_m1 */
<4 RK_PC5 1 &pcfg_pull_none>;
};
};
pwm13 {
/omit-if-no-ref/
pwm13m0_pins: pwm13m0-pins {
rockchip,pins =
/* pwm13_m0 */
<3 RK_PC0 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
pwm13m1_pins: pwm13m1-pins {
rockchip,pins =
/* pwm13_m1 */
<4 RK_PC6 1 &pcfg_pull_none>;
};
};
pwm14 {
/omit-if-no-ref/
pwm14m0_pins: pwm14m0-pins {
rockchip,pins =
/* pwm14_m0 */
<3 RK_PC4 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
pwm14m1_pins: pwm14m1-pins {
rockchip,pins =
/* pwm14_m1 */
<4 RK_PC2 1 &pcfg_pull_none>;
};
};
pwm15 {
/omit-if-no-ref/
pwm15m0_pins: pwm15m0-pins {
rockchip,pins =
/* pwm15_irm0 */
<3 RK_PC5 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
pwm15m1_pins: pwm15m1-pins {
rockchip,pins =
/* pwm15_irm1 */
<4 RK_PC3 1 &pcfg_pull_none>;
};
};
refclk {
/omit-if-no-ref/
refclk_pins: refclk-pins {
rockchip,pins =
/* refclk_ou */
<0 RK_PA0 1 &pcfg_pull_none>;
};
};
sata {
/omit-if-no-ref/
sata_pins: sata-pins {
rockchip,pins =
/* sata_cpdet */
<0 RK_PA4 2 &pcfg_pull_none>,
/* sata_cppod */
<0 RK_PA6 1 &pcfg_pull_none>,
/* sata_mpswitch */
<0 RK_PA5 2 &pcfg_pull_none>;
};
};
sata0 {
/omit-if-no-ref/
sata0_pins: sata0-pins {
rockchip,pins =
/* sata0_actled */
<4 RK_PC6 3 &pcfg_pull_none>;
};
};
sata1 {
/omit-if-no-ref/
sata1_pins: sata1-pins {
rockchip,pins =
/* sata1_actled */
<4 RK_PC5 3 &pcfg_pull_none>;
};
};
sata2 {
/omit-if-no-ref/
sata2_pins: sata2-pins {
rockchip,pins =
/* sata2_actled */
<4 RK_PC4 3 &pcfg_pull_none>;
};
};
scr {
/omit-if-no-ref/
scr_pins: scr-pins {
rockchip,pins =
/* scr_clk */
<1 RK_PA2 3 &pcfg_pull_none>,
/* scr_det */
<1 RK_PA7 3 &pcfg_pull_up>,
/* scr_io */
<1 RK_PA3 3 &pcfg_pull_up>,
/* scr_rst */
<1 RK_PA5 3 &pcfg_pull_none>;
};
};
sdmmc0 {
/omit-if-no-ref/
sdmmc0_bus4: sdmmc0-bus4 {
rockchip,pins =
/* sdmmc0_d0 */
<1 RK_PD5 1 &pcfg_pull_up_drv_level_2>,
/* sdmmc0_d1 */
<1 RK_PD6 1 &pcfg_pull_up_drv_level_2>,
/* sdmmc0_d2 */
<1 RK_PD7 1 &pcfg_pull_up_drv_level_2>,
/* sdmmc0_d3 */
<2 RK_PA0 1 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
sdmmc0_clk: sdmmc0-clk {
rockchip,pins =
/* sdmmc0_clk */
<2 RK_PA2 1 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
sdmmc0_cmd: sdmmc0-cmd {
rockchip,pins =
/* sdmmc0_cmd */
<2 RK_PA1 1 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
sdmmc0_det: sdmmc0-det {
rockchip,pins =
/* sdmmc0_det */
<0 RK_PA4 1 &pcfg_pull_up>;
};
/omit-if-no-ref/
sdmmc0_pwren: sdmmc0-pwren {
rockchip,pins =
/* sdmmc0_pwren */
<0 RK_PA5 1 &pcfg_pull_none>;
};
};
sdmmc1 {
/omit-if-no-ref/
sdmmc1_bus4: sdmmc1-bus4 {
rockchip,pins =
/* sdmmc1_d0 */
<2 RK_PA3 1 &pcfg_pull_up_drv_level_2>,
/* sdmmc1_d1 */
<2 RK_PA4 1 &pcfg_pull_up_drv_level_2>,
/* sdmmc1_d2 */
<2 RK_PA5 1 &pcfg_pull_up_drv_level_2>,
/* sdmmc1_d3 */
<2 RK_PA6 1 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
sdmmc1_clk: sdmmc1-clk {
rockchip,pins =
/* sdmmc1_clk */
<2 RK_PB0 1 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
sdmmc1_cmd: sdmmc1-cmd {
rockchip,pins =
/* sdmmc1_cmd */
<2 RK_PA7 1 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
sdmmc1_det: sdmmc1-det {
rockchip,pins =
/* sdmmc1_det */
<2 RK_PB2 1 &pcfg_pull_up>;
};
/omit-if-no-ref/
sdmmc1_pwren: sdmmc1-pwren {
rockchip,pins =
/* sdmmc1_pwren */
<2 RK_PB1 1 &pcfg_pull_none>;
};
};
sdmmc2 {
/omit-if-no-ref/
sdmmc2m0_bus4: sdmmc2m0-bus4 {
rockchip,pins =
/* sdmmc2_d0m0 */
<3 RK_PC6 3 &pcfg_pull_up_drv_level_2>,
/* sdmmc2_d1m0 */
<3 RK_PC7 3 &pcfg_pull_up_drv_level_2>,
/* sdmmc2_d2m0 */
<3 RK_PD0 3 &pcfg_pull_up_drv_level_2>,
/* sdmmc2_d3m0 */
<3 RK_PD1 3 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
sdmmc2m0_clk: sdmmc2m0-clk {
rockchip,pins =
/* sdmmc2_clkm0 */
<3 RK_PD3 3 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
sdmmc2m0_cmd: sdmmc2m0-cmd {
rockchip,pins =
/* sdmmc2_cmdm0 */
<3 RK_PD2 3 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
sdmmc2m0_det: sdmmc2m0-det {
rockchip,pins =
/* sdmmc2_detm0 */
<3 RK_PD4 3 &pcfg_pull_up>;
};
/omit-if-no-ref/
sdmmc2m0_pwren: sdmmc2m0-pwren {
rockchip,pins =
/* sdmmc2m0_pwren */
<3 RK_PD5 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
sdmmc2m1_bus4: sdmmc2m1-bus4 {
rockchip,pins =
/* sdmmc2_d0m1 */
<3 RK_PA1 5 &pcfg_pull_up_drv_level_2>,
/* sdmmc2_d1m1 */
<3 RK_PA2 5 &pcfg_pull_up_drv_level_2>,
/* sdmmc2_d2m1 */
<3 RK_PA3 5 &pcfg_pull_up_drv_level_2>,
/* sdmmc2_d3m1 */
<3 RK_PA4 5 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
sdmmc2m1_clk: sdmmc2m1-clk {
rockchip,pins =
/* sdmmc2_clkm1 */
<3 RK_PA6 5 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
sdmmc2m1_cmd: sdmmc2m1-cmd {
rockchip,pins =
/* sdmmc2_cmdm1 */
<3 RK_PA5 5 &pcfg_pull_up_drv_level_2>;
};
/omit-if-no-ref/
sdmmc2m1_det: sdmmc2m1-det {
rockchip,pins =
/* sdmmc2_detm1 */
<3 RK_PA7 4 &pcfg_pull_up>;
};
/omit-if-no-ref/
sdmmc2m1_pwren: sdmmc2m1-pwren {
rockchip,pins =
/* sdmmc2m1_pwren */
<3 RK_PB0 4 &pcfg_pull_none>;
};
};
spdif {
/omit-if-no-ref/
spdifm0_tx: spdifm0-tx {
rockchip,pins =
/* spdifm0_tx */
<1 RK_PA4 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
spdifm1_tx: spdifm1-tx {
rockchip,pins =
/* spdifm1_tx */
<3 RK_PC5 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
spdifm2_tx: spdifm2-tx {
rockchip,pins =
/* spdifm2_tx */
<4 RK_PC4 2 &pcfg_pull_none>;
};
};
spi0 {
/omit-if-no-ref/
spi0m0_pins: spi0m0-pins {
rockchip,pins =
/* spi0_clkm0 */
<0 RK_PB5 2 &pcfg_pull_none>,
/* spi0_misom0 */
<0 RK_PC5 2 &pcfg_pull_none>,
/* spi0_mosim0 */
<0 RK_PB6 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi0m0_cs0: spi0m0-cs0 {
rockchip,pins =
/* spi0_cs0m0 */
<0 RK_PC6 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi0m0_cs1: spi0m0-cs1 {
rockchip,pins =
/* spi0_cs1m0 */
<0 RK_PC4 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi0m1_pins: spi0m1-pins {
rockchip,pins =
/* spi0_clkm1 */
<2 RK_PD3 3 &pcfg_pull_none>,
/* spi0_misom1 */
<2 RK_PD0 3 &pcfg_pull_none>,
/* spi0_mosim1 */
<2 RK_PD1 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi0m1_cs0: spi0m1-cs0 {
rockchip,pins =
/* spi0_cs0m1 */
<2 RK_PD2 3 &pcfg_pull_none>;
};
};
spi1 {
/omit-if-no-ref/
spi1m0_pins: spi1m0-pins {
rockchip,pins =
/* spi1_clkm0 */
<2 RK_PB5 3 &pcfg_pull_none>,
/* spi1_misom0 */
<2 RK_PB6 3 &pcfg_pull_none>,
/* spi1_mosim0 */
<2 RK_PB7 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi1m0_cs0: spi1m0-cs0 {
rockchip,pins =
/* spi1_cs0m0 */
<2 RK_PC0 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi1m0_cs1: spi1m0-cs1 {
rockchip,pins =
/* spi1_cs1m0 */
<2 RK_PC6 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi1m1_pins: spi1m1-pins {
rockchip,pins =
/* spi1_clkm1 */
<3 RK_PC3 3 &pcfg_pull_none>,
/* spi1_misom1 */
<3 RK_PC2 3 &pcfg_pull_none>,
/* spi1_mosim1 */
<3 RK_PC1 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi1m1_cs0: spi1m1-cs0 {
rockchip,pins =
/* spi1_cs0m1 */
<3 RK_PA1 3 &pcfg_pull_none>;
};
};
spi2 {
/omit-if-no-ref/
spi2m0_pins: spi2m0-pins {
rockchip,pins =
/* spi2_clkm0 */
<2 RK_PC1 4 &pcfg_pull_none>,
/* spi2_misom0 */
<2 RK_PC2 4 &pcfg_pull_none>,
/* spi2_mosim0 */
<2 RK_PC3 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi2m0_cs0: spi2m0-cs0 {
rockchip,pins =
/* spi2_cs0m0 */
<2 RK_PC4 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi2m0_cs1: spi2m0-cs1 {
rockchip,pins =
/* spi2_cs1m0 */
<2 RK_PC5 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi2m1_pins: spi2m1-pins {
rockchip,pins =
/* spi2_clkm1 */
<3 RK_PA0 3 &pcfg_pull_none>,
/* spi2_misom1 */
<2 RK_PD7 3 &pcfg_pull_none>,
/* spi2_mosim1 */
<2 RK_PD6 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi2m1_cs0: spi2m1-cs0 {
rockchip,pins =
/* spi2_cs0m1 */
<2 RK_PD5 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi2m1_cs1: spi2m1-cs1 {
rockchip,pins =
/* spi2_cs1m1 */
<2 RK_PD4 3 &pcfg_pull_none>;
};
};
spi3 {
/omit-if-no-ref/
spi3m0_pins: spi3m0-pins {
rockchip,pins =
/* spi3_clkm0 */
<4 RK_PB3 4 &pcfg_pull_none>,
/* spi3_misom0 */
<4 RK_PB0 4 &pcfg_pull_none>,
/* spi3_mosim0 */
<4 RK_PB2 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi3m0_cs0: spi3m0-cs0 {
rockchip,pins =
/* spi3_cs0m0 */
<4 RK_PA6 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi3m0_cs1: spi3m0-cs1 {
rockchip,pins =
/* spi3_cs1m0 */
<4 RK_PA7 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi3m1_pins: spi3m1-pins {
rockchip,pins =
/* spi3_clkm1 */
<4 RK_PC2 2 &pcfg_pull_none>,
/* spi3_misom1 */
<4 RK_PC5 2 &pcfg_pull_none>,
/* spi3_mosim1 */
<4 RK_PC3 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi3m1_cs0: spi3m1-cs0 {
rockchip,pins =
/* spi3_cs0m1 */
<4 RK_PC6 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
spi3m1_cs1: spi3m1-cs1 {
rockchip,pins =
/* spi3_cs1m1 */
<4 RK_PD1 2 &pcfg_pull_none>;
};
};
tsadc {
/omit-if-no-ref/
tsadcm0_shut: tsadcm0-shut {
rockchip,pins =
/* tsadcm0_shut */
<0 RK_PA1 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
tsadcm1_shut: tsadcm1-shut {
rockchip,pins =
/* tsadcm1_shut */
<0 RK_PA2 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
tsadc_shutorg: tsadc-shutorg {
rockchip,pins =
/* tsadc_shutorg */
<0 RK_PA1 2 &pcfg_pull_none>;
};
};
uart0 {
/omit-if-no-ref/
uart0_xfer: uart0-xfer {
rockchip,pins =
/* uart0_rx */
<0 RK_PC0 3 &pcfg_pull_up>,
/* uart0_tx */
<0 RK_PC1 3 &pcfg_pull_up>;
};
/omit-if-no-ref/
uart0_ctsn: uart0-ctsn {
rockchip,pins =
/* uart0_ctsn */
<0 RK_PC7 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart0_rtsn: uart0-rtsn {
rockchip,pins =
/* uart0_rtsn */
<0 RK_PC4 3 &pcfg_pull_none>;
};
};
uart1 {
/omit-if-no-ref/
uart1m0_xfer: uart1m0-xfer {
rockchip,pins =
/* uart1_rxm0 */
<2 RK_PB3 2 &pcfg_pull_up>,
/* uart1_txm0 */
<2 RK_PB4 2 &pcfg_pull_up>;
};
/omit-if-no-ref/
uart1m0_ctsn: uart1m0-ctsn {
rockchip,pins =
/* uart1m0_ctsn */
<2 RK_PB6 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart1m0_rtsn: uart1m0-rtsn {
rockchip,pins =
/* uart1m0_rtsn */
<2 RK_PB5 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart1m1_xfer: uart1m1-xfer {
rockchip,pins =
/* uart1_rxm1 */
<3 RK_PD7 4 &pcfg_pull_up>,
/* uart1_txm1 */
<3 RK_PD6 4 &pcfg_pull_up>;
};
/omit-if-no-ref/
uart1m1_ctsn: uart1m1-ctsn {
rockchip,pins =
/* uart1m1_ctsn */
<4 RK_PC1 4 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart1m1_rtsn: uart1m1-rtsn {
rockchip,pins =
/* uart1m1_rtsn */
<4 RK_PB6 4 &pcfg_pull_none>;
};
};
uart2 {
/omit-if-no-ref/
uart2m0_xfer: uart2m0-xfer {
rockchip,pins =
/* uart2_rxm0 */
<0 RK_PD0 1 &pcfg_pull_up>,
/* uart2_txm0 */
<0 RK_PD1 1 &pcfg_pull_up>;
};
/omit-if-no-ref/
uart2m1_xfer: uart2m1-xfer {
rockchip,pins =
/* uart2_rxm1 */
<1 RK_PD6 2 &pcfg_pull_up>,
/* uart2_txm1 */
<1 RK_PD5 2 &pcfg_pull_up>;
};
};
uart3 {
/omit-if-no-ref/
uart3m0_xfer: uart3m0-xfer {
rockchip,pins =
/* uart3_rxm0 */
<1 RK_PA0 2 &pcfg_pull_up>,
/* uart3_txm0 */
<1 RK_PA1 2 &pcfg_pull_up>;
};
/omit-if-no-ref/
uart3m0_ctsn: uart3m0-ctsn {
rockchip,pins =
/* uart3m0_ctsn */
<1 RK_PA3 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart3m0_rtsn: uart3m0-rtsn {
rockchip,pins =
/* uart3m0_rtsn */
<1 RK_PA2 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart3m1_xfer: uart3m1-xfer {
rockchip,pins =
/* uart3_rxm1 */
<3 RK_PC0 4 &pcfg_pull_up>,
/* uart3_txm1 */
<3 RK_PB7 4 &pcfg_pull_up>;
};
};
uart4 {
/omit-if-no-ref/
uart4m0_xfer: uart4m0-xfer {
rockchip,pins =
/* uart4_rxm0 */
<1 RK_PA4 2 &pcfg_pull_up>,
/* uart4_txm0 */
<1 RK_PA6 2 &pcfg_pull_up>;
};
/omit-if-no-ref/
uart4m0_ctsn: uart4m0-ctsn {
rockchip,pins =
/* uart4m0_ctsn */
<1 RK_PA7 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart4m0_rtsn: uart4m0-rtsn {
rockchip,pins =
/* uart4m0_rtsn */
<1 RK_PA5 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart4m1_xfer: uart4m1-xfer {
rockchip,pins =
/* uart4_rxm1 */
<3 RK_PB1 4 &pcfg_pull_up>,
/* uart4_txm1 */
<3 RK_PB2 4 &pcfg_pull_up>;
};
};
uart5 {
/omit-if-no-ref/
uart5m0_xfer: uart5m0-xfer {
rockchip,pins =
/* uart5_rxm0 */
<2 RK_PA1 3 &pcfg_pull_up>,
/* uart5_txm0 */
<2 RK_PA2 3 &pcfg_pull_up>;
};
/omit-if-no-ref/
uart5m0_ctsn: uart5m0-ctsn {
rockchip,pins =
/* uart5m0_ctsn */
<1 RK_PD7 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart5m0_rtsn: uart5m0-rtsn {
rockchip,pins =
/* uart5m0_rtsn */
<2 RK_PA0 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart5m1_xfer: uart5m1-xfer {
rockchip,pins =
/* uart5_rxm1 */
<3 RK_PC3 4 &pcfg_pull_up>,
/* uart5_txm1 */
<3 RK_PC2 4 &pcfg_pull_up>;
};
};
uart6 {
/omit-if-no-ref/
uart6m0_xfer: uart6m0-xfer {
rockchip,pins =
/* uart6_rxm0 */
<2 RK_PA3 3 &pcfg_pull_up>,
/* uart6_txm0 */
<2 RK_PA4 3 &pcfg_pull_up>;
};
/omit-if-no-ref/
uart6m0_ctsn: uart6m0-ctsn {
rockchip,pins =
/* uart6m0_ctsn */
<2 RK_PC0 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart6m0_rtsn: uart6m0-rtsn {
rockchip,pins =
/* uart6m0_rtsn */
<2 RK_PB7 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart6m1_xfer: uart6m1-xfer {
rockchip,pins =
/* uart6_rxm1 */
<1 RK_PD6 3 &pcfg_pull_up>,
/* uart6_txm1 */
<1 RK_PD5 3 &pcfg_pull_up>;
};
};
uart7 {
/omit-if-no-ref/
uart7m0_xfer: uart7m0-xfer {
rockchip,pins =
/* uart7_rxm0 */
<2 RK_PA5 3 &pcfg_pull_up>,
/* uart7_txm0 */
<2 RK_PA6 3 &pcfg_pull_up>;
};
/omit-if-no-ref/
uart7m0_ctsn: uart7m0-ctsn {
rockchip,pins =
/* uart7m0_ctsn */
<2 RK_PC2 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart7m0_rtsn: uart7m0-rtsn {
rockchip,pins =
/* uart7m0_rtsn */
<2 RK_PC1 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart7m1_xfer: uart7m1-xfer {
rockchip,pins =
/* uart7_rxm1 */
<3 RK_PC5 4 &pcfg_pull_up>,
/* uart7_txm1 */
<3 RK_PC4 4 &pcfg_pull_up>;
};
/omit-if-no-ref/
uart7m2_xfer: uart7m2-xfer {
rockchip,pins =
/* uart7_rxm2 */
<4 RK_PA3 4 &pcfg_pull_up>,
/* uart7_txm2 */
<4 RK_PA2 4 &pcfg_pull_up>;
};
};
uart8 {
/omit-if-no-ref/
uart8m0_xfer: uart8m0-xfer {
rockchip,pins =
/* uart8_rxm0 */
<2 RK_PC6 2 &pcfg_pull_up>,
/* uart8_txm0 */
<2 RK_PC5 3 &pcfg_pull_up>;
};
/omit-if-no-ref/
uart8m0_ctsn: uart8m0-ctsn {
rockchip,pins =
/* uart8m0_ctsn */
<2 RK_PB2 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart8m0_rtsn: uart8m0-rtsn {
rockchip,pins =
/* uart8m0_rtsn */
<2 RK_PB1 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart8m1_xfer: uart8m1-xfer {
rockchip,pins =
/* uart8_rxm1 */
<3 RK_PA0 4 &pcfg_pull_up>,
/* uart8_txm1 */
<2 RK_PD7 4 &pcfg_pull_up>;
};
};
uart9 {
/omit-if-no-ref/
uart9m0_xfer: uart9m0-xfer {
rockchip,pins =
/* uart9_rxm0 */
<2 RK_PA7 3 &pcfg_pull_up>,
/* uart9_txm0 */
<2 RK_PB0 3 &pcfg_pull_up>;
};
/omit-if-no-ref/
uart9m0_ctsn: uart9m0-ctsn {
rockchip,pins =
/* uart9m0_ctsn */
<2 RK_PC4 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart9m0_rtsn: uart9m0-rtsn {
rockchip,pins =
/* uart9m0_rtsn */
<2 RK_PC3 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
uart9m1_xfer: uart9m1-xfer {
rockchip,pins =
/* uart9_rxm1 */
<4 RK_PC6 4 &pcfg_pull_up>,
/* uart9_txm1 */
<4 RK_PC5 4 &pcfg_pull_up>;
};
/omit-if-no-ref/
uart9m2_xfer: uart9m2-xfer {
rockchip,pins =
/* uart9_rxm2 */
<4 RK_PA5 4 &pcfg_pull_up>,
/* uart9_txm2 */
<4 RK_PA4 4 &pcfg_pull_up>;
};
};
vop {
/omit-if-no-ref/
vopm0_pins: vopm0-pins {
rockchip,pins =
/* vop_pwmm0 */
<0 RK_PC3 2 &pcfg_pull_none>;
};
/omit-if-no-ref/
vopm1_pins: vopm1-pins {
rockchip,pins =
/* vop_pwmm1 */
<3 RK_PC4 2 &pcfg_pull_none>;
};
};
};
/*
 * This part is edited by hand.
*/
&pinctrl {
spi0-hs {
/omit-if-no-ref/
spi0m0_pins_hs: spi0m0-pins {
rockchip,pins =
/* spi0_clkm0 */
<0 RK_PB5 2 &pcfg_pull_up_drv_level_1>,
/* spi0_misom0 */
<0 RK_PC5 2 &pcfg_pull_up_drv_level_1>,
/* spi0_mosim0 */
<0 RK_PB6 2 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi0m0_cs0_hs: spi0m0-cs0 {
rockchip,pins =
/* spi0_cs0m0 */
<0 RK_PC6 2 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi0m0_cs1_hs: spi0m0-cs1 {
rockchip,pins =
/* spi0_cs1m0 */
<0 RK_PC4 2 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi0m1_pins_hs: spi0m1-pins {
rockchip,pins =
/* spi0_clkm1 */
<2 RK_PD3 3 &pcfg_pull_up_drv_level_1>,
/* spi0_misom1 */
<2 RK_PD0 3 &pcfg_pull_up_drv_level_1>,
/* spi0_mosim1 */
<2 RK_PD1 3 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi0m1_cs0_hs: spi0m1-cs0 {
rockchip,pins =
/* spi0_cs0m1 */
<2 RK_PD2 3 &pcfg_pull_up_drv_level_1>;
};
};
spi1-hs {
/omit-if-no-ref/
spi1m0_pins_hs: spi1m0-pins {
rockchip,pins =
/* spi1_clkm0 */
<2 RK_PB5 3 &pcfg_pull_up_drv_level_1>,
/* spi1_misom0 */
<2 RK_PB6 3 &pcfg_pull_up_drv_level_1>,
/* spi1_mosim0 */
<2 RK_PB7 4 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi1m0_cs0_hs: spi1m0-cs0 {
rockchip,pins =
/* spi1_cs0m0 */
<2 RK_PC0 4 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi1m0_cs1_hs: spi1m0-cs1 {
rockchip,pins =
/* spi1_cs1m0 */
<2 RK_PC6 3 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi1m1_pins_hs: spi1m1-pins {
rockchip,pins =
/* spi1_clkm1 */
<3 RK_PC3 3 &pcfg_pull_up_drv_level_1>,
/* spi1_misom1 */
<3 RK_PC2 3 &pcfg_pull_up_drv_level_1>,
/* spi1_mosim1 */
<3 RK_PC1 3 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi1m1_cs0_hs: spi1m1-cs0 {
rockchip,pins =
/* spi1_cs0m1 */
<3 RK_PA1 3 &pcfg_pull_up_drv_level_1>;
};
};
spi2-hs {
/omit-if-no-ref/
spi2m0_pins_hs: spi2m0-pins {
rockchip,pins =
/* spi2_clkm0 */
<2 RK_PC1 4 &pcfg_pull_up_drv_level_1>,
/* spi2_misom0 */
<2 RK_PC2 4 &pcfg_pull_up_drv_level_1>,
/* spi2_mosim0 */
<2 RK_PC3 4 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi2m0_cs0_hs: spi2m0-cs0 {
rockchip,pins =
/* spi2_cs0m0 */
<2 RK_PC4 4 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi2m0_cs1_hs: spi2m0-cs1 {
rockchip,pins =
/* spi2_cs1m0 */
<2 RK_PC5 4 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi2m1_pins_hs: spi2m1-pins {
rockchip,pins =
/* spi2_clkm1 */
<3 RK_PA0 3 &pcfg_pull_up_drv_level_1>,
/* spi2_misom1 */
<2 RK_PD7 3 &pcfg_pull_up_drv_level_1>,
/* spi2_mosim1 */
<2 RK_PD6 3 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi2m1_cs0_hs: spi2m1-cs0 {
rockchip,pins =
/* spi2_cs0m1 */
<2 RK_PD5 3 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi2m1_cs1_hs: spi2m1-cs1 {
rockchip,pins =
/* spi2_cs1m1 */
<2 RK_PD4 3 &pcfg_pull_up_drv_level_1>;
};
};
spi3-hs {
/omit-if-no-ref/
spi3m0_pins_hs: spi3m0-pins {
rockchip,pins =
/* spi3_clkm0 */
<4 RK_PB3 4 &pcfg_pull_up_drv_level_1>,
/* spi3_misom0 */
<4 RK_PB0 4 &pcfg_pull_up_drv_level_1>,
/* spi3_mosim0 */
<4 RK_PB2 4 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi3m0_cs0_hs: spi3m0-cs0 {
rockchip,pins =
/* spi3_cs0m0 */
<4 RK_PA6 4 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi3m0_cs1_hs: spi3m0-cs1 {
rockchip,pins =
/* spi3_cs1m0 */
<4 RK_PA7 4 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi3m1_pins_hs: spi3m1-pins {
rockchip,pins =
/* spi3_clkm1 */
<4 RK_PC2 2 &pcfg_pull_up_drv_level_1>,
/* spi3_misom1 */
<4 RK_PC5 2 &pcfg_pull_up_drv_level_1>,
/* spi3_mosim1 */
<4 RK_PC3 2 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi3m1_cs0_hs: spi3m1-cs0 {
rockchip,pins =
/* spi3_cs0m1 */
<4 RK_PC6 2 &pcfg_pull_up_drv_level_1>;
};
/omit-if-no-ref/
spi3m1_cs1_hs: spi3m1-cs1 {
rockchip,pins =
/* spi3_cs1m1 */
<4 RK_PD1 2 &pcfg_pull_up_drv_level_1>;
};
};
gmac-txd-level3 {
/omit-if-no-ref/
gmac0_tx_bus2_level3: gmac0-tx-bus2-level3 {
rockchip,pins =
/* gmac0_txd0 */
<2 RK_PB3 1 &pcfg_pull_none_drv_level_3>,
/* gmac0_txd1 */
<2 RK_PB4 1 &pcfg_pull_none_drv_level_3>,
/* gmac0_txen */
<2 RK_PB5 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac0_rgmii_bus_level3: gmac0-rgmii-bus-level3 {
rockchip,pins =
/* gmac0_rxd2 */
<2 RK_PA3 2 &pcfg_pull_none>,
/* gmac0_rxd3 */
<2 RK_PA4 2 &pcfg_pull_none>,
/* gmac0_txd2 */
<2 RK_PA6 2 &pcfg_pull_none_drv_level_3>,
/* gmac0_txd3 */
<2 RK_PA7 2 &pcfg_pull_none_drv_level_3>;
};
/omit-if-no-ref/
gmac1m0_tx_bus2_level3: gmac1m0-tx-bus2-level3 {
rockchip,pins =
/* gmac1_txd0m0 */
<3 RK_PB5 3 &pcfg_pull_none_drv_level_3>,
/* gmac1_txd1m0 */
<3 RK_PB6 3 &pcfg_pull_none_drv_level_3>,
/* gmac1_txenm0 */
<3 RK_PB7 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac1m0_rgmii_bus_level3: gmac1m0-rgmii-bus-level3 {
rockchip,pins =
/* gmac1_rxd2m0 */
<3 RK_PA4 3 &pcfg_pull_none>,
/* gmac1_rxd3m0 */
<3 RK_PA5 3 &pcfg_pull_none>,
/* gmac1_txd2m0 */
<3 RK_PA2 3 &pcfg_pull_none_drv_level_3>,
/* gmac1_txd3m0 */
<3 RK_PA3 3 &pcfg_pull_none_drv_level_3>;
};
/omit-if-no-ref/
gmac1m1_tx_bus2_level3: gmac1m1-tx-bus2-level3 {
rockchip,pins =
/* gmac1_txd0m1 */
<4 RK_PA4 3 &pcfg_pull_none_drv_level_3>,
/* gmac1_txd1m1 */
<4 RK_PA5 3 &pcfg_pull_none_drv_level_3>,
/* gmac1_txenm1 */
<4 RK_PA6 3 &pcfg_pull_none>;
};
/omit-if-no-ref/
gmac1m1_rgmii_bus_level3: gmac1m1-rgmii-bus-level3 {
rockchip,pins =
/* gmac1_rxd2m1 */
<4 RK_PA1 3 &pcfg_pull_none>,
/* gmac1_rxd3m1 */
<4 RK_PA2 3 &pcfg_pull_none>,
/* gmac1_txd2m1 */
<3 RK_PD6 3 &pcfg_pull_none_drv_level_3>,
/* gmac1_txd3m1 */
<3 RK_PD7 3 &pcfg_pull_none_drv_level_3>;
};
};
gmac-txc-level2 {
/omit-if-no-ref/
gmac0_rgmii_clk_level2: gmac0-rgmii-clk-level2 {
rockchip,pins =
/* gmac0_rxclk */
<2 RK_PA5 2 &pcfg_pull_none>,
/* gmac0_txclk */
<2 RK_PB0 2 &pcfg_pull_none_drv_level_2>;
};
/omit-if-no-ref/
gmac1m0_rgmii_clk_level2: gmac1m0-rgmii-clk-level2 {
rockchip,pins =
/* gmac1_rxclkm0 */
<3 RK_PA7 3 &pcfg_pull_none>,
/* gmac1_txclkm0 */
<3 RK_PA6 3 &pcfg_pull_none_drv_level_2>;
};
/omit-if-no-ref/
gmac1m1_rgmii_clk_level2: gmac1m1-rgmii-clk-level2 {
rockchip,pins =
/* gmac1_rxclkm1 */
<4 RK_PA3 3 &pcfg_pull_none>,
/* gmac1_txclkm1 */
<4 RK_PA0 3 &pcfg_pull_none_drv_level_2>;
};
};
tsadc {
/omit-if-no-ref/
tsadc_pin: tsadc-pin {
rockchip,pins =
/* tsadc_pin */
<0 RK_PA1 0 &pcfg_pull_none>;
};
};
lcdc {
/omit-if-no-ref/
lcdc_clock: lcdc-clock {
rockchip,pins =
/* lcdc_clk */
<3 RK_PA0 1 &pcfg_pull_none>,
/* lcdc_den */
<3 RK_PC3 1 &pcfg_pull_none>,
/* lcdc_hsync */
<3 RK_PC1 1 &pcfg_pull_none>,
/* lcdc_vsync */
<3 RK_PC2 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
lcdc_data16: lcdc-data16 {
rockchip,pins =
/* lcdc_d3 */
<2 RK_PD3 1 &pcfg_pull_none>,
/* lcdc_d4 */
<2 RK_PD4 1 &pcfg_pull_none>,
/* lcdc_d5 */
<2 RK_PD5 1 &pcfg_pull_none>,
/* lcdc_d6 */
<2 RK_PD6 1 &pcfg_pull_none>,
/* lcdc_d7 */
<2 RK_PD7 1 &pcfg_pull_none>,
/* lcdc_d10 */
<3 RK_PA3 1 &pcfg_pull_none>,
/* lcdc_d11 */
<3 RK_PA4 1 &pcfg_pull_none>,
/* lcdc_d12 */
<3 RK_PA5 1 &pcfg_pull_none>,
/* lcdc_d13 */
<3 RK_PA6 1 &pcfg_pull_none>,
/* lcdc_d14 */
<3 RK_PA7 1 &pcfg_pull_none>,
/* lcdc_d15 */
<3 RK_PB0 1 &pcfg_pull_none>,
/* lcdc_d19 */
<3 RK_PB4 1 &pcfg_pull_none>,
/* lcdc_d20 */
<3 RK_PB5 1 &pcfg_pull_none>,
/* lcdc_d21 */
<3 RK_PB6 1 &pcfg_pull_none>,
/* lcdc_d22 */
<3 RK_PB7 1 &pcfg_pull_none>,
/* lcdc_d23 */
<3 RK_PC0 1 &pcfg_pull_none>;
};
/omit-if-no-ref/
lcdc_data18: lcdc-data18 {
rockchip,pins =
/* lcdc_d2 */
<2 RK_PD2 1 &pcfg_pull_none>,
/* lcdc_d3 */
<2 RK_PD3 1 &pcfg_pull_none>,
/* lcdc_d4 */
<2 RK_PD4 1 &pcfg_pull_none>,
/* lcdc_d5 */
<2 RK_PD5 1 &pcfg_pull_none>,
/* lcdc_d6 */
<2 RK_PD6 1 &pcfg_pull_none>,
/* lcdc_d7 */
<2 RK_PD7 1 &pcfg_pull_none>,
/* lcdc_d10 */
<3 RK_PA3 1 &pcfg_pull_none>,
/* lcdc_d11 */
<3 RK_PA4 1 &pcfg_pull_none>,
/* lcdc_d12 */
<3 RK_PA5 1 &pcfg_pull_none>,
/* lcdc_d13 */
<3 RK_PA6 1 &pcfg_pull_none>,
/* lcdc_d14 */
<3 RK_PA7 1 &pcfg_pull_none>,
/* lcdc_d15 */
<3 RK_PB0 1 &pcfg_pull_none>,
/* lcdc_d18 */
<3 RK_PB3 1 &pcfg_pull_none>,
/* lcdc_d19 */
<3 RK_PB4 1 &pcfg_pull_none>,
/* lcdc_d20 */
<3 RK_PB5 1 &pcfg_pull_none>,
/* lcdc_d21 */
<3 RK_PB6 1 &pcfg_pull_none>,
/* lcdc_d22 */
<3 RK_PB7 1 &pcfg_pull_none>,
/* lcdc_d23 */
<3 RK_PC0 1 &pcfg_pull_none>;
};
};
};
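/*
 * Illustrative usage (not part of this file): a board-level .dts enables a
 * controller and binds one of the pin groups above through the standard
 * pinctrl properties, e.g.:
 *
 *	&uart0 {
 *		pinctrl-names = "default";
 *		pinctrl-0 = <&uart0_xfer &uart0_ctsn &uart0_rtsn>;
 *		status = "okay";
 *	};
 *
 * The *_hs groups in the hand-edited section above select the same pins but
 * with an explicit pull-up and drive strength for high-speed operation.
 */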
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Helpers/definitions related to MSR access.
*/
#ifndef BOOT_MSR_H
#define BOOT_MSR_H
#include <asm/shared/msr.h>
/*
 * The kernel proper already defines rdmsr()/wrmsr(), but they are not usable
 * in the boot kernel, since they rely on tracepoint/exception handling
 * infrastructure that is not available here.
*/
static inline void boot_rdmsr(unsigned int reg, struct msr *m)
{
asm volatile("rdmsr" : "=a" (m->l), "=d" (m->h) : "c" (reg));
}
static inline void boot_wrmsr(unsigned int reg, const struct msr *m)
{
asm volatile("wrmsr" : : "c" (reg), "a"(m->l), "d" (m->h) : "memory");
}
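/*
 * Illustrative sketch (not part of the original header): a read-modify-write
 * of an MSR built from the two helpers above. The helper name and its 'set'
 * parameter are hypothetical, chosen only for the example.
 */
static inline void boot_msr_set_low_bits(unsigned int reg, u32 set)
{
	struct msr m;

	boot_rdmsr(reg, &m);
	m.l |= set;		/* touch only the low 32 bits */
	boot_wrmsr(reg, &m);
}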
#endif /* BOOT_MSR_H */
|
/*
* Copyright (C) 2013 Altera Corporation
* Copyright (C) 2011-2012 Tobias Klauser <[email protected]>
* Copyright (C) 2004 Microtronix Datacom Ltd.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
/* sys_cacheflush -- flush the processor cache. */
asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
unsigned int op)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
if (len == 0)
return 0;
	/* We only support op 0 for now; return an error if op is non-zero. */
if (op)
return -EINVAL;
/* Check for overflow */
if (addr + len < addr)
return -EFAULT;
if (mmap_read_lock_killable(mm))
return -EINTR;
/*
* Verify that the specified address region actually belongs
* to this process.
*/
vma = find_vma(mm, addr);
if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
mmap_read_unlock(mm);
return -EFAULT;
}
flush_cache_range(vma, addr, addr + len);
mmap_read_unlock(mm);
return 0;
}
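/*
 * Illustrative (hypothetical) userspace invocation: a JIT that has just
 * written instructions into 'buf' would flush them with something like
 *
 *	syscall(__NR_cacheflush, (unsigned long)buf, buf_len, 0);
 *
 * where the final argument is 'op' and must currently be 0.
 */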
asmlinkage int sys_getpagesize(void)
{
return PAGE_SIZE;
}
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/sched/act_meta_tc_index.c IFE skb->tc_index metadata module
*
* copyright Jamal Hadi Salim (2016)
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <uapi/linux/tc_act/tc_ife.h>
#include <net/tc_act/tc_ife.h>
static int skbtcindex_encode(struct sk_buff *skb, void *skbdata,
struct tcf_meta_info *e)
{
u32 ifetc_index = skb->tc_index;
return ife_encode_meta_u16(ifetc_index, skbdata, e);
}
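/*
 * The 16-bit tc_index travels on the wire in network byte order: the decode
 * path below converts back with ntohs(), and ife_encode_meta_u16() is
 * assumed to perform the matching htons() on encode.
 */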
static int skbtcindex_decode(struct sk_buff *skb, void *data, u16 len)
{
u16 ifetc_index = *(u16 *)data;
skb->tc_index = ntohs(ifetc_index);
return 0;
}
static int skbtcindex_check(struct sk_buff *skb, struct tcf_meta_info *e)
{
return ife_check_meta_u16(skb->tc_index, e);
}
static struct tcf_meta_ops ife_skbtcindex_ops = {
.metaid = IFE_META_TCINDEX,
.metatype = NLA_U16,
.name = "tc_index",
.synopsis = "skb tc_index 16 bit metadata",
.check_presence = skbtcindex_check,
.encode = skbtcindex_encode,
.decode = skbtcindex_decode,
.get = ife_get_meta_u16,
.alloc = ife_alloc_meta_u16,
.release = ife_release_meta_gen,
.validate = ife_validate_meta_u16,
.owner = THIS_MODULE,
};
static int __init ifetc_index_init_module(void)
{
return register_ife_op(&ife_skbtcindex_ops);
}
static void __exit ifetc_index_cleanup_module(void)
{
unregister_ife_op(&ife_skbtcindex_ops);
}
module_init(ifetc_index_init_module);
module_exit(ifetc_index_cleanup_module);
MODULE_AUTHOR("Jamal Hadi Salim(2016)");
MODULE_DESCRIPTION("Inter-FE skb tc_index metadata module");
MODULE_LICENSE("GPL");
MODULE_ALIAS_IFE_META("tcindex");
|
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include "bnx2x.h"
#define NA 0xCD
#define IDLE_CHK_E1 0x01
#define IDLE_CHK_E1H 0x02
#define IDLE_CHK_E2 0x04
#define IDLE_CHK_E3A0 0x08
#define IDLE_CHK_E3B0 0x10
#define IDLE_CHK_ERROR 1
#define IDLE_CHK_ERROR_NO_TRAFFIC 2
#define IDLE_CHK_WARNING 3
#define MAX_FAIL_MSG 256
/* statistics and error reporting */
static int idle_chk_errors, idle_chk_warnings;
/* masks for all chip types */
static int is_e1, is_e1h, is_e2, is_e3a0, is_e3b0;
/* struct for the argument list of a predicate in the self test database */
struct st_pred_args {
u32 val1; /* value read from first register */
u32 val2; /* value read from second register, if applicable */
u32 imm1; /* 1st value in predicate condition, left-to-right */
u32 imm2; /* 2nd value in predicate condition, left-to-right */
u32 imm3; /* 3rd value in predicate condition, left-to-right */
u32 imm4; /* 4th value in predicate condition, left-to-right */
};
/* struct representing self test record - a single test */
struct st_record {
	u8 chip_mask;	/* mask of IDLE_CHK_E* chips the check applies to */
	u8 macro;	/* selects how the registers below are accessed */
	u32 reg1;	/* first register to read */
	u32 reg2;	/* second register, when the access macro uses one */
	u16 loop;	/* number of reads to perform */
	u16 incr;	/* address increment between successive reads */
	int (*bnx2x_predicate)(struct st_pred_args *pred_args);
	u32 reg3;	/* third register, when the access macro uses one */
	u8 severity;	/* IDLE_CHK_ERROR/ERROR_NO_TRAFFIC/WARNING */
	char *fail_msg;	/* message reported when the predicate fires */
	struct st_pred_args pred_args;	/* immediates/values for the predicate */
};
/* predicates for self test */
static int peq(struct st_pred_args *args)
{
return (args->val1 == args->imm1);
}
static int pneq(struct st_pred_args *args)
{
return (args->val1 != args->imm1);
}
static int pand_neq(struct st_pred_args *args)
{
return ((args->val1 & args->imm1) != args->imm2);
}
static int pand_neq_x2(struct st_pred_args *args)
{
return (((args->val1 & args->imm1) != args->imm2) &&
((args->val1 & args->imm3) != args->imm4));
}
static int pneq_err(struct st_pred_args *args)
{
return ((args->val1 != args->imm1) && (idle_chk_errors > args->imm2));
}
static int pgt(struct st_pred_args *args)
{
return (args->val1 > args->imm1);
}
static int pneq_r2(struct st_pred_args *args)
{
return (args->val1 != args->val2);
}
static int plt_sub_r2(struct st_pred_args *args)
{
return (args->val1 < (args->val2 - args->imm1));
}
static int pne_sub_r2(struct st_pred_args *args)
{
return (args->val1 != (args->val2 - args->imm1));
}
static int prsh_and_neq(struct st_pred_args *args)
{
return (((args->val1 >> args->imm1) & args->imm2) != args->imm3);
}
static int peq_neq_r2(struct st_pred_args *args)
{
return ((args->val1 == args->imm1) && (args->val2 != args->imm2));
}
static int peq_neq_neq_r2(struct st_pred_args *args)
{
return ((args->val1 == args->imm1) && (args->val2 != args->imm2) &&
(args->val2 != args->imm3));
}
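/*
 * How a record is evaluated (inferred from the struct st_record fields, not
 * a verbatim copy of the driver's walker): for each record whose chip_mask
 * matches the chip under test, reg1 is read 'loop' times at stride 'incr'
 * (reg2/reg3 are read as well when the access macro calls for them), the
 * values land in pred_args.val1/val2 alongside the immediates stored below,
 * and a nonzero return from bnx2x_predicate() reports fail_msg at the
 * record's severity, bumping idle_chk_errors or idle_chk_warnings.
 */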
/* struct holding the database of self test checks (registers and predicates) */
/* line numbering starts at 2 since line 1 is the heading row in the source csv */
#define ST_DB_LINES 468
static struct st_record st_database[ST_DB_LINES] = {
/*line 2*/{(0x3), 1, 0x2114,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"PCIE: ucorr_err_status is not 0",
{NA, NA, 0x0FF010, 0, NA, NA} },
/*line 3*/{(0x3), 1, 0x2114,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"PCIE: ucorr_err_status - Unsupported request error",
{NA, NA, 0x100000, 0, NA, NA} },
/*line 4*/{(0x3), 1, 0x2120,
NA, 1, 0, pand_neq_x2,
NA, IDLE_CHK_WARNING,
"PCIE: corr_err_status is not 0x2000",
{NA, NA, 0x31C1, 0x2000, 0x31C1, 0} },
/*line 5*/{(0x3), 1, 0x2814,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"PCIE: attentions register is not 0x40100",
{NA, NA, ~0x40100, 0, NA, NA} },
/*line 6*/{(0x2), 1, 0x281c,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"PCIE: attentions register is not 0x40040100",
{NA, NA, ~0x40040100, 0, NA, NA} },
/*line 7*/{(0x2), 1, 0x2820,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"PCIE: attentions register is not 0x40040100",
{NA, NA, ~0x40040100, 0, NA, NA} },
/*line 8*/{(0x3), 1, PXP2_REG_PGL_EXP_ROM2,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP2: There are outstanding read requests. Not all completios have arrived for read requests on tags that are marked with 0",
{NA, NA, 0xffffffff, NA, NA, NA} },
/*line 9*/{(0x3), 2, 0x212c,
NA, 4, 4, pneq_err,
NA, IDLE_CHK_WARNING,
"PCIE: error packet header is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 10*/{(0x1C), 1, 0x2104,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"PCIE: ucorr_err_status is not 0",
{NA, NA, 0x0FD010, 0, NA, NA} },
/*line 11*/{(0x1C), 1, 0x2104,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"PCIE: ucorr_err_status - Unsupported request error",
{NA, NA, 0x100000, 0, NA, NA} },
/*line 12*/{(0x1C), 1, 0x2104,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"PCIE: ucorr_err_status - Flow Control Protocol Error",
{NA, NA, 0x2000, 0, NA, NA} },
/*line 13*/{(0x1C), 1, 0x2110,
NA, 1, 0, pand_neq_x2,
NA, IDLE_CHK_WARNING,
"PCIE: corr_err_status is not 0x2000",
{NA, NA, 0x31C1, 0x2000, 0x31C1, 0} },
/*line 14*/{(0x1C), 1, 0x2814,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"PCIE: TTX_BRIDGE_FORWARD_ERR - Received master request while BME was 0",
{NA, NA, 0x2000000, 0, NA, NA} },
/*line 15*/{(0x1C), 1, 0x2814,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"PCIE: Func 0 1: attentions register is not 0x2040902",
{NA, NA, ~0x2040902, 0, NA, NA} },
/*line 16*/{(0x1C), 1, 0x2854,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"PCIE: Func 2 3 4: attentions register is not 0x10240902",
{NA, NA, ~0x10240902, 0, NA, NA} },
/*line 17*/{(0x1C), 1, 0x285c,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"PCIE: Func 5 6 7: attentions register is not 0x10240902",
{NA, NA, ~0x10240902, 0, NA, NA} },
/*line 18*/{(0x18), 1, 0x3040,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"PCIE: Overflow in DLP2TLP buffer",
{NA, NA, 0x2, 0, NA, NA} },
/*line 19*/{(0x1C), 1, PXP2_REG_PGL_EXP_ROM2,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP2: There are outstanding read requests for tags 0-31. Not all completios have arrived for read requests on tags that are marked with 0",
{NA, NA, 0xffffffff, NA, NA, NA} },
/*line 20*/{(0x1C), 2, 0x211c,
NA, 4, 4, pneq_err,
NA, IDLE_CHK_WARNING,
"PCIE: error packet header is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 21*/{(0x1C), 1, PGLUE_B_REG_INCORRECT_RCV_DETAILS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"PGLUE_B: Packet received from PCIe not according to the rules",
{NA, NA, 0, NA, NA, NA} },
/*line 22*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_31_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: was_error for VFs 0-31 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 23*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_63_32,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: was_error for VFs 32-63 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 24*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_95_64,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: was_error for VFs 64-95 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 25*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_127_96,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: was_error for VFs 96-127 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 26*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_PF_7_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: was_error for PFs 0-7 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 27*/{(0x1C), 1, PGLUE_B_REG_RX_ERR_DETAILS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: Completion received with error. (2:0) - PFID. (3) - VF_VALID. (9:4) - VFID. (11:10) - Error code : 0 - Completion Timeout; 1 - Unsupported Request; 2 - Completer Abort. (12) - valid bit",
{NA, NA, 0, NA, NA, NA} },
/*line 28*/{(0x1C), 1, PGLUE_B_REG_RX_TCPL_ERR_DETAILS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: ATS TCPL received with error. (2:0) - PFID. (3) - VF_VALID. (9:4) - VFID. (11:10) - Error code : 0 - Completion Timeout ; 1 - Unsupported Request; 2 - Completer Abort. (16:12) - OTB Entry ID. (17) - valid bit",
{NA, NA, 0, NA, NA, NA} },
/*line 29*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_ADD_31_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: Error in master write. Address(31:0) is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 30*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_ADD_63_32,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: Error in master write. Address(63:32) is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 31*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_DETAILS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: Error in master write. Error details register is not 0. (4:0) VQID. (23:21) - PFID. (24) - VF_VALID. (30:25) - VFID",
{NA, NA, 0, NA, NA, NA} },
/*line 32*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_DETAILS2,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: Error in master write. Error details 2nd register is not 0. (21) - was_error set; (22) - BME cleared; (23) - FID_enable cleared; (24) - VF with parent PF FLR_request or IOV_disable_request",
{NA, NA, 0, NA, NA, NA} },
/*line 33*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_ADD_31_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE: Error in master read address(31:0) is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 34*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_ADD_63_32,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: Error in master read address(63:32) is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 35*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_DETAILS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: Error in master read Error details register is not 0. (4:0) VQID. (23:21) - PFID. (24) - VF_VALID. (30:25) - VFID",
{NA, NA, 0, NA, NA, NA} },
/*line 36*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_DETAILS2,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: Error in master read Error details 2nd register is not 0. (21) - was_error set; (22) - BME cleared; (23) - FID_enable cleared; (24) - VF with parent PF FLR_request or IOV_disable_request",
{NA, NA, 0, NA, NA, NA} },
/*line 37*/{(0x1C), 1, PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: Target VF length violation access",
{NA, NA, 0, NA, NA, NA} },
/*line 38*/{(0x1C), 1, PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: Target VF GRC space access failed permission check",
{NA, NA, 0, NA, NA, NA} },
/*line 39*/{(0x1C), 1, PGLUE_B_REG_TAGS_63_32,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: There are outstanding read requests for tags 32-63. Not all completios have arrived for read requests on tags that are marked with 0",
{NA, NA, 0xffffffff, NA, NA, NA} },
/*line 40*/{(0x1C), 3, PXP_REG_HST_VF_DISABLED_ERROR_VALID,
PXP_REG_HST_VF_DISABLED_ERROR_DATA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP: Access to disabled VF took place",
{NA, NA, 0, NA, NA, NA} },
/*line 41*/{(0x1C), 1, PXP_REG_HST_PER_VIOLATION_VALID,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP: Zone A permission violation occurred",
{NA, NA, 0, NA, NA, NA} },
/*line 42*/{(0x1C), 1, PXP_REG_HST_INCORRECT_ACCESS_VALID,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP: Incorrect transaction took place",
{NA, NA, 0, NA, NA, NA} },
/*line 43*/{(0x1C), 1, PXP2_REG_RD_CPL_ERR_DETAILS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP2: Completion received with error. Error details register is not 0. (15:0) - ECHO. (28:16) - Sub Request length plus start_offset_2_0 minus 1",
{NA, NA, 0, NA, NA, NA} },
/*line 44*/{(0x1C), 1, PXP2_REG_RD_CPL_ERR_DETAILS2,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP2: Completion received with error. Error details 2nd register is not 0. (4:0) - VQ ID. (8:5) - client ID. (9) - valid bit",
{NA, NA, 0, NA, NA, NA} },
/*line 45*/{(0x1F), 1, PXP2_REG_RQ_VQ0_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ0 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 46*/{(0x1F), 1, PXP2_REG_RQ_VQ1_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ1 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 47*/{(0x1F), 1, PXP2_REG_RQ_VQ2_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ2 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 48*/{(0x1F), 1, PXP2_REG_RQ_VQ3_ENTRY_CNT,
NA, 1, 0, pgt,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ3 is not empty",
{NA, NA, 2, NA, NA, NA} },
/*line 49*/{(0x1F), 1, PXP2_REG_RQ_VQ4_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ4 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 50*/{(0x1F), 1, PXP2_REG_RQ_VQ5_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ5 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 51*/{(0x1F), 1, PXP2_REG_RQ_VQ6_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ6 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 52*/{(0x1F), 1, PXP2_REG_RQ_VQ7_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ7 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 53*/{(0x1F), 1, PXP2_REG_RQ_VQ8_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ8 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 54*/{(0x1F), 1, PXP2_REG_RQ_VQ9_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ9 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 55*/{(0x1F), 1, PXP2_REG_RQ_VQ10_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ10 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 56*/{(0x1F), 1, PXP2_REG_RQ_VQ11_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ11 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 57*/{(0x1F), 1, PXP2_REG_RQ_VQ12_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ12 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 58*/{(0x1F), 1, PXP2_REG_RQ_VQ13_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ13 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 59*/{(0x1F), 1, PXP2_REG_RQ_VQ14_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ14 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 60*/{(0x1F), 1, PXP2_REG_RQ_VQ15_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ15 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 61*/{(0x1F), 1, PXP2_REG_RQ_VQ16_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ16 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 62*/{(0x1F), 1, PXP2_REG_RQ_VQ17_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ17 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 63*/{(0x1F), 1, PXP2_REG_RQ_VQ18_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ18 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 64*/{(0x1F), 1, PXP2_REG_RQ_VQ19_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ19 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 65*/{(0x1F), 1, PXP2_REG_RQ_VQ20_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ20 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 66*/{(0x1F), 1, PXP2_REG_RQ_VQ21_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ21 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 67*/{(0x1F), 1, PXP2_REG_RQ_VQ22_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ22 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 68*/{(0x1F), 1, PXP2_REG_RQ_VQ23_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ23 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 69*/{(0x1F), 1, PXP2_REG_RQ_VQ24_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ24 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 70*/{(0x1F), 1, PXP2_REG_RQ_VQ25_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ25 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 71*/{(0x1F), 1, PXP2_REG_RQ_VQ26_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ26 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 72*/{(0x1F), 1, PXP2_REG_RQ_VQ27_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ27 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 73*/{(0x1F), 1, PXP2_REG_RQ_VQ28_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ28 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 74*/{(0x1F), 1, PXP2_REG_RQ_VQ29_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ29 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 75*/{(0x1F), 1, PXP2_REG_RQ_VQ30_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ30 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 76*/{(0x1F), 1, PXP2_REG_RQ_VQ31_ENTRY_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: VQ31 is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 77*/{(0x1F), 1, PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: rq_ufifo_num_of_entry is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 78*/{(0x1F), 1, PXP2_REG_RQ_RBC_DONE,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"PXP2: rq_rbc_done is not 1",
{NA, NA, 1, NA, NA, NA} },
/*line 79*/{(0x1F), 1, PXP2_REG_RQ_CFG_DONE,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"PXP2: rq_cfg_done is not 1",
{NA, NA, 1, NA, NA, NA} },
/*line 80*/{(0x3), 1, PXP2_REG_PSWRQ_BW_CREDIT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: rq_read_credit and rq_write_credit are not 3",
{NA, NA, 0x1B, NA, NA, NA} },
/*line 81*/{(0x1F), 1, PXP2_REG_RD_START_INIT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"PXP2: rd_start_init is not 1",
{NA, NA, 1, NA, NA, NA} },
/*line 82*/{(0x1F), 1, PXP2_REG_RD_INIT_DONE,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"PXP2: rd_init_done is not 1",
{NA, NA, 1, NA, NA, NA} },
/*line 83*/{(0x1F), 3, PXP2_REG_RD_SR_CNT,
PXP2_REG_RD_SR_NUM_CFG, 1, 0, pne_sub_r2,
NA, IDLE_CHK_WARNING,
"PXP2: rd_sr_cnt is not equal to rd_sr_num_cfg",
{NA, NA, 1, NA, NA, NA} },
/*line 84*/{(0x1F), 3, PXP2_REG_RD_BLK_CNT,
PXP2_REG_RD_BLK_NUM_CFG, 1, 0, pneq_r2,
NA, IDLE_CHK_WARNING,
"PXP2: rd_blk_cnt is not equal to rd_blk_num_cfg",
{NA, NA, NA, NA, NA, NA} },
/*line 85*/{(0x1F), 3, PXP2_REG_RD_SR_CNT,
PXP2_REG_RD_SR_NUM_CFG, 1, 0, plt_sub_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: There are more than two unused SRs",
{NA, NA, 3, NA, NA, NA} },
/*line 86*/{(0x1F), 3, PXP2_REG_RD_BLK_CNT,
PXP2_REG_RD_BLK_NUM_CFG, 1, 0, plt_sub_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: There are more than two unused blocks",
{NA, NA, 2, NA, NA, NA} },
/*line 87*/{(0x1F), 1, PXP2_REG_RD_PORT_IS_IDLE_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: P0 All delivery ports are not idle",
{NA, NA, 1, NA, NA, NA} },
/*line 88*/{(0x1F), 1, PXP2_REG_RD_PORT_IS_IDLE_1,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: P1 All delivery ports are not idle",
{NA, NA, 1, NA, NA, NA} },
/*line 89*/{(0x1F), 2, PXP2_REG_RD_ALMOST_FULL_0,
NA, 11, 4, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: rd_almost_full is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 90*/{(0x1F), 1, PXP2_REG_RD_DISABLE_INPUTS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"PXP2: PSWRD inputs are disabled",
{NA, NA, 0, NA, NA, NA} },
/*line 91*/{(0x1F), 1, PXP2_REG_HST_HEADER_FIFO_STATUS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: HST header FIFO status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 92*/{(0x1F), 1, PXP2_REG_HST_DATA_FIFO_STATUS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: HST data FIFO status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 93*/{(0x3), 1, PXP2_REG_PGL_WRITE_BLOCKED,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"PXP2: pgl_write_blocked is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 94*/{(0x3), 1, PXP2_REG_PGL_READ_BLOCKED,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"PXP2: pgl_read_blocked is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 95*/{(0x1C), 1, PXP2_REG_PGL_WRITE_BLOCKED,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP2: pgl_write_blocked is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 96*/{(0x1C), 1, PXP2_REG_PGL_READ_BLOCKED,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP2: pgl_read_blocked is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 97*/{(0x1F), 1, PXP2_REG_PGL_TXW_CDTS,
NA, 1, 0, prsh_and_neq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PXP2: There is data which is ready",
{NA, NA, 17, 1, 0, NA} },
/*line 98*/{(0x1F), 1, PXP_REG_HST_ARB_IS_IDLE,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP: HST arbiter is not idle",
{NA, NA, 1, NA, NA, NA} },
/*line 99*/{(0x1F), 1, PXP_REG_HST_CLIENTS_WAITING_TO_ARB,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP: HST one of the clients is waiting for delivery",
{NA, NA, 0, NA, NA, NA} },
/*line 100*/{(0x1E), 1, PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP: HST Close the gates: Discarding internal writes",
{NA, NA, 0, NA, NA, NA} },
/*line 101*/{(0x1E), 1, PXP_REG_HST_DISCARD_DOORBELLS_STATUS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP: HST Close the gates: Discarding doorbells",
{NA, NA, 0, NA, NA, NA} },
/*line 102*/{(0x1C), 1, PXP2_REG_RQ_GARB,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"PXP2: PSWRQ Close the gates is asserted. Check AEU AFTER_INVERT registers for parity errors",
{NA, NA, 0x1000, 0, NA, NA} },
/*line 103*/{(0x1F), 1, DMAE_REG_GO_C0,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 0 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 104*/{(0x1F), 1, DMAE_REG_GO_C1,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 1 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 105*/{(0x1F), 1, DMAE_REG_GO_C2,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 2 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 106*/{(0x1F), 1, DMAE_REG_GO_C3,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 3 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 107*/{(0x1F), 1, DMAE_REG_GO_C4,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 4 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 108*/{(0x1F), 1, DMAE_REG_GO_C5,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 5 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 109*/{(0x1F), 1, DMAE_REG_GO_C6,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 6 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 110*/{(0x1F), 1, DMAE_REG_GO_C7,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 7 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 111*/{(0x1F), 1, DMAE_REG_GO_C8,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 8 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 112*/{(0x1F), 1, DMAE_REG_GO_C9,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 9 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 113*/{(0x1F), 1, DMAE_REG_GO_C10,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 10 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 114*/{(0x1F), 1, DMAE_REG_GO_C11,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 11 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 115*/{(0x1F), 1, DMAE_REG_GO_C12,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 12 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 116*/{(0x1F), 1, DMAE_REG_GO_C13,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 13 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 117*/{(0x1F), 1, DMAE_REG_GO_C14,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 14 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 118*/{(0x1F), 1, DMAE_REG_GO_C15,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DMAE: command 15 go is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 119*/{(0x1F), 1, CFC_REG_ERROR_VECTOR,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CFC: error vector is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 120*/{(0x1F), 1, CFC_REG_NUM_LCIDS_ARRIVING,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CFC: number of arriving LCIDs is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 121*/{(0x1F), 1, CFC_REG_NUM_LCIDS_ALLOC,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CFC: number of alloc LCIDs is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 122*/{(0x1F), 1, CFC_REG_NUM_LCIDS_LEAVING,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CFC: number of leaving LCIDs is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 123*/{(0x1F), 7, CFC_REG_INFO_RAM,
CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_neq_r2,
CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC,
"CFC: AC is neither 0 nor 2 on connType 0 (ETH)",
{NA, NA, 0, 0, 2, NA} },
/*line 124*/{(0x1F), 7, CFC_REG_INFO_RAM,
CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_r2,
CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC,
"CFC: AC is not 0 on connType 1 (TOE)",
{NA, NA, 1, 0, NA, NA} },
/*line 125*/{(0x1F), 7, CFC_REG_INFO_RAM,
CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_r2,
CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC,
"CFC: AC is not 0 on connType 3 (iSCSI)",
{NA, NA, 3, 0, NA, NA} },
/*line 126*/{(0x1F), 7, CFC_REG_INFO_RAM,
CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_r2,
CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC,
"CFC: AC is not 0 on connType 4 (FCoE)",
{NA, NA, 4, 0, NA, NA} },
/*line 127*/{(0x1F), 2, QM_REG_QTASKCTR_0,
NA, 64, 4, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: Queue is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 128*/{(0xF), 3, QM_REG_VOQCREDIT_0,
QM_REG_VOQINITCREDIT_0, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: VOQ_0, VOQ credit is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 129*/{(0xF), 3, QM_REG_VOQCREDIT_1,
QM_REG_VOQINITCREDIT_1, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: VOQ_1, VOQ credit is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 130*/{(0xF), 3, QM_REG_VOQCREDIT_4,
QM_REG_VOQINITCREDIT_4, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: VOQ_4, VOQ credit is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 131*/{(0x3), 3, QM_REG_PORT0BYTECRD,
QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: P0 Byte credit is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 132*/{(0x3), 3, QM_REG_PORT1BYTECRD,
QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: P1 Byte credit is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 133*/{(0x1F), 1, CCM_REG_CAM_OCCUP,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CCM: XX protection CAM is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 134*/{(0x1F), 1, TCM_REG_CAM_OCCUP,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TCM: XX protection CAM is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 135*/{(0x1F), 1, UCM_REG_CAM_OCCUP,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"UCM: XX protection CAM is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 136*/{(0x1F), 1, XCM_REG_CAM_OCCUP,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XCM: XX protection CAM is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 137*/{(0x1F), 1, BRB1_REG_NUM_OF_FULL_BLOCKS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"BRB1: BRB is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 138*/{(0x1F), 1, CSEM_REG_SLEEP_THREADS_VALID,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CSEM: There are sleeping threads",
{NA, NA, 0, NA, NA, NA} },
/*line 139*/{(0x1F), 1, TSEM_REG_SLEEP_THREADS_VALID,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TSEM: There are sleeping threads",
{NA, NA, 0, NA, NA, NA} },
/*line 140*/{(0x1F), 1, USEM_REG_SLEEP_THREADS_VALID,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"USEM: There are sleeping threads",
{NA, NA, 0, NA, NA, NA} },
/*line 141*/{(0x1F), 1, XSEM_REG_SLEEP_THREADS_VALID,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XSEM: There are sleeping threads",
{NA, NA, 0, NA, NA, NA} },
/*line 142*/{(0x1F), 1, CSEM_REG_SLOW_EXT_STORE_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CSEM: External store FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 143*/{(0x1F), 1, TSEM_REG_SLOW_EXT_STORE_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TSEM: External store FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 144*/{(0x1F), 1, USEM_REG_SLOW_EXT_STORE_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"USEM: External store FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 145*/{(0x1F), 1, XSEM_REG_SLOW_EXT_STORE_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XSEM: External store FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 146*/{(0x1F), 1, CSDM_REG_SYNC_PARSER_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CSDM: Parser serial FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 147*/{(0x1F), 1, TSDM_REG_SYNC_PARSER_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TSDM: Parser serial FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 148*/{(0x1F), 1, USDM_REG_SYNC_PARSER_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"USDM: Parser serial FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 149*/{(0x1F), 1, XSDM_REG_SYNC_PARSER_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XSDM: Parser serial FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 150*/{(0x1F), 1, CSDM_REG_SYNC_SYNC_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CSDM: Parser SYNC serial FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 151*/{(0x1F), 1, TSDM_REG_SYNC_SYNC_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TSDM: Parser SYNC serial FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 152*/{(0x1F), 1, USDM_REG_SYNC_SYNC_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"USDM: Parser SYNC serial FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 153*/{(0x1F), 1, XSDM_REG_SYNC_SYNC_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XSDM: Parser SYNC serial FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 154*/{(0x1F), 1, CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CSDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block",
{NA, NA, 1, NA, NA, NA} },
/*line 155*/{(0x1F), 1, TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TSDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block",
{NA, NA, 1, NA, NA, NA} },
/*line 156*/{(0x1F), 1, USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"USDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block",
{NA, NA, 1, NA, NA, NA} },
/*line 157*/{(0x1F), 1, XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XSDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block",
{NA, NA, 1, NA, NA, NA} },
/*line 158*/{(0x1F), 1, DORQ_REG_DQ_FILL_LVLF,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DORQ: DORQ queue is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 159*/{(0x1F), 1, CFC_REG_CFC_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CFC: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 160*/{(0x1F), 1, CDU_REG_CDU_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CDU: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 161*/{(0x1F), 1, CCM_REG_CCM_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CCM: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 162*/{(0x1F), 1, TCM_REG_TCM_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"TCM: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 163*/{(0x1F), 1, UCM_REG_UCM_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"UCM: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 164*/{(0x1F), 1, XCM_REG_XCM_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"XCM: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 165*/{(0xF), 1, PBF_REG_PBF_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"PBF: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 166*/{(0x1F), 1, TM_REG_TM_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"TIMERS: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 167*/{(0x1F), 1, DORQ_REG_DORQ_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"DORQ: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 168*/{(0x1F), 1, SRC_REG_SRC_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"SRCH: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 169*/{(0x1F), 1, PRS_REG_PRS_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"PRS: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 170*/{(0x1F), 1, BRB1_REG_BRB1_INT_STS,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"BRB1: Interrupt status is not 0",
{NA, NA, ~0xFC00, 0, NA, NA} },
/*line 171*/{(0x1F), 1, GRCBASE_XPB + PB_REG_PB_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"XPB: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 172*/{(0x1F), 1, GRCBASE_UPB + PB_REG_PB_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"UPB: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 173*/{(0x1), 1, PXP2_REG_PXP2_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP2: Interrupt status 0 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 174*/{(0x1E), 1, PXP2_REG_PXP2_INT_STS_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP2: Interrupt status 0 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 175*/{(0x1E), 1, PXP2_REG_PXP2_INT_STS_1,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP2: Interrupt status 1 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 176*/{(0x1F), 1, QM_REG_QM_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"QM: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 177*/{(0x1F), 1, PXP_REG_PXP_INT_STS_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP: P0 Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 178*/{(0x1F), 1, PXP_REG_PXP_INT_STS_1,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP: P1 Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 179*/{(0x1C), 1, PGLUE_B_REG_PGLUE_B_INT_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: Interrupt status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 180*/{(0x1F), 1, DORQ_REG_RSPA_CRD_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DORQ: Credit to XCM is not full",
{NA, NA, 2, NA, NA, NA} },
/*line 181*/{(0x1F), 1, DORQ_REG_RSPB_CRD_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"DORQ: Credit to UCM is not full",
{NA, NA, 2, NA, NA, NA} },
/*line 182*/{(0x3), 1, QM_REG_VOQCRDERRREG,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"QM: Credit error register is not 0 (byte or credit overflow/underflow)",
{NA, NA, 0, NA, NA, NA} },
/*line 183*/{(0x1F), 1, DORQ_REG_DQ_FULL_ST,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"DORQ: DORQ queue is full",
{NA, NA, 0, NA, NA, NA} },
/*line 184*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"AEU: P0 AFTER_INVERT_1 is not 0",
{NA, NA, ~0xCFFC, 0, NA, NA} },
/*line 185*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"AEU: P0 AFTER_INVERT_2 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 186*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"AEU: P0 AFTER_INVERT_3 is not 0",
{NA, NA, ~0xFFFF0000, 0, NA, NA} },
/*line 187*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"AEU: P0 AFTER_INVERT_4 is not 0",
{NA, NA, ~0x801FFFFF, 0, NA, NA} },
/*line 188*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_1_FUNC_1,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"AEU: P1 AFTER_INVERT_1 is not 0",
{NA, NA, ~0xCFFC, 0, NA, NA} },
/*line 189*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_2_FUNC_1,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"AEU: P1 AFTER_INVERT_2 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 190*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_3_FUNC_1,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"AEU: P1 AFTER_INVERT_3 is not 0",
{NA, NA, ~0xFFFF0000, 0, NA, NA} },
/*line 191*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_4_FUNC_1,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"AEU: P1 AFTER_INVERT_4 is not 0",
{NA, NA, ~0x801FFFFF, 0, NA, NA} },
/*line 192*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_1_MCP,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"AEU: MCP AFTER_INVERT_1 is not 0",
{NA, NA, ~0xCFFC, 0, NA, NA} },
/*line 193*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_2_MCP,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"AEU: MCP AFTER_INVERT_2 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 194*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_3_MCP,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"AEU: MCP AFTER_INVERT_3 is not 0",
{NA, NA, ~0xFFFF0000, 0, NA, NA} },
/*line 195*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_4_MCP,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"AEU: MCP AFTER_INVERT_4 is not 0",
{NA, NA, ~0x801FFFFF, 0, NA, NA} },
/*line 196*/{(0xF), 5, PBF_REG_P0_CREDIT,
PBF_REG_P0_INIT_CRD, 1, 0, pneq_r2,
PBF_REG_DISABLE_NEW_TASK_PROC_P0, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: P0 credit is not equal to init_crd",
{NA, NA, NA, NA, NA, NA} },
/*line 197*/{(0xF), 5, PBF_REG_P1_CREDIT,
PBF_REG_P1_INIT_CRD, 1, 0, pneq_r2,
PBF_REG_DISABLE_NEW_TASK_PROC_P1, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: P1 credit is not equal to init_crd",
{NA, NA, NA, NA, NA, NA} },
/*line 198*/{(0xF), 3, PBF_REG_P4_CREDIT,
PBF_REG_P4_INIT_CRD, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: P4 credit is not equal to init_crd",
{NA, NA, NA, NA, NA, NA} },
/*line 199*/{(0x10), 5, PBF_REG_CREDIT_Q0,
PBF_REG_INIT_CRD_Q0, 1, 0, pneq_r2,
PBF_REG_DISABLE_NEW_TASK_PROC_Q0, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: Q0 credit is not equal to init_crd",
{NA, NA, NA, NA, NA, NA} },
/*line 200*/{(0x10), 5, PBF_REG_CREDIT_Q1,
PBF_REG_INIT_CRD_Q1, 1, 0, pneq_r2,
PBF_REG_DISABLE_NEW_TASK_PROC_Q1, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: Q1 credit is not equal to init_crd",
{NA, NA, NA, NA, NA, NA} },
/*line 201*/{(0x10), 5, PBF_REG_CREDIT_Q2,
PBF_REG_INIT_CRD_Q2, 1, 0, pneq_r2,
PBF_REG_DISABLE_NEW_TASK_PROC_Q2, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: Q2 credit is not equal to init_crd",
{NA, NA, NA, NA, NA, NA} },
/*line 202*/{(0x10), 5, PBF_REG_CREDIT_Q3,
PBF_REG_INIT_CRD_Q3, 1, 0, pneq_r2,
PBF_REG_DISABLE_NEW_TASK_PROC_Q3, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: Q3 credit is not equal to init_crd",
{NA, NA, NA, NA, NA, NA} },
/*line 203*/{(0x10), 5, PBF_REG_CREDIT_Q4,
PBF_REG_INIT_CRD_Q4, 1, 0, pneq_r2,
PBF_REG_DISABLE_NEW_TASK_PROC_Q4, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: Q4 credit is not equal to init_crd",
{NA, NA, NA, NA, NA, NA} },
/*line 204*/{(0x10), 5, PBF_REG_CREDIT_Q5,
PBF_REG_INIT_CRD_Q5, 1, 0, pneq_r2,
PBF_REG_DISABLE_NEW_TASK_PROC_Q5, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: Q5 credit is not equal to init_crd",
{NA, NA, NA, NA, NA, NA} },
/*line 205*/{(0x10), 3, PBF_REG_CREDIT_LB_Q,
PBF_REG_INIT_CRD_LB_Q, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: LB Q credit is not equal to init_crd",
{NA, NA, NA, NA, NA, NA} },
/*line 206*/{(0xF), 1, PBF_REG_P0_TASK_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: P0 task_cnt is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 207*/{(0xF), 1, PBF_REG_P1_TASK_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: P1 task_cnt is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 208*/{(0xF), 1, PBF_REG_P4_TASK_CNT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: P4 task_cnt is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 209*/{(0x10), 1, PBF_REG_TASK_CNT_Q0,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: Q0 task_cnt is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 210*/{(0x10), 1, PBF_REG_TASK_CNT_Q1,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: Q1 task_cnt is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 211*/{(0x10), 1, PBF_REG_TASK_CNT_Q2,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: Q2 task_cnt is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 212*/{(0x10), 1, PBF_REG_TASK_CNT_Q3,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: Q3 task_cnt is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 213*/{(0x10), 1, PBF_REG_TASK_CNT_Q4,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: Q4 task_cnt is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 214*/{(0x10), 1, PBF_REG_TASK_CNT_Q5,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: Q5 task_cnt is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 215*/{(0x10), 1, PBF_REG_TASK_CNT_LB_Q,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PBF: LB Q task_cnt is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 216*/{(0x1F), 1, XCM_REG_CFC_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XCM: CFC_INIT_CRD is not 1",
{NA, NA, 1, NA, NA, NA} },
/*line 217*/{(0x1F), 1, UCM_REG_CFC_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"UCM: CFC_INIT_CRD is not 1",
{NA, NA, 1, NA, NA, NA} },
/*line 218*/{(0x1F), 1, TCM_REG_CFC_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TCM: CFC_INIT_CRD is not 1",
{NA, NA, 1, NA, NA, NA} },
/*line 219*/{(0x1F), 1, CCM_REG_CFC_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CCM: CFC_INIT_CRD is not 1",
{NA, NA, 1, NA, NA, NA} },
/*line 220*/{(0x1F), 1, XCM_REG_XQM_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XCM: XQM_INIT_CRD is not 32",
{NA, NA, 32, NA, NA, NA} },
/*line 221*/{(0x1F), 1, UCM_REG_UQM_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"UCM: UQM_INIT_CRD is not 32",
{NA, NA, 32, NA, NA, NA} },
/*line 222*/{(0x1F), 1, TCM_REG_TQM_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TCM: TQM_INIT_CRD is not 32",
{NA, NA, 32, NA, NA, NA} },
/*line 223*/{(0x1F), 1, CCM_REG_CQM_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CCM: CQM_INIT_CRD is not 32",
{NA, NA, 32, NA, NA, NA} },
/*line 224*/{(0x1F), 1, XCM_REG_TM_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XCM: TM_INIT_CRD is not 4",
{NA, NA, 4, NA, NA, NA} },
/*line 225*/{(0x1F), 1, UCM_REG_TM_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"UCM: TM_INIT_CRD is not 4",
{NA, NA, 4, NA, NA, NA} },
/*line 226*/{(0x1F), 1, XCM_REG_FIC0_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"XCM: FIC0_INIT_CRD is not 64",
{NA, NA, 64, NA, NA, NA} },
/*line 227*/{(0x1F), 1, UCM_REG_FIC0_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"UCM: FIC0_INIT_CRD is not 64",
{NA, NA, 64, NA, NA, NA} },
/*line 228*/{(0x1F), 1, TCM_REG_FIC0_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TCM: FIC0_INIT_CRD is not 64",
{NA, NA, 64, NA, NA, NA} },
/*line 229*/{(0x1F), 1, CCM_REG_FIC0_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CCM: FIC0_INIT_CRD is not 64",
{NA, NA, 64, NA, NA, NA} },
/*line 230*/{(0x1F), 1, XCM_REG_FIC1_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XCM: FIC1_INIT_CRD is not 64",
{NA, NA, 64, NA, NA, NA} },
/*line 231*/{(0x1F), 1, UCM_REG_FIC1_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"UCM: FIC1_INIT_CRD is not 64",
{NA, NA, 64, NA, NA, NA} },
/*line 232*/{(0x1F), 1, TCM_REG_FIC1_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TCM: FIC1_INIT_CRD is not 64",
{NA, NA, 64, NA, NA, NA} },
/*line 233*/{(0x1F), 1, CCM_REG_FIC1_INIT_CRD,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CCM: FIC1_INIT_CRD is not 64",
{NA, NA, 64, NA, NA, NA} },
/*line 234*/{(0x1), 1, XCM_REG_XX_FREE,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XCM: XX_FREE differs from expected 31",
{NA, NA, 31, NA, NA, NA} },
/*line 235*/{(0x1E), 1, XCM_REG_XX_FREE,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XCM: XX_FREE differs from expected 32",
{NA, NA, 32, NA, NA, NA} },
/*line 236*/{(0x1F), 1, UCM_REG_XX_FREE,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"UCM: XX_FREE differs from expected 27",
{NA, NA, 27, NA, NA, NA} },
/*line 237*/{(0x7), 1, TCM_REG_XX_FREE,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TCM: XX_FREE differs from expected 32",
{NA, NA, 32, NA, NA, NA} },
/*line 238*/{(0x18), 1, TCM_REG_XX_FREE,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TCM: XX_FREE differs from expected 29",
{NA, NA, 29, NA, NA, NA} },
/*line 239*/{(0x1F), 1, CCM_REG_XX_FREE,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CCM: XX_FREE differs from expected 24",
{NA, NA, 24, NA, NA, NA} },
/*line 240*/{(0x1F), 1, XSEM_REG_FAST_MEMORY + 0x18000,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XSEM: FOC0 credit less than initial credit",
{NA, NA, 0, NA, NA, NA} },
/*line 241*/{(0x1F), 1, XSEM_REG_FAST_MEMORY + 0x18040,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XSEM: FOC1 credit less than initial credit",
{NA, NA, 24, NA, NA, NA} },
/*line 242*/{(0x1F), 1, XSEM_REG_FAST_MEMORY + 0x18080,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"XSEM: FOC2 credit less than initial credit",
{NA, NA, 12, NA, NA, NA} },
/*line 243*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x18000,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"USEM: FOC0 credit less than initial credit",
{NA, NA, 26, NA, NA, NA} },
/*line 244*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x18040,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"USEM: FOC1 credit less than initial credit",
{NA, NA, 78, NA, NA, NA} },
/*line 245*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x18080,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"USEM: FOC2 credit less than initial credit",
{NA, NA, 16, NA, NA, NA} },
/*line 246*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x180C0,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"USEM: FOC3 credit less than initial credit",
{NA, NA, 32, NA, NA, NA} },
/*line 247*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x18000,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TSEM: FOC0 credit less than initial credit",
{NA, NA, 52, NA, NA, NA} },
/*line 248*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x18040,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TSEM: FOC1 credit less than initial credit",
{NA, NA, 24, NA, NA, NA} },
/*line 249*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x18080,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TSEM: FOC2 credit less than initial credit",
{NA, NA, 12, NA, NA, NA} },
/*line 250*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x180C0,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"TSEM: FOC3 credit less than initial credit",
{NA, NA, 32, NA, NA, NA} },
/*line 251*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x18000,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CSEM: FOC0 credit less than initial credit",
{NA, NA, 16, NA, NA, NA} },
/*line 252*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x18040,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CSEM: FOC1 credit less than initial credit",
{NA, NA, 18, NA, NA, NA} },
/*line 253*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x18080,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CSEM: FOC2 credit less than initial credit",
{NA, NA, 48, NA, NA, NA} },
/*line 254*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x180C0,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"CSEM: FOC3 credit less than initial credit",
{NA, NA, 14, NA, NA, NA} },
/*line 255*/{(0x1F), 1, PRS_REG_TSDM_CURRENT_CREDIT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PRS: TSDM current credit is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 256*/{(0x1F), 1, PRS_REG_TCM_CURRENT_CREDIT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PRS: TCM current credit is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 257*/{(0x1F), 1, PRS_REG_CFC_LD_CURRENT_CREDIT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PRS: CFC_LD current credit is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 258*/{(0x1F), 1, PRS_REG_CFC_SEARCH_CURRENT_CREDIT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PRS: CFC_SEARCH current credit is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 259*/{(0x1F), 1, PRS_REG_SRC_CURRENT_CREDIT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PRS: SRCH current credit is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 260*/{(0x1F), 1, PRS_REG_PENDING_BRB_PRS_RQ,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PRS: PENDING_BRB_PRS_RQ is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 261*/{(0x1F), 2, PRS_REG_PENDING_BRB_CAC0_RQ,
NA, 5, 4, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PRS: PENDING_BRB_CAC_RQ is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 262*/{(0x1F), 1, PRS_REG_SERIAL_NUM_STATUS_LSB,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PRS: SERIAL_NUM_STATUS_LSB is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 263*/{(0x1F), 1, PRS_REG_SERIAL_NUM_STATUS_MSB,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"PRS: SERIAL_NUM_STATUS_MSB is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 264*/{(0x1F), 1, CDU_REG_ERROR_DATA,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CDU: ERROR_DATA is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 265*/{(0x1F), 1, CCM_REG_STORM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CCM: STORM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 266*/{(0x1F), 1, CCM_REG_CSDM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CCM: CSDM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 267*/{(0x1F), 1, CCM_REG_TSEM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CCM: TSEM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 268*/{(0x1F), 1, CCM_REG_XSEM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CCM: XSEM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 269*/{(0x1F), 1, CCM_REG_USEM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CCM: USEM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 270*/{(0x1F), 1, CCM_REG_PBF_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"CCM: PBF declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 271*/{(0x1F), 1, TCM_REG_STORM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"TCM: STORM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 272*/{(0x1F), 1, TCM_REG_TSDM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"TCM: TSDM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 273*/{(0x1F), 1, TCM_REG_PRS_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"TCM: PRS declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 274*/{(0x1F), 1, TCM_REG_PBF_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"TCM: PBF declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 275*/{(0x1F), 1, TCM_REG_USEM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"TCM: USEM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 276*/{(0x1F), 1, TCM_REG_CSEM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"TCM: CSEM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 277*/{(0x1F), 1, UCM_REG_STORM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"UCM: STORM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 278*/{(0x1F), 1, UCM_REG_USDM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"UCM: USDM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 279*/{(0x1F), 1, UCM_REG_TSEM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"UCM: TSEM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 280*/{(0x1F), 1, UCM_REG_CSEM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"UCM: CSEM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 281*/{(0x1F), 1, UCM_REG_XSEM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"UCM: XSEM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 282*/{(0x1F), 1, UCM_REG_DORQ_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"UCM: DORQ declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 283*/{(0x1F), 1, XCM_REG_STORM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"XCM: STORM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 284*/{(0x1F), 1, XCM_REG_XSDM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"XCM: XSDM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 285*/{(0x1F), 1, XCM_REG_TSEM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"XCM: TSEM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 286*/{(0x1F), 1, XCM_REG_CSEM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"XCM: CSEM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 287*/{(0x1F), 1, XCM_REG_USEM_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"XCM: USEM declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 288*/{(0x1F), 1, XCM_REG_DORQ_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"XCM: DORQ declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 289*/{(0x1F), 1, XCM_REG_PBF_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"XCM: PBF declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 290*/{(0x1F), 1, XCM_REG_NIG0_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"XCM: NIG0 declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 291*/{(0x1F), 1, XCM_REG_NIG1_LENGTH_MIS,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"XCM: NIG1 declared message length unequal to actual",
{NA, NA, 0, NA, NA, NA} },
/*line 292*/{(0x1F), 1, QM_REG_XQM_WRC_FIFOLVL,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: XQM wrc_fifolvl is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 293*/{(0x1F), 1, QM_REG_UQM_WRC_FIFOLVL,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: UQM wrc_fifolvl is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 294*/{(0x1F), 1, QM_REG_TQM_WRC_FIFOLVL,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: TQM wrc_fifolvl is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 295*/{(0x1F), 1, QM_REG_CQM_WRC_FIFOLVL,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: CQM wrc_fifolvl is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 296*/{(0x1F), 1, QM_REG_QSTATUS_LOW,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: QSTATUS_LOW is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 297*/{(0x1F), 1, QM_REG_QSTATUS_HIGH,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: QSTATUS_HIGH is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 298*/{(0x1F), 1, QM_REG_PAUSESTATE0,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: PAUSESTATE0 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 299*/{(0x1F), 1, QM_REG_PAUSESTATE1,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: PAUSESTATE1 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 300*/{(0x1F), 1, QM_REG_OVFQNUM,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"QM: OVFQNUM is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 301*/{(0x1F), 1, QM_REG_OVFERROR,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"QM: OVFERROR is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 302*/{(0x1F), 6, QM_REG_PTRTBL,
NA, 64, 8, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: read and write variables not equal",
{NA, NA, NA, NA, NA, NA} },
/*line 303*/{(0x1F), 1, BRB1_REG_BRB1_PRTY_STS,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"BRB1: parity status is not 0",
{NA, NA, ~0x8, 0, NA, NA} },
/*line 304*/{(0x1F), 1, CDU_REG_CDU_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"CDU: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 305*/{(0x1F), 1, CFC_REG_CFC_PRTY_STS,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"CFC: parity status is not 0",
{NA, NA, ~0x2, 0, NA, NA} },
/*line 306*/{(0x1F), 1, CSDM_REG_CSDM_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"CSDM: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 307*/{(0x3), 1, DBG_REG_DBG_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"DBG: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 308*/{(0x1F), 1, DMAE_REG_DMAE_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"DMAE: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 309*/{(0x1F), 1, DORQ_REG_DORQ_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"DORQ: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 310*/{(0x1), 1, TCM_REG_TCM_PRTY_STS,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"TCM: parity status is not 0",
{NA, NA, ~0x3ffc0, 0, NA, NA} },
/*line 311*/{(0x1E), 1, TCM_REG_TCM_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"TCM: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 312*/{(0x1), 1, CCM_REG_CCM_PRTY_STS,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"CCM: parity status is not 0",
{NA, NA, ~0x3ffc0, 0, NA, NA} },
/*line 313*/{(0x1E), 1, CCM_REG_CCM_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"CCM: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 314*/{(0x1), 1, UCM_REG_UCM_PRTY_STS,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"UCM: parity status is not 0",
{NA, NA, ~0x3ffc0, 0, NA, NA} },
/*line 315*/{(0x1E), 1, UCM_REG_UCM_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"UCM: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 316*/{(0x1), 1, XCM_REG_XCM_PRTY_STS,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"XCM: parity status is not 0",
{NA, NA, ~0x3ffc0, 0, NA, NA} },
/*line 317*/{(0x1E), 1, XCM_REG_XCM_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"XCM: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 318*/{(0x1), 1, HC_REG_HC_PRTY_STS,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"HC: parity status is not 0",
{NA, NA, ~0x1, 0, NA, NA} },
/*line 319*/{(0x1), 1, MISC_REG_MISC_PRTY_STS,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"MISC: parity status is not 0",
{NA, NA, ~0x1, 0, NA, NA} },
/*line 320*/{(0x1F), 1, PRS_REG_PRS_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PRS: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 321*/{(0x1F), 1, PXP_REG_PXP_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 322*/{(0x1F), 1, QM_REG_QM_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"QM: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 323*/{(0x1), 1, SRC_REG_SRC_PRTY_STS,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"SRCH: parity status is not 0",
{NA, NA, ~0x4, 0, NA, NA} },
/*line 324*/{(0x1F), 1, TSDM_REG_TSDM_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"TSDM: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 325*/{(0x1F), 1, USDM_REG_USDM_PRTY_STS,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"USDM: parity status is not 0",
{NA, NA, ~0x20, 0, NA, NA} },
/*line 326*/{(0x1F), 1, XSDM_REG_XSDM_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"XSDM: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 327*/{(0x1F), 1, GRCBASE_XPB + PB_REG_PB_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"XPB: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 328*/{(0x1F), 1, GRCBASE_UPB + PB_REG_PB_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"UPB: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 329*/{(0x1F), 1, CSEM_REG_CSEM_PRTY_STS_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"CSEM: parity status 0 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 330*/{(0x1), 1, PXP2_REG_PXP2_PRTY_STS_0,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"PXP2: parity status 0 is not 0",
{NA, NA, ~0xfff40020, 0, NA, NA} },
/*line 331*/{(0x1E), 1, PXP2_REG_PXP2_PRTY_STS_0,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"PXP2: parity status 0 is not 0",
{NA, NA, ~0x20, 0, NA, NA} },
/*line 332*/{(0x1F), 1, TSEM_REG_TSEM_PRTY_STS_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"TSEM: parity status 0 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 333*/{(0x1F), 1, USEM_REG_USEM_PRTY_STS_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"USEM: parity status 0 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 334*/{(0x1F), 1, XSEM_REG_XSEM_PRTY_STS_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"XSEM: parity status 0 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 335*/{(0x1F), 1, CSEM_REG_CSEM_PRTY_STS_1,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"CSEM: parity status 1 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 336*/{(0x1), 1, PXP2_REG_PXP2_PRTY_STS_1,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"PXP2: parity status 1 is not 0",
{NA, NA, ~0x20, 0, NA, NA} },
/*line 337*/{(0x1E), 1, PXP2_REG_PXP2_PRTY_STS_1,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PXP2: parity status 1 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 338*/{(0x1F), 1, TSEM_REG_TSEM_PRTY_STS_1,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"TSEM: parity status 1 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 339*/{(0x1F), 1, USEM_REG_USEM_PRTY_STS_1,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"USEM: parity status 1 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 340*/{(0x1F), 1, XSEM_REG_XSEM_PRTY_STS_1,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"XSEM: parity status 1 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 341*/{(0x1C), 1, PGLUE_B_REG_PGLUE_B_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGLUE_B: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 342*/{(0x2), 2, QM_REG_QTASKCTR_EXT_A_0,
NA, 64, 4, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: Q_EXT_A (upper 64 queues), Queue is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 343*/{(0x2), 1, QM_REG_QSTATUS_LOW_EXT_A,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"QM: QSTATUS_LOW_EXT_A is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 344*/{(0x2), 1, QM_REG_QSTATUS_HIGH_EXT_A,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"QM: QSTATUS_HIGH_EXT_A is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 345*/{(0x1E), 1, QM_REG_PAUSESTATE2,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: PAUSESTATE2 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 346*/{(0x1E), 1, QM_REG_PAUSESTATE3,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: PAUSESTATE3 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 347*/{(0x2), 1, QM_REG_PAUSESTATE4,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"QM: PAUSESTATE4 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 348*/{(0x2), 1, QM_REG_PAUSESTATE5,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"QM: PAUSESTATE5 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 349*/{(0x2), 1, QM_REG_PAUSESTATE6,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"QM: PAUSESTATE6 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 350*/{(0x2), 1, QM_REG_PAUSESTATE7,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"QM: PAUSESTATE7 is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 351*/{(0x2), 6, QM_REG_PTRTBL_EXT_A,
NA, 64, 8, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: read and write variables not equal in ext table",
{NA, NA, NA, NA, NA, NA} },
/*line 352*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_OCCURRED,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"MISC: system kill occurred;",
{NA, NA, 0, NA, NA, NA} },
/*line 353*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_0,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"MISC: system kill occurred; status_0 register",
{NA, NA, 0, NA, NA, NA} },
/*line 354*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_1,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"MISC: system kill occurred; status_1 register",
{NA, NA, 0, NA, NA, NA} },
/*line 355*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_2,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"MISC: system kill occurred; status_2 register",
{NA, NA, 0, NA, NA, NA} },
/*line 356*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_3,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"MISC: system kill occurred; status_3 register",
{NA, NA, 0, NA, NA, NA} },
/*line 357*/{(0x1E), 1, MISC_REG_PCIE_HOT_RESET,
NA, NA, NA, pneq,
NA, IDLE_CHK_WARNING,
"MISC: pcie_rst_b was asserted without perst assertion",
{NA, NA, 0, NA, NA, NA} },
/*line 358*/{(0x1F), 1, NIG_REG_NIG_INT_STS_0,
NA, NA, NA, pand_neq,
NA, IDLE_CHK_ERROR,
"NIG: interrupt 0 is active",
{NA, NA, ~0x300, 0, NA, NA} },
/*line 359*/{(0x1F), 1, NIG_REG_NIG_INT_STS_0,
NA, NA, NA, peq,
NA, IDLE_CHK_WARNING,
"NIG: Access to BMAC while not active. If tested on FPGA, ignore this warning",
{NA, NA, 0x300, NA, NA, NA} },
/*line 360*/{(0x1F), 1, NIG_REG_NIG_INT_STS_1,
NA, NA, NA, pand_neq,
NA, IDLE_CHK_ERROR,
"NIG: interrupt 1 is active",
{NA, NA, 0x783FF03, 0, NA, NA} },
/*line 361*/{(0x1F), 1, NIG_REG_NIG_INT_STS_1,
NA, NA, NA, pand_neq,
NA, IDLE_CHK_WARNING,
"NIG: port cos was paused too long",
{NA, NA, ~0x783FF0F, 0, NA, NA} },
/*line 362*/{(0x1F), 1, NIG_REG_NIG_INT_STS_1,
NA, NA, NA, pand_neq,
NA, IDLE_CHK_WARNING,
"NIG: Got packets w/o Outer-VLAN in MF mode",
{NA, NA, 0xC, 0, NA, NA} },
/*line 363*/{(0x2), 1, NIG_REG_NIG_PRTY_STS,
NA, NA, NA, pand_neq,
NA, IDLE_CHK_ERROR,
"NIG: parity interrupt is active",
{NA, NA, ~0xFFC00000, 0, NA, NA} },
/*line 364*/{(0x1C), 1, NIG_REG_NIG_PRTY_STS_0,
NA, NA, NA, pand_neq,
NA, IDLE_CHK_ERROR,
"NIG: parity 0 interrupt is active",
{NA, NA, ~0xFFC00000, 0, NA, NA} },
/*line 365*/{(0x4), 1, NIG_REG_NIG_PRTY_STS_1,
NA, NA, NA, pand_neq,
NA, IDLE_CHK_ERROR,
"NIG: parity 1 interrupt is active",
{NA, NA, 0xff, 0, NA, NA} },
/*line 366*/{(0x18), 1, NIG_REG_NIG_PRTY_STS_1,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"NIG: parity 1 interrupt is active",
{NA, NA, 0, NA, NA, NA} },
/*line 367*/{(0x1F), 1, TSEM_REG_TSEM_INT_STS_0,
NA, NA, NA, pand_neq,
NA, IDLE_CHK_WARNING,
"TSEM: interrupt 0 is active",
{NA, NA, ~0x10000000, 0, NA, NA} },
/*line 368*/{(0x1F), 1, TSEM_REG_TSEM_INT_STS_0,
NA, NA, NA, peq,
NA, IDLE_CHK_WARNING,
"TSEM: interrupt 0 is active",
{NA, NA, 0x10000000, NA, NA, NA} },
/*line 369*/{(0x1F), 1, TSEM_REG_TSEM_INT_STS_1,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"TSEM: interrupt 1 is active",
{NA, NA, 0, NA, NA, NA} },
/*line 370*/{(0x1F), 1, CSEM_REG_CSEM_INT_STS_0,
NA, NA, NA, pand_neq,
NA, IDLE_CHK_WARNING,
"CSEM: interrupt 0 is active",
{NA, NA, ~0x10000000, 0, NA, NA} },
/*line 371*/{(0x1F), 1, CSEM_REG_CSEM_INT_STS_0,
NA, NA, NA, peq,
NA, IDLE_CHK_WARNING,
"CSEM: interrupt 0 is active",
{NA, NA, 0x10000000, NA, NA, NA} },
/*line 372*/{(0x1F), 1, CSEM_REG_CSEM_INT_STS_1,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"CSEM: interrupt 1 is active",
{NA, NA, 0, NA, NA, NA} },
/*line 373*/{(0x1F), 1, USEM_REG_USEM_INT_STS_0,
NA, NA, NA, pand_neq,
NA, IDLE_CHK_WARNING,
"USEM: interrupt 0 is active",
{NA, NA, ~0x10000000, 0, NA, NA} },
/*line 374*/{(0x1F), 1, USEM_REG_USEM_INT_STS_0,
NA, NA, NA, peq,
NA, IDLE_CHK_WARNING,
"USEM: interrupt 0 is active",
{NA, NA, 0x10000000, NA, NA, NA} },
/*line 375*/{(0x1F), 1, USEM_REG_USEM_INT_STS_1,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"USEM: interrupt 1 is active",
{NA, NA, 0, NA, NA, NA} },
/*line 376*/{(0x1F), 1, XSEM_REG_XSEM_INT_STS_0,
NA, NA, NA, pand_neq,
NA, IDLE_CHK_WARNING,
"XSEM: interrupt 0 is active",
{NA, NA, ~0x10000000, 0, NA, NA} },
/*line 377*/{(0x1F), 1, XSEM_REG_XSEM_INT_STS_0,
NA, NA, NA, peq,
NA, IDLE_CHK_WARNING,
"XSEM: interrupt 0 is active",
{NA, NA, 0x10000000, NA, NA, NA} },
/*line 378*/{(0x1F), 1, XSEM_REG_XSEM_INT_STS_1,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"XSEM: interrupt 1 is active",
{NA, NA, 0, NA, NA, NA} },
/*line 379*/{(0x1F), 1, TSDM_REG_TSDM_INT_STS_0,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"TSDM: interrupt 0 is active",
{NA, NA, 0, NA, NA, NA} },
/*line 380*/{(0x1F), 1, TSDM_REG_TSDM_INT_STS_1,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"TSDM: interrupt 0 is active",
{NA, NA, 0, NA, NA, NA} },
/*line 381*/{(0x1F), 1, CSDM_REG_CSDM_INT_STS_0,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"CSDM: interrupt 0 is active",
{NA, NA, 0, NA, NA, NA} },
/*line 382*/{(0x1F), 1, CSDM_REG_CSDM_INT_STS_1,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"CSDM: interrupt 0 is active",
{NA, NA, 0, NA, NA, NA} },
/*line 383*/{(0x1F), 1, USDM_REG_USDM_INT_STS_0,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"USDM: interrupt 0 is active",
{NA, NA, 0, NA, NA, NA} },
/*line 384*/{(0x1F), 1, USDM_REG_USDM_INT_STS_1,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"USDM: interrupt 0 is active",
{NA, NA, 0, NA, NA, NA} },
/*line 385*/{(0x1F), 1, XSDM_REG_XSDM_INT_STS_0,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"XSDM: interrupt 0 is active",
{NA, NA, 0, NA, NA, NA} },
/*line 386*/{(0x1F), 1, XSDM_REG_XSDM_INT_STS_1,
NA, NA, NA, pneq,
NA, IDLE_CHK_ERROR,
"XSDM: interrupt 0 is active",
{NA, NA, 0, NA, NA, NA} },
/*line 387*/{(0x2), 1, HC_REG_HC_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"HC: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 388*/{(0x1E), 1, MISC_REG_MISC_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"MISC: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 389*/{(0x1E), 1, SRC_REG_SRC_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"SRCH: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 390*/{(0xC), 3, QM_REG_BYTECRD0,
QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: Byte credit 0 is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 391*/{(0xC), 3, QM_REG_BYTECRD1,
QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: Byte credit 1 is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 392*/{(0xC), 3, QM_REG_BYTECRD2,
QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: Byte credit 2 is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 393*/{(0x1C), 1, QM_REG_VOQCRDERRREG,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"QM: VOQ credit error register is not 0 (VOQ credit overflow/underflow)",
{NA, NA, 0xFFFF, 0, NA, NA} },
/*line 394*/{(0x1C), 1, QM_REG_BYTECRDERRREG,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"QM: Byte credit error register is not 0 (Byte credit overflow/underflow)",
{NA, NA, 0xFFF, 0, NA, NA} },
/*line 395*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_31_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGL: FLR request is set for VF addresses 31-0",
{NA, NA, 0, NA, NA, NA} },
/*line 396*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_63_32,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGL: FLR request is set for VF addresses 63-32",
{NA, NA, 0, NA, NA, NA} },
/*line 397*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_95_64,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGL: FLR request is set for VF addresses 95-64",
{NA, NA, 0, NA, NA, NA} },
/*line 398*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_127_96,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGL: FLR request is set for VF addresses 127-96",
{NA, NA, 0, NA, NA, NA} },
/*line 399*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_PF_7_0,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGL: FLR request is set for PF addresses 7-0",
{NA, NA, 0, NA, NA, NA} },
/*line 400*/{(0x1C), 1, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGL: SR-IOV disable request is set",
{NA, NA, 0, NA, NA, NA} },
/*line 401*/{(0x1C), 1, PGLUE_B_REG_CFG_SPACE_A_REQUEST,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGL: Cfg-Space A request is set",
{NA, NA, 0, NA, NA, NA} },
/*line 402*/{(0x1C), 1, PGLUE_B_REG_CFG_SPACE_B_REQUEST,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"PGL: Cfg-Space B request is set",
{NA, NA, 0, NA, NA, NA} },
/*line 403*/{(0x1C), 1, IGU_REG_ERROR_HANDLING_DATA_VALID,
NA, NA, 0, pneq,
NA, IDLE_CHK_WARNING,
"IGU: some unauthorized commands arrived to the IGU. Use igu_dump_fifo utility for more details",
{NA, NA, 0, NA, NA, NA} },
/*line 404*/{(0x1C), 1, IGU_REG_ATTN_WRITE_DONE_PENDING,
NA, NA, NA, pneq,
NA, IDLE_CHK_WARNING,
"IGU attention message write done pending is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 405*/{(0x1C), 1, IGU_REG_WRITE_DONE_PENDING,
NA, 5, 4, pneq,
NA, IDLE_CHK_WARNING,
"IGU MSI/MSIX message write done pending is not empty",
{NA, NA, 0, NA, NA, NA} },
/*line 406*/{(0x1C), 1, IGU_REG_IGU_PRTY_STS,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"IGU: parity status is not 0",
{NA, NA, 0, NA, NA, NA} },
/*line 407*/{(0x1E), 3, MISC_REG_GRC_TIMEOUT_ATTN,
MISC_REG_AEU_AFTER_INVERT_4_FUNC_0, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"MISC_REG_GRC_TIMEOUT_ATTN: GRC timeout attention parameters (FUNC_0)",
{NA, NA, 0x4000000, 0, NA, NA} },
/*line 408*/{(0x1C), 3, MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID,
MISC_REG_AEU_AFTER_INVERT_4_FUNC_0, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID: GRC timeout attention FID (FUNC_0)",
{NA, NA, 0x4000000, 0, NA, NA} },
/*line 409*/{(0x1E), 3, MISC_REG_GRC_TIMEOUT_ATTN,
MISC_REG_AEU_AFTER_INVERT_4_FUNC_1, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"MISC_REG_GRC_TIMEOUT_ATTN: GRC timeout attention parameters (FUNC_1)",
{NA, NA, 0x4000000, 0, NA, NA} },
/*line 410*/{(0x1C), 3, MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID,
MISC_REG_AEU_AFTER_INVERT_4_FUNC_1, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID: GRC timeout attention FID (FUNC_1)",
{NA, NA, 0x4000000, 0, NA, NA} },
/*line 411*/{(0x1E), 3, MISC_REG_GRC_TIMEOUT_ATTN,
MISC_REG_AEU_AFTER_INVERT_4_MCP, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"MISC_REG_GRC_TIMEOUT_ATTN: GRC timeout attention parameters (MCP)",
{NA, NA, 0x4000000, 0, NA, NA} },
/*line 412*/{(0x1C), 3, MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID,
MISC_REG_AEU_AFTER_INVERT_4_MCP, 1, 0, pand_neq,
NA, IDLE_CHK_ERROR,
"MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID: GRC timeout attention FID (MCP)",
{NA, NA, 0x4000000, 0, NA, NA} },
/*line 413*/{(0x1C), 1, IGU_REG_SILENT_DROP,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"Some messages were not executed in the IGU",
{NA, NA, 0, NA, NA, NA} },
/*line 414*/{(0x1C), 1, PXP2_REG_PSWRQ_BW_CREDIT,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR,
"PXP2: rq_read_credit and rq_write_credit are not 5",
{NA, NA, 0x2D, NA, NA, NA} },
/*line 415*/{(0x1C), 1, IGU_REG_SB_CTRL_FSM,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"IGU: block is not in idle. SB_CTRL_FSM should be zero in idle state",
{NA, NA, 0, NA, NA, NA} },
/*line 416*/{(0x1C), 1, IGU_REG_INT_HANDLE_FSM,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"IGU: block is not in idle. INT_HANDLE_FSM should be zero in idle state",
{NA, NA, 0, NA, NA, NA} },
/*line 417*/{(0x1C), 1, IGU_REG_ATTN_FSM,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"IGU: block is not in idle. SB_ATTN_FSMshould be zeroor two in idle state",
{NA, NA, ~0x2, 0, NA, NA} },
/*line 418*/{(0x1C), 1, IGU_REG_CTRL_FSM,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"IGU: block is not in idle. SB_CTRL_FSM should be zero in idle state",
{NA, NA, ~0x1, 0, NA, NA} },
/*line 419*/{(0x1C), 1, IGU_REG_PXP_ARB_FSM,
NA, 1, 0, pand_neq,
NA, IDLE_CHK_WARNING,
"IGU: block is not in idle. SB_ARB_FSM should be zero in idle state",
{NA, NA, ~0x1, 0, NA, NA} },
/*line 420*/{(0x1C), 1, IGU_REG_PENDING_BITS_STATUS,
NA, 5, 4, pneq,
NA, IDLE_CHK_WARNING,
"IGU: block is not in idle. There are pending write done",
{NA, NA, 0, NA, NA, NA} },
/*line 421*/{(0x10), 3, QM_REG_VOQCREDIT_0,
QM_REG_VOQINITCREDIT_0, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: VOQ_0, VOQ credit is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 422*/{(0x10), 3, QM_REG_VOQCREDIT_1,
QM_REG_VOQINITCREDIT_1, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: VOQ_1, VOQ credit is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 423*/{(0x10), 3, QM_REG_VOQCREDIT_2,
QM_REG_VOQINITCREDIT_2, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: VOQ_2, VOQ credit is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 424*/{(0x10), 3, QM_REG_VOQCREDIT_3,
QM_REG_VOQINITCREDIT_3, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: VOQ_3, VOQ credit is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 425*/{(0x10), 3, QM_REG_VOQCREDIT_4,
QM_REG_VOQINITCREDIT_4, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: VOQ_4, VOQ credit is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 426*/{(0x10), 3, QM_REG_VOQCREDIT_5,
QM_REG_VOQINITCREDIT_5, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: VOQ_5, VOQ credit is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 427*/{(0x10), 3, QM_REG_VOQCREDIT_6,
QM_REG_VOQINITCREDIT_6, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: VOQ_6 (LB VOQ), VOQ credit is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 428*/{(0x10), 3, QM_REG_BYTECRD0,
QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: Byte credit 0 is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 429*/{(0x10), 3, QM_REG_BYTECRD1,
QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: Byte credit 1 is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 430*/{(0x10), 3, QM_REG_BYTECRD2,
QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: Byte credit 2 is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 431*/{(0x10), 3, QM_REG_BYTECRD3,
QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: Byte credit 3 is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 432*/{(0x10), 3, QM_REG_BYTECRD4,
QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: Byte credit 4 is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 433*/{(0x10), 3, QM_REG_BYTECRD5,
QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: Byte credit 5 is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 434*/{(0x10), 3, QM_REG_BYTECRD6,
QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"QM: Byte credit 6 is not equal to initial credit",
{NA, NA, NA, NA, NA, NA} },
/*line 435*/{(0x10), 1, QM_REG_FWVOQ0TOHWVOQ,
NA, 1, 0, peq,
NA, IDLE_CHK_ERROR,
"QM: FwVoq0 is mapped to HwVoq7 (non-TX HwVoq)",
{NA, NA, 0x7, NA, NA, NA} },
/*line 436*/{(0x10), 1, QM_REG_FWVOQ1TOHWVOQ,
NA, 1, 0, peq,
NA, IDLE_CHK_ERROR,
"QM: FwVoq1 is mapped to HwVoq7 (non-TX HwVoq)",
{NA, NA, 0x7, NA, NA, NA} },
/*line 437*/{(0x10), 1, QM_REG_FWVOQ2TOHWVOQ,
NA, 1, 0, peq,
NA, IDLE_CHK_ERROR,
"QM: FwVoq2 is mapped to HwVoq7 (non-TX HwVoq)",
{NA, NA, 0x7, NA, NA, NA} },
/*line 438*/{(0x10), 1, QM_REG_FWVOQ3TOHWVOQ,
NA, 1, 0, peq,
NA, IDLE_CHK_ERROR,
"QM: FwVoq3 is mapped to HwVoq7 (non-TX HwVoq)",
{NA, NA, 0x7, NA, NA, NA} },
/*line 439*/{(0x10), 1, QM_REG_FWVOQ4TOHWVOQ,
NA, 1, 0, peq,
NA, IDLE_CHK_ERROR,
"QM: FwVoq4 is mapped to HwVoq7 (non-TX HwVoq)",
{NA, NA, 0x7, NA, NA, NA} },
/*line 440*/{(0x10), 1, QM_REG_FWVOQ5TOHWVOQ,
NA, 1, 0, peq,
NA, IDLE_CHK_ERROR,
"QM: FwVoq5 is mapped to HwVoq7 (non-TX HwVoq)",
{NA, NA, 0x7, NA, NA, NA} },
/*line 441*/{(0x10), 1, QM_REG_FWVOQ6TOHWVOQ,
NA, 1, 0, peq,
NA, IDLE_CHK_ERROR,
"QM: FwVoq6 is mapped to HwVoq7 (non-TX HwVoq)",
{NA, NA, 0x7, NA, NA, NA} },
/*line 442*/{(0x10), 1, QM_REG_FWVOQ7TOHWVOQ,
NA, 1, 0, peq,
NA, IDLE_CHK_ERROR,
"QM: FwVoq7 is mapped to HwVoq7 (non-TX HwVoq)",
{NA, NA, 0x7, NA, NA, NA} },
/*line 443*/{(0x1F), 1, NIG_REG_INGRESS_EOP_PORT0_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: Port 0 EOP FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 444*/{(0x1F), 1, NIG_REG_INGRESS_EOP_PORT1_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: Port 1 EOP FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 445*/{(0x1F), 1, NIG_REG_INGRESS_EOP_LB_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: LB EOP FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 446*/{(0x1F), 1, NIG_REG_INGRESS_RMP0_DSCR_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"NIG: Port 0 RX MCP descriptor FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 447*/{(0x1F), 1, NIG_REG_INGRESS_RMP1_DSCR_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"NIG: Port 1 RX MCP descriptor FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 448*/{(0x1F), 1, NIG_REG_INGRESS_LB_PBF_DELAY_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: PBF LB FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 449*/{(0x1F), 1, NIG_REG_EGRESS_MNG0_FIFO_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"NIG: Port 0 TX MCP FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 450*/{(0x1F), 1, NIG_REG_EGRESS_MNG1_FIFO_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"NIG: Port 1 TX MCP FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 451*/{(0x1F), 1, NIG_REG_EGRESS_DEBUG_FIFO_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: Debug FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 452*/{(0x1F), 1, NIG_REG_EGRESS_DELAY0_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: PBF IF0 FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 453*/{(0x1F), 1, NIG_REG_EGRESS_DELAY1_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: PBF IF1 FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 454*/{(0x1F), 1, NIG_REG_LLH0_FIFO_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: Port 0 RX LLH FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 455*/{(0x1F), 1, NIG_REG_LLH1_FIFO_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: Port 1 RX LLH FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 456*/{(0x1C), 1, NIG_REG_P0_TX_MNG_HOST_FIFO_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"NIG: Port 0 TX MCP FIFO for traffic going to the host is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 457*/{(0x1C), 1, NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"NIG: Port 1 TX MCP FIFO for traffic going to the host is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 458*/{(0x1C), 1, NIG_REG_P0_TLLH_FIFO_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: Port 0 TX LLH FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 459*/{(0x1C), 1, NIG_REG_P1_TLLH_FIFO_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: Port 1 TX LLH FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 460*/{(0x1C), 1, NIG_REG_P0_HBUF_DSCR_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"NIG: Port 0 RX MCP descriptor FIFO for traffic from the host is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 461*/{(0x1C), 1, NIG_REG_P1_HBUF_DSCR_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_WARNING,
"NIG: Port 1 RX MCP descriptor FIFO for traffic from the host is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 462*/{(0x18), 1, NIG_REG_P0_RX_MACFIFO_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: Port 0 RX MAC interface FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 463*/{(0x18), 1, NIG_REG_P1_RX_MACFIFO_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: Port 1 RX MAC interface FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 464*/{(0x18), 1, NIG_REG_P0_TX_MACFIFO_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: Port 0 TX MAC interface FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 465*/{(0x18), 1, NIG_REG_P1_TX_MACFIFO_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: Port 1 TX MAC interface FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 466*/{(0x10), 1, NIG_REG_EGRESS_DELAY2_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: PBF IF2 FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 467*/{(0x10), 1, NIG_REG_EGRESS_DELAY3_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: PBF IF3 FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 468*/{(0x10), 1, NIG_REG_EGRESS_DELAY4_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: PBF IF4 FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
/*line 469*/{(0x10), 1, NIG_REG_EGRESS_DELAY5_EMPTY,
NA, 1, 0, pneq,
NA, IDLE_CHK_ERROR_NO_TRAFFIC,
"NIG: PBF IF5 FIFO is not empty",
{NA, NA, 1, NA, NA, NA} },
};
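/* each entry above is an st_record initializer; the field order,
 * inferred from the walker in bnx2x_idle_chk() below, is:
 * {chip_mask, macro, reg1, reg2, loop, incr, predicate, reg3,
 * severity, fail_msg, pred_args}
 * val1/val2 of pred_args are filled at runtime from register reads;
 * the remaining constants parameterize the predicate
 */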
/* handle self-test failures according to severity and type */
static void bnx2x_self_test_log(struct bnx2x *bp, u8 severity, char *message)
{
switch (severity) {
case IDLE_CHK_ERROR:
BNX2X_ERR("ERROR %s", message);
idle_chk_errors++;
break;
case IDLE_CHK_ERROR_NO_TRAFFIC:
DP(NETIF_MSG_HW, "INFO %s", message);
break;
case IDLE_CHK_WARNING:
DP(NETIF_MSG_HW, "WARNING %s", message);
idle_chk_warnings++;
break;
}
}
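/* note: idle_chk_errors and idle_chk_warnings are the file-scope
 * counters incremented here; they are reset at the top of
 * bnx2x_idle_chk() below
 */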
/* specific test for QM rd/wr pointers and rd/wr banks */
static void bnx2x_idle_chk6(struct bnx2x *bp,
struct st_record *rec, char *message)
{
u32 rd_ptr, wr_ptr, rd_bank, wr_bank;
int i;
for (i = 0; i < rec->loop; i++) {
/* read regs */
rec->pred_args.val1 =
REG_RD(bp, rec->reg1 + i * rec->incr);
rec->pred_args.val2 =
REG_RD(bp, rec->reg1 + i * rec->incr + 4);
/* calc read and write pointers */
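/* per the decode below, word0 bits [29:6] hold rd_ptr, while wr_ptr
 * is split: its two low bits come from word0 bits [31:30] and the
 * rest from word1 bits [21:0]
 */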
rd_ptr = ((rec->pred_args.val1 & 0x3FFFFFC0) >> 6);
wr_ptr = ((((rec->pred_args.val1 & 0xC0000000) >> 30) & 0x3) |
((rec->pred_args.val2 & 0x3FFFFF) << 2));
/* perform pointer test */
if (rd_ptr != wr_ptr) {
snprintf(message, MAX_FAIL_MSG,
"QM: PTRTBL entry %d- rd_ptr is not equal to wr_ptr. Values are 0x%x and 0x%x\n",
i, rd_ptr, wr_ptr);
bnx2x_self_test_log(bp, rec->severity, message);
}
/* calculate read and write banks */
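/* again per the decode below: rd_bank is word0 bits [5:4], wr_bank is
 * word0 bits [1:0]
 */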
rd_bank = ((rec->pred_args.val1 & 0x30) >> 4);
wr_bank = (rec->pred_args.val1 & 0x03);
/* perform bank test */
if (rd_bank != wr_bank) {
snprintf(message, MAX_FAIL_MSG,
"QM: PTRTBL entry %d - rd_bank is not equal to wr_bank. Values are 0x%x 0x%x\n",
i, rd_bank, wr_bank);
bnx2x_self_test_log(bp, rec->severity, message);
}
}
}
/* specific test for CFC info RAM and CID CAM */
static void bnx2x_idle_chk7(struct bnx2x *bp,
struct st_record *rec, char *message)
{
int i;
/* iterate through lcids */
for (i = 0; i < rec->loop; i++) {
/* make sure cam entry is valid (bit 0) */
if ((REG_RD(bp, (rec->reg2 + i * 4)) & 0x1) != 0x1)
continue;
/* get connection type (multiple reads due to widebus) */
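/* each INFO_RAM entry spans 16 bytes (rec->incr); all four dwords
 * are read to satisfy the wide-bus access, but only the third one
 * carries the connection type
 */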
REG_RD(bp, (rec->reg1 + i * rec->incr));
REG_RD(bp, (rec->reg1 + i * rec->incr + 4));
rec->pred_args.val1 =
REG_RD(bp, (rec->reg1 + i * rec->incr + 8));
REG_RD(bp, (rec->reg1 + i * rec->incr + 12));
/* obtain connection type */
if (is_e1 || is_e1h) {
/* E1 E1H (bits 4..7) */
rec->pred_args.val1 &= 0x78;
rec->pred_args.val1 >>= 3;
} else {
/* E2 E3A0 E3B0 (bits 26..29) */
rec->pred_args.val1 &= 0x1E000000;
rec->pred_args.val1 >>= 25;
}
/* get activity counter value */
rec->pred_args.val2 = REG_RD(bp, rec->reg3 + i * 4);
/* validate ac value is legal for con_type at idle state */
if (rec->bnx2x_predicate(&rec->pred_args)) {
snprintf(message, MAX_FAIL_MSG,
"%s. Values are 0x%x 0x%x\n", rec->fail_msg,
rec->pred_args.val1, rec->pred_args.val2);
bnx2x_self_test_log(bp, rec->severity, message);
}
}
}
/* self test procedure
* scan auto-generated database
* for each line:
* 1. compare chip mask
 * 2. determine type (according to macro number)
* 3. read registers
* 4. call predicate
* 5. collate results and statistics
*/
int bnx2x_idle_chk(struct bnx2x *bp)
{
u16 i; /* loop counter */
u16 st_ind; /* self test database access index */
struct st_record rec; /* current record variable */
char message[MAX_FAIL_MSG]; /* message to log */
	/* init stats */
idle_chk_errors = 0;
idle_chk_warnings = 0;
	/* create masks for all chip types */
is_e1 = CHIP_IS_E1(bp);
is_e1h = CHIP_IS_E1H(bp);
is_e2 = CHIP_IS_E2(bp);
is_e3a0 = CHIP_IS_E3A0(bp);
is_e3b0 = CHIP_IS_E3B0(bp);
	/* database main loop */
for (st_ind = 0; st_ind < ST_DB_LINES; st_ind++) {
rec = st_database[st_ind];
		/* check if test applies to chip */
if (!((rec.chip_mask & IDLE_CHK_E1) && is_e1) &&
!((rec.chip_mask & IDLE_CHK_E1H) && is_e1h) &&
!((rec.chip_mask & IDLE_CHK_E2) && is_e2) &&
!((rec.chip_mask & IDLE_CHK_E3A0) && is_e3a0) &&
!((rec.chip_mask & IDLE_CHK_E3B0) && is_e3b0))
continue;
/* identify macro */
switch (rec.macro) {
case 1:
/* read single reg and call predicate */
rec.pred_args.val1 = REG_RD(bp, rec.reg1);
DP(BNX2X_MSG_IDLE, "mac1 add %x\n", rec.reg1);
if (rec.bnx2x_predicate(&rec.pred_args)) {
snprintf(message, sizeof(message),
"%s.Value is 0x%x\n", rec.fail_msg,
rec.pred_args.val1);
bnx2x_self_test_log(bp, rec.severity, message);
}
break;
case 2:
/* read repeatedly starting from reg1 and call
* predicate after each read
*/
for (i = 0; i < rec.loop; i++) {
rec.pred_args.val1 =
REG_RD(bp, rec.reg1 + i * rec.incr);
DP(BNX2X_MSG_IDLE, "mac2 add %x\n", rec.reg1);
if (rec.bnx2x_predicate(&rec.pred_args)) {
snprintf(message, sizeof(message),
"%s. Value is 0x%x in loop %d\n",
rec.fail_msg,
rec.pred_args.val1, i);
bnx2x_self_test_log(bp, rec.severity,
message);
}
}
break;
case 3:
/* read two regs and call predicate */
rec.pred_args.val1 = REG_RD(bp, rec.reg1);
rec.pred_args.val2 = REG_RD(bp, rec.reg2);
DP(BNX2X_MSG_IDLE, "mac3 add1 %x add2 %x\n",
rec.reg1, rec.reg2);
if (rec.bnx2x_predicate(&rec.pred_args)) {
snprintf(message, sizeof(message),
"%s. Values are 0x%x 0x%x\n",
rec.fail_msg, rec.pred_args.val1,
rec.pred_args.val2);
bnx2x_self_test_log(bp, rec.severity, message);
}
break;
case 4:
			/* unused to date */
for (i = 0; i < rec.loop; i++) {
rec.pred_args.val1 =
REG_RD(bp, rec.reg1 + i * rec.incr);
rec.pred_args.val2 =
(REG_RD(bp,
rec.reg2 + i * rec.incr)) >> 1;
if (rec.bnx2x_predicate(&rec.pred_args)) {
snprintf(message, sizeof(message),
"%s. Values are 0x%x 0x%x in loop %d\n",
rec.fail_msg,
rec.pred_args.val1,
rec.pred_args.val2, i);
bnx2x_self_test_log(bp, rec.severity,
message);
}
}
break;
case 5:
			/* compare two regs, depending on
			 * the value of a condition reg
			 */
rec.pred_args.val1 = REG_RD(bp, rec.reg1);
rec.pred_args.val2 = REG_RD(bp, rec.reg2);
DP(BNX2X_MSG_IDLE, "mac3 add1 %x add2 %x add3 %x\n",
rec.reg1, rec.reg2, rec.reg3);
if (REG_RD(bp, rec.reg3) != 0) {
if (rec.bnx2x_predicate(&rec.pred_args)) {
snprintf(message, sizeof(message),
"%s. Values are 0x%x 0x%x\n",
rec.fail_msg,
rec.pred_args.val1,
rec.pred_args.val2);
bnx2x_self_test_log(bp, rec.severity,
message);
}
}
break;
case 6:
/* compare read and write pointers
* and read and write banks in QM
*/
bnx2x_idle_chk6(bp, &rec, message);
break;
case 7:
/* compare cfc info cam with cid cam */
bnx2x_idle_chk7(bp, &rec, message);
break;
default:
DP(BNX2X_MSG_IDLE,
"unknown macro in self test data base. macro %d line %d",
rec.macro, st_ind);
}
}
/* abort if interface is not running */
if (!netif_running(bp->dev))
return idle_chk_errors;
	/* return value according to statistics */
if (idle_chk_errors == 0) {
DP(BNX2X_MSG_IDLE,
"completed successfully (logged %d warnings)\n",
idle_chk_warnings);
} else {
BNX2X_ERR("failed (with %d errors, %d warnings)\n",
idle_chk_errors, idle_chk_warnings);
}
return idle_chk_errors;
}
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
*/
#include "rtw8852a_rfk_table.h"
static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs[] = {
RTW89_DECL_RFK_WM(0x12a8, 0x00000001, 0x00000001),
RTW89_DECL_RFK_WM(0x12a8, 0x0000000e, 0x00000002),
RTW89_DECL_RFK_WM(0x32a8, 0x00000001, 0x00000001),
RTW89_DECL_RFK_WM(0x32a8, 0x0000000e, 0x00000002),
RTW89_DECL_RFK_WM(0x12bc, 0x000000f0, 0x00000005),
RTW89_DECL_RFK_WM(0x12bc, 0x00000f00, 0x00000005),
RTW89_DECL_RFK_WM(0x12bc, 0x000f0000, 0x00000005),
RTW89_DECL_RFK_WM(0x12bc, 0x0000f000, 0x00000005),
RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
RTW89_DECL_RFK_WM(0x32bc, 0x000000f0, 0x00000005),
RTW89_DECL_RFK_WM(0x32bc, 0x00000f00, 0x00000005),
RTW89_DECL_RFK_WM(0x32bc, 0x000f0000, 0x00000005),
RTW89_DECL_RFK_WM(0x32bc, 0x0000f000, 0x00000005),
RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x00000019),
RTW89_DECL_RFK_WM(0x0304, 0x000000ff, 0x00000019),
RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x0000001d),
RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x00002044),
RTW89_DECL_RFK_WM(0x0318, 0x0000ffff, 0x00002042),
RTW89_DECL_RFK_WM(0x0318, 0xffff0000, 0x00002002),
RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x00000003),
RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x00000003),
RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x0000601e),
RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x0000601e),
RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x00000004),
RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x00000004),
RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x00000000),
RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs);
static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_2g[] = {
RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_5g[] = {
RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000044),
RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000044),
RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000044),
RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000044),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_5g);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_a[] = {
RTW89_DECL_RFK_WM(0x5800, 0x000000ff, 0x0000007f),
RTW89_DECL_RFK_WM(0x5800, 0x0000ff00, 0x00000080),
RTW89_DECL_RFK_WM(0x5800, 0x003f0000, 0x0000003f),
RTW89_DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5800, 0x20000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5800, 0xc0000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x00000040),
RTW89_DECL_RFK_WM(0x580c, 0x00007f00, 0x00000040),
RTW89_DECL_RFK_WM(0x580c, 0x00008000, 0x00000000),
RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5810, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x5810, 0x00000200, 0x00000000),
RTW89_DECL_RFK_WM(0x5810, 0x0000fc00, 0x00000000),
RTW89_DECL_RFK_WM(0x5810, 0x00010000, 0x00000001),
RTW89_DECL_RFK_WM(0x5810, 0x00fe0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5810, 0x01000000, 0x00000001),
RTW89_DECL_RFK_WM(0x5810, 0x06000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5810, 0x38000000, 0x00000003),
RTW89_DECL_RFK_WM(0x5810, 0x40000000, 0x00000001),
RTW89_DECL_RFK_WM(0x5810, 0x80000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x5814, 0x00000c00, 0x00000000),
RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x00000000),
RTW89_DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
RTW89_DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5814, 0x01c00000, 0x00000000),
RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5814, 0xe0000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5818, 0x000000ff, 0x00000000),
RTW89_DECL_RFK_WM(0x5818, 0x0001ff00, 0x00000018),
RTW89_DECL_RFK_WM(0x5818, 0x03fe0000, 0x00000016),
RTW89_DECL_RFK_WM(0x5818, 0xfc000000, 0x00000000),
RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
RTW89_DECL_RFK_WM(0x581c, 0x01e00000, 0x00000008),
RTW89_DECL_RFK_WM(0x581c, 0x01e00000, 0x0000000e),
RTW89_DECL_RFK_WM(0x581c, 0x1e000000, 0x00000008),
RTW89_DECL_RFK_WM(0x581c, 0x1e000000, 0x0000000e),
RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
RTW89_DECL_RFK_WM(0x5820, 0x0000f000, 0x0000000f),
RTW89_DECL_RFK_WM(0x5820, 0x001f0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5820, 0xffe00000, 0x00000000),
RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
RTW89_DECL_RFK_WM(0x5864, 0x03f00000, 0x00000000),
RTW89_DECL_RFK_WM(0x5864, 0x04000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x58a0, 0x000000ff, 0x000000fd),
RTW89_DECL_RFK_WM(0x58a0, 0x0000ff00, 0x000000e5),
RTW89_DECL_RFK_WM(0x58a0, 0x00ff0000, 0x000000cd),
RTW89_DECL_RFK_WM(0x58a0, 0xff000000, 0x000000b5),
RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x00000016),
RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x58b4, 0x0000001f, 0x00000000),
RTW89_DECL_RFK_WM(0x58b4, 0x00000020, 0x00000000),
RTW89_DECL_RFK_WM(0x58b4, 0x000001c0, 0x00000000),
RTW89_DECL_RFK_WM(0x58b4, 0x00000200, 0x00000000),
RTW89_DECL_RFK_WM(0x58b4, 0x0000f000, 0x00000002),
RTW89_DECL_RFK_WM(0x58b4, 0x00ff0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
RTW89_DECL_RFK_WM(0x58bc, 0x000000ff, 0x0000007f),
RTW89_DECL_RFK_WM(0x58bc, 0x0000ff00, 0x00000080),
RTW89_DECL_RFK_WM(0x58bc, 0x00030000, 0x00000003),
RTW89_DECL_RFK_WM(0x58bc, 0x000c0000, 0x00000001),
RTW89_DECL_RFK_WM(0x58bc, 0x00300000, 0x00000002),
RTW89_DECL_RFK_WM(0x58bc, 0x00c00000, 0x00000002),
RTW89_DECL_RFK_WM(0x58bc, 0x07000000, 0x00000007),
RTW89_DECL_RFK_WM(0x58c0, 0x00fe0000, 0x0000003f),
RTW89_DECL_RFK_WM(0x58c0, 0xff000000, 0x00000000),
RTW89_DECL_RFK_WM(0x58c4, 0x0003ffff, 0x0003ffff),
RTW89_DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58c4, 0xc0000000, 0x00000000),
RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x00000000),
RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x58d0, 0x00001fff, 0x00000101),
RTW89_DECL_RFK_WM(0x58d0, 0x0001e000, 0x00000004),
RTW89_DECL_RFK_WM(0x58d0, 0x03fe0000, 0x00000100),
RTW89_DECL_RFK_WM(0x58d0, 0x04000000, 0x00000000),
RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x000000ff),
RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x00000100),
RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
RTW89_DECL_RFK_WM(0x58d8, 0x0003fe00, 0x0000005c),
RTW89_DECL_RFK_WM(0x58d8, 0x000c0000, 0x00000002),
RTW89_DECL_RFK_WM(0x58d8, 0xfff00000, 0x00000800),
RTW89_DECL_RFK_WM(0x58dc, 0x000000ff, 0x0000007f),
RTW89_DECL_RFK_WM(0x58dc, 0x0000ff00, 0x00000080),
RTW89_DECL_RFK_WM(0x58dc, 0x00010000, 0x00000000),
RTW89_DECL_RFK_WM(0x58dc, 0x3ff00000, 0x00000000),
RTW89_DECL_RFK_WM(0x58dc, 0x80000000, 0x00000001),
RTW89_DECL_RFK_WM(0x58f0, 0x000001ff, 0x000001ff),
RTW89_DECL_RFK_WM(0x58f0, 0x0003fe00, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_b[] = {
RTW89_DECL_RFK_WM(0x7800, 0x000000ff, 0x0000007f),
RTW89_DECL_RFK_WM(0x7800, 0x0000ff00, 0x00000080),
RTW89_DECL_RFK_WM(0x7800, 0x003f0000, 0x0000003f),
RTW89_DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7800, 0x20000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7800, 0xc0000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x00000040),
RTW89_DECL_RFK_WM(0x780c, 0x00007f00, 0x00000040),
RTW89_DECL_RFK_WM(0x780c, 0x00008000, 0x00000000),
RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7810, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x7810, 0x00000200, 0x00000000),
RTW89_DECL_RFK_WM(0x7810, 0x0000fc00, 0x00000000),
RTW89_DECL_RFK_WM(0x7810, 0x00010000, 0x00000001),
RTW89_DECL_RFK_WM(0x7810, 0x00fe0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7810, 0x01000000, 0x00000001),
RTW89_DECL_RFK_WM(0x7810, 0x06000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7810, 0x38000000, 0x00000003),
RTW89_DECL_RFK_WM(0x7810, 0x40000000, 0x00000001),
RTW89_DECL_RFK_WM(0x7810, 0x80000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x7814, 0x00000c00, 0x00000000),
RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x00000000),
RTW89_DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
RTW89_DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7814, 0x01c00000, 0x00000000),
RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7814, 0xe0000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7818, 0x000000ff, 0x00000000),
RTW89_DECL_RFK_WM(0x7818, 0x0001ff00, 0x00000018),
RTW89_DECL_RFK_WM(0x7818, 0x03fe0000, 0x00000016),
RTW89_DECL_RFK_WM(0x7818, 0xfc000000, 0x00000000),
RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
RTW89_DECL_RFK_WM(0x781c, 0x01e00000, 0x00000008),
RTW89_DECL_RFK_WM(0x781c, 0x01e00000, 0x0000000e),
RTW89_DECL_RFK_WM(0x781c, 0x1e000000, 0x00000008),
RTW89_DECL_RFK_WM(0x781c, 0x1e000000, 0x0000000e),
RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
RTW89_DECL_RFK_WM(0x7820, 0x0000f000, 0x00000000),
RTW89_DECL_RFK_WM(0x7820, 0x001f0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7820, 0xffe00000, 0x00000000),
RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7858, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000000),
RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
RTW89_DECL_RFK_WM(0x7864, 0x03f00000, 0x00000000),
RTW89_DECL_RFK_WM(0x7864, 0x04000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x78a0, 0x000000ff, 0x000000fd),
RTW89_DECL_RFK_WM(0x78a0, 0x0000ff00, 0x000000e5),
RTW89_DECL_RFK_WM(0x78a0, 0x00ff0000, 0x000000cd),
RTW89_DECL_RFK_WM(0x78a0, 0xff000000, 0x000000b5),
RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x00000016),
RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x78b4, 0x0000001f, 0x00000000),
RTW89_DECL_RFK_WM(0x78b4, 0x00000020, 0x00000000),
RTW89_DECL_RFK_WM(0x78b4, 0x000001c0, 0x00000000),
RTW89_DECL_RFK_WM(0x78b4, 0x00000200, 0x00000000),
RTW89_DECL_RFK_WM(0x78b4, 0x0000f000, 0x00000002),
RTW89_DECL_RFK_WM(0x78b4, 0x00ff0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
RTW89_DECL_RFK_WM(0x78bc, 0x000000ff, 0x0000007f),
RTW89_DECL_RFK_WM(0x78bc, 0x0000ff00, 0x00000080),
RTW89_DECL_RFK_WM(0x78bc, 0x00030000, 0x00000003),
RTW89_DECL_RFK_WM(0x78bc, 0x000c0000, 0x00000001),
RTW89_DECL_RFK_WM(0x78bc, 0x00300000, 0x00000002),
RTW89_DECL_RFK_WM(0x78bc, 0x00c00000, 0x00000002),
RTW89_DECL_RFK_WM(0x78bc, 0x07000000, 0x00000007),
RTW89_DECL_RFK_WM(0x78c0, 0x00fe0000, 0x0000003f),
RTW89_DECL_RFK_WM(0x78c0, 0xff000000, 0x00000000),
RTW89_DECL_RFK_WM(0x78c4, 0x0003ffff, 0x0003ffff),
RTW89_DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78c4, 0xc0000000, 0x00000000),
RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x00000000),
RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x78d0, 0x00001fff, 0x00000101),
RTW89_DECL_RFK_WM(0x78d0, 0x0001e000, 0x00000004),
RTW89_DECL_RFK_WM(0x78d0, 0x03fe0000, 0x00000100),
RTW89_DECL_RFK_WM(0x78d0, 0x04000000, 0x00000000),
RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x000000ff),
RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x00000100),
RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
RTW89_DECL_RFK_WM(0x78d8, 0x0003fe00, 0x0000005c),
RTW89_DECL_RFK_WM(0x78d8, 0x000c0000, 0x00000002),
RTW89_DECL_RFK_WM(0x78d8, 0xfff00000, 0x00000800),
RTW89_DECL_RFK_WM(0x78dc, 0x000000ff, 0x0000007f),
RTW89_DECL_RFK_WM(0x78dc, 0x0000ff00, 0x00000080),
RTW89_DECL_RFK_WM(0x78dc, 0x00010000, 0x00000000),
RTW89_DECL_RFK_WM(0x78dc, 0x3ff00000, 0x00000000),
RTW89_DECL_RFK_WM(0x78dc, 0x80000000, 0x00000001),
RTW89_DECL_RFK_WM(0x78f0, 0x000001ff, 0x000001ff),
RTW89_DECL_RFK_WM(0x78f0, 0x0003fe00, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_2g[] = {
RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000013c),
RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000013c),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_5g[] = {
RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_5g);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a[] = {
RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fc),
RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x00000020),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b[] = {
RTW89_DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fc),
RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x00000020),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_a[] = {
RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x00000001),
RTW89_DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
RTW89_DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x00000003),
RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_b[] = {
RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x00000001),
RTW89_DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
RTW89_DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x00000003),
RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_a[] = {
RTW89_DECL_RFK_WM(0x58b0, 0x00000fff, 0x00000000),
RTW89_DECL_RFK_WM(0x58b0, 0x00000800, 0x00000001),
RTW89_DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_b[] = {
RTW89_DECL_RFK_WM(0x78b0, 0x00000fff, 0x00000000),
RTW89_DECL_RFK_WM(0x78b0, 0x00000800, 0x00000001),
RTW89_DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_a[] = {
RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
RTW89_DECL_RFK_WM(0x58cc, 0x00001000, 0x00000001),
RTW89_DECL_RFK_WM(0x58cc, 0x00000007, 0x00000000),
RTW89_DECL_RFK_WM(0x58cc, 0x00000038, 0x00000001),
RTW89_DECL_RFK_WM(0x58cc, 0x000001c0, 0x00000002),
RTW89_DECL_RFK_WM(0x58cc, 0x00000e00, 0x00000003),
RTW89_DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x5898, 0x000000ff, 0x00000040),
RTW89_DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x5898, 0x0000ff00, 0x00000040),
RTW89_DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x5898, 0x00ff0000, 0x00000040),
RTW89_DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x5898, 0xff000000, 0x00000040),
RTW89_DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x589c, 0x000000ff, 0x00000040),
RTW89_DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x589c, 0x0000ff00, 0x00000040),
RTW89_DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x589c, 0x00ff0000, 0x00000040),
RTW89_DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x589c, 0xff000000, 0x00000040),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_b[] = {
RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
RTW89_DECL_RFK_WM(0x78cc, 0x00001000, 0x00000001),
RTW89_DECL_RFK_WM(0x78cc, 0x00000007, 0x00000000),
RTW89_DECL_RFK_WM(0x78cc, 0x00000038, 0x00000001),
RTW89_DECL_RFK_WM(0x78cc, 0x000001c0, 0x00000002),
RTW89_DECL_RFK_WM(0x78cc, 0x00000e00, 0x00000003),
RTW89_DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x7898, 0x000000ff, 0x00000040),
RTW89_DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x7898, 0x0000ff00, 0x00000040),
RTW89_DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x7898, 0x00ff0000, 0x00000040),
RTW89_DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x7898, 0xff000000, 0x00000040),
RTW89_DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x789c, 0x000000ff, 0x00000040),
RTW89_DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x789c, 0x0000ff00, 0x00000040),
	RTW89_DECL_RFK_WM(0x7858, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x789c, 0x00ff0000, 0x00000040),
RTW89_DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000040),
RTW89_DECL_RFK_WM(0x789c, 0xff000000, 0x00000040),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_a[] = {
RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_b[] = {
RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_a[] = {
RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x00000001),
RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
RTW89_DECL_RFK_WM(0x5820, 0x0000f000, 0x00000001),
RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
RTW89_DECL_RFK_WM(0x5810, 0x20000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x00000001),
RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x00000001),
RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x00000001),
RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_b[] = {
RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x00000001),
RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
RTW89_DECL_RFK_WM(0x7820, 0x0000f000, 0x00000001),
RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
RTW89_DECL_RFK_WM(0x7810, 0x20000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x00000001),
RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x00000001),
RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x00000001),
RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_a[] = {
RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x00000000),
RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
RTW89_DECL_RFK_WM(0x5814, 0x01000000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_b[] = {
RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x00000000),
RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
RTW89_DECL_RFK_WM(0x7814, 0x01000000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_a[] = {
RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001),
RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
RTW89_DECL_RFK_WM(0x58e4, 0x00008000, 0x00000000),
RTW89_DECL_RFK_WM(0x58e4, 0x000f0000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_b[] = {
RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001),
RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
RTW89_DECL_RFK_WM(0x78e4, 0x00008000, 0x00000000),
RTW89_DECL_RFK_WM(0x78e4, 0x000f0000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_2g[] = {
RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d0),
RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001e8),
RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x0000000b),
RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000088),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_1[] = {
RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d7),
RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fb),
RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000005),
RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x0000007c),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_1);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_3[] = {
RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d8),
RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fc),
RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000006),
RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000078),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_3);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_4[] = {
RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001e5),
RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x0000000a),
RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000011),
RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000075),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_4);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_2g[] = {
RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001cc),
RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001e2),
RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000005),
RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000089),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_1[] = {
RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001d5),
RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001fc),
RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000005),
RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000079),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_1);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_3[] = {
RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001dc),
RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000002),
RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000000b),
RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000076),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_3);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_4[] = {
RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001f0),
RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000016),
RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000001f),
RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000072),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_4);
static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_a[] = {
RTW89_DECL_RFK_WRF(0x0, 0x55, 0x00080, 0x00001),
RTW89_DECL_RFK_WM(0x5818, 0x000000ff, 0x000000c0),
RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000001),
RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000001),
RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000003),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_b[] = {
RTW89_DECL_RFK_WRF(0x1, 0x55, 0x00080, 0x00001),
RTW89_DECL_RFK_WM(0x7818, 0x000000ff, 0x000000c0),
RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000001),
RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000001),
RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000003),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_disable_defs[] = {
RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000001),
RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000001),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_disable_defs);
static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_ab[] = {
RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x1),
RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x3),
RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x1),
RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x3),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_ab);
static const struct rtw89_reg5_def rtw8852a_tssi_tracking_defs[] = {
RTW89_DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
RTW89_DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
RTW89_DECL_RFK_WM(0x58f0, 0xfff00000, 0x00000400),
RTW89_DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
RTW89_DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
RTW89_DECL_RFK_WM(0x78f0, 0xfff00000, 0x00000400),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_tracking_defs);
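/* The AFE init/reset sequences below frequently clear (WC) and then set
 * (WS) the same mask back to back, e.g. 0x12ec bit 15; this pulses the
 * bit, presumably to reset the corresponding block before calibration.
 */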
static const struct rtw89_reg5_def rtw8852a_rfk_afe_init_defs[] = {
RTW89_DECL_RFK_WC(0x12ec, 0x00008000),
RTW89_DECL_RFK_WS(0x12ec, 0x00008000),
RTW89_DECL_RFK_WC(0x5e00, 0x00000001),
RTW89_DECL_RFK_WS(0x5e00, 0x00000001),
RTW89_DECL_RFK_WC(0x32ec, 0x00008000),
RTW89_DECL_RFK_WS(0x32ec, 0x00008000),
RTW89_DECL_RFK_WC(0x7e00, 0x00000001),
RTW89_DECL_RFK_WS(0x7e00, 0x00000001),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_afe_init_defs);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_a[] = {
RTW89_DECL_RFK_WS(0x5e00, 0x00000008),
RTW89_DECL_RFK_WS(0x5e50, 0x00000008),
RTW89_DECL_RFK_WS(0x5e10, 0x80000000),
RTW89_DECL_RFK_WS(0x5e60, 0x80000000),
RTW89_DECL_RFK_WC(0x5e00, 0x00000008),
RTW89_DECL_RFK_WC(0x5e50, 0x00000008),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_b[] = {
RTW89_DECL_RFK_WS(0x7e00, 0x00000008),
RTW89_DECL_RFK_WS(0x7e50, 0x00000008),
RTW89_DECL_RFK_WS(0x7e10, 0x80000000),
RTW89_DECL_RFK_WS(0x7e60, 0x80000000),
RTW89_DECL_RFK_WC(0x7e00, 0x00000008),
RTW89_DECL_RFK_WC(0x7e50, 0x00000008),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_a[] = {
RTW89_DECL_RFK_WC(0x20f4, 0x01000000),
RTW89_DECL_RFK_WS(0x20f8, 0x80000000),
RTW89_DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
RTW89_DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
RTW89_DECL_RFK_WC(0x20f0, 0x0000000f),
RTW89_DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000002),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_b[] = {
RTW89_DECL_RFK_WC(0x20f4, 0x01000000),
RTW89_DECL_RFK_WS(0x20f8, 0x80000000),
RTW89_DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
RTW89_DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
RTW89_DECL_RFK_WC(0x20f0, 0x0000000f),
RTW89_DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000003),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_a[] = {
RTW89_DECL_RFK_WC(0x12d8, 0x00000030),
RTW89_DECL_RFK_WC(0x32d8, 0x00000030),
RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
RTW89_DECL_RFK_WC(0x032c, 0x40000000),
RTW89_DECL_RFK_WC(0x032c, 0x00400000),
RTW89_DECL_RFK_WS(0x032c, 0x00400000),
RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
RTW89_DECL_RFK_WC(0x032c, 0x00010000),
RTW89_DECL_RFK_WS(0x12dc, 0x00000002),
RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_a[] = {
RTW89_DECL_RFK_WS(0x12d8, 0x000000c0),
RTW89_DECL_RFK_WS(0x12d8, 0x00000800),
RTW89_DECL_RFK_WC(0x12d8, 0x00000800),
RTW89_DECL_RFK_DELAY(1),
RTW89_DECL_RFK_WM(0x12d8, 0x00000300, 0x00000001),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_a[] = {
RTW89_DECL_RFK_WC(0x12dc, 0x00000002),
RTW89_DECL_RFK_WS(0x032c, 0x00010000),
RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
RTW89_DECL_RFK_WS(0x032c, 0x40000000),
RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_b[] = {
RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
RTW89_DECL_RFK_WC(0x032c, 0x40000000),
RTW89_DECL_RFK_WC(0x032c, 0x00400000),
RTW89_DECL_RFK_WS(0x032c, 0x00400000),
RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
RTW89_DECL_RFK_WC(0x032c, 0x00010000),
RTW89_DECL_RFK_WS(0x32dc, 0x00000002),
RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_b[] = {
RTW89_DECL_RFK_WS(0x32d8, 0x000000c0),
RTW89_DECL_RFK_WS(0x32d8, 0x00000800),
RTW89_DECL_RFK_WC(0x32d8, 0x00000800),
RTW89_DECL_RFK_DELAY(1),
RTW89_DECL_RFK_WM(0x32d8, 0x00000300, 0x00000001),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_b[] = {
RTW89_DECL_RFK_WC(0x32dc, 0x00000002),
RTW89_DECL_RFK_WS(0x032c, 0x00010000),
RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
RTW89_DECL_RFK_WS(0x032c, 0x40000000),
RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_a[] = {
RTW89_DECL_RFK_WC(0x032c, 0x40000000),
RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
RTW89_DECL_RFK_WC(0x032c, 0x00010000),
RTW89_DECL_RFK_WS(0x12dc, 0x00000001),
RTW89_DECL_RFK_WS(0x12e8, 0x00000004),
RTW89_DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00001),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_a);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_b[] = {
RTW89_DECL_RFK_WC(0x032c, 0x40000000),
RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
RTW89_DECL_RFK_WC(0x032c, 0x00010000),
RTW89_DECL_RFK_WS(0x32dc, 0x00000001),
RTW89_DECL_RFK_WS(0x32e8, 0x00000004),
RTW89_DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00001),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_b);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_a[] = {
RTW89_DECL_RFK_WC(0x12dc, 0x00000001),
RTW89_DECL_RFK_WC(0x12e8, 0x00000004),
RTW89_DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00000),
RTW89_DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_a);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_b[] = {
RTW89_DECL_RFK_WC(0x32dc, 0x00000001),
RTW89_DECL_RFK_WC(0x32e8, 0x00000004),
RTW89_DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00000),
RTW89_DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_a[] = {
RTW89_DECL_RFK_WS(0x5e00, 0x00000008),
RTW89_DECL_RFK_WC(0x5e10, 0x80000000),
RTW89_DECL_RFK_WS(0x5e50, 0x00000008),
RTW89_DECL_RFK_WC(0x5e60, 0x80000000),
RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000003),
RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
RTW89_DECL_RFK_WS(0x030c, 0x10000000),
RTW89_DECL_RFK_WC(0x032c, 0x80000000),
RTW89_DECL_RFK_WS(0x12e0, 0x00010000),
RTW89_DECL_RFK_WS(0x12e4, 0x0c000000),
RTW89_DECL_RFK_WM(0x5e00, 0x03ff0000, 0x00000030),
RTW89_DECL_RFK_WM(0x5e50, 0x03ff0000, 0x00000030),
RTW89_DECL_RFK_WC(0x5e00, 0x0c000000),
RTW89_DECL_RFK_WC(0x5e50, 0x0c000000),
RTW89_DECL_RFK_WC(0x5e0c, 0x00000008),
RTW89_DECL_RFK_WC(0x5e5c, 0x00000008),
RTW89_DECL_RFK_WS(0x5e0c, 0x00000001),
RTW89_DECL_RFK_WS(0x5e5c, 0x00000001),
RTW89_DECL_RFK_DELAY(1),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_a[] = {
RTW89_DECL_RFK_WC(0x12e4, 0x0c000000),
RTW89_DECL_RFK_WS(0x5e0c, 0x00000008),
RTW89_DECL_RFK_WS(0x5e5c, 0x00000008),
RTW89_DECL_RFK_DELAY(1),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_a[] = {
RTW89_DECL_RFK_WC(0x5e0c, 0x00000001),
RTW89_DECL_RFK_WC(0x5e5c, 0x00000001),
RTW89_DECL_RFK_WC(0x12e0, 0x00010000),
RTW89_DECL_RFK_WC(0x12a0, 0x00008000),
RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_b[] = {
RTW89_DECL_RFK_WS(0x7e00, 0x00000008),
RTW89_DECL_RFK_WC(0x7e10, 0x80000000),
RTW89_DECL_RFK_WS(0x7e50, 0x00000008),
RTW89_DECL_RFK_WC(0x7e60, 0x80000000),
RTW89_DECL_RFK_WS(0x32a0, 0x00008000),
RTW89_DECL_RFK_WM(0x32a0, 0x00007000, 0x00000003),
RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
RTW89_DECL_RFK_WS(0x030c, 0x10000000),
RTW89_DECL_RFK_WC(0x032c, 0x80000000),
RTW89_DECL_RFK_WS(0x32e0, 0x00010000),
RTW89_DECL_RFK_WS(0x32e4, 0x0c000000),
RTW89_DECL_RFK_WM(0x7e00, 0x03ff0000, 0x00000030),
RTW89_DECL_RFK_WM(0x7e50, 0x03ff0000, 0x00000030),
RTW89_DECL_RFK_WC(0x7e00, 0x0c000000),
RTW89_DECL_RFK_WC(0x7e50, 0x0c000000),
RTW89_DECL_RFK_WC(0x7e0c, 0x00000008),
RTW89_DECL_RFK_WC(0x7e5c, 0x00000008),
RTW89_DECL_RFK_WS(0x7e0c, 0x00000001),
RTW89_DECL_RFK_WS(0x7e5c, 0x00000001),
RTW89_DECL_RFK_DELAY(1),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_b[] = {
RTW89_DECL_RFK_WC(0x32e4, 0x0c000000),
RTW89_DECL_RFK_WM(0x7e0c, 0x00000008, 0x00000001),
RTW89_DECL_RFK_WM(0x7e5c, 0x00000008, 0x00000001),
RTW89_DECL_RFK_DELAY(1),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_b[] = {
RTW89_DECL_RFK_WC(0x7e0c, 0x00000001),
RTW89_DECL_RFK_WC(0x7e5c, 0x00000001),
RTW89_DECL_RFK_WC(0x32e0, 0x00010000),
RTW89_DECL_RFK_WC(0x32a0, 0x00008000),
RTW89_DECL_RFK_WS(0x32a0, 0x00007000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_a[] = {
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
RTW89_DECL_RFK_WS(0x12b8, 0x10000000),
RTW89_DECL_RFK_WS(0x58c8, 0x01000000),
RTW89_DECL_RFK_WS(0x5864, 0xc0000000),
RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
RTW89_DECL_RFK_WS(0x0c1c, 0x00000004),
RTW89_DECL_RFK_WS(0x0700, 0x08000000),
RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
RTW89_DECL_RFK_WS(0x58ac, 0x08000000),
RTW89_DECL_RFK_WS(0x0c3c, 0x00000200),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_a[] = {
RTW89_DECL_RFK_WS(0x4490, 0x80000000),
RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
RTW89_DECL_RFK_WS(0x12a0, 0x00080000),
RTW89_DECL_RFK_WS(0x0700, 0x01000000),
RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_b[] = {
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
RTW89_DECL_RFK_WS(0x32b8, 0x10000000),
RTW89_DECL_RFK_WS(0x78c8, 0x01000000),
RTW89_DECL_RFK_WS(0x7864, 0xc0000000),
RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
RTW89_DECL_RFK_WS(0x2c1c, 0x00000004),
RTW89_DECL_RFK_WS(0x2700, 0x08000000),
RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
RTW89_DECL_RFK_WS(0x78ac, 0x08000000),
RTW89_DECL_RFK_WS(0x2c3c, 0x00000200),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_b[] = {
RTW89_DECL_RFK_WS(0x6490, 0x80000000),
RTW89_DECL_RFK_WS(0x32a0, 0x00007000),
RTW89_DECL_RFK_WS(0x32a0, 0x00008000),
RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
RTW89_DECL_RFK_WS(0x32a0, 0x00080000),
RTW89_DECL_RFK_WS(0x2700, 0x01000000),
RTW89_DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_s_defs_ab[] = {
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
RTW89_DECL_RFK_WS(0x12b8, 0x10000000),
RTW89_DECL_RFK_WS(0x58c8, 0x01000000),
RTW89_DECL_RFK_WS(0x78c8, 0x01000000),
RTW89_DECL_RFK_WS(0x5864, 0xc0000000),
RTW89_DECL_RFK_WS(0x7864, 0xc0000000),
RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
RTW89_DECL_RFK_WS(0x0c1c, 0x00000004),
RTW89_DECL_RFK_WS(0x0700, 0x08000000),
RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
RTW89_DECL_RFK_WS(0x58ac, 0x08000000),
RTW89_DECL_RFK_WS(0x78ac, 0x08000000),
RTW89_DECL_RFK_WS(0x0c3c, 0x00000200),
RTW89_DECL_RFK_WS(0x2344, 0x80000000),
RTW89_DECL_RFK_WS(0x4490, 0x80000000),
RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
RTW89_DECL_RFK_WS(0x12a0, 0x00080000),
RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
RTW89_DECL_RFK_WS(0x32a0, 0x00080000),
RTW89_DECL_RFK_WS(0x0700, 0x01000000),
RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_s_defs_ab);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_a[] = {
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
RTW89_DECL_RFK_WC(0x5864, 0xc0000000),
RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
RTW89_DECL_RFK_WC(0x0c1c, 0x00000004),
RTW89_DECL_RFK_WC(0x0700, 0x08000000),
RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
RTW89_DECL_RFK_WC(0x12a0, 0x000ff000),
RTW89_DECL_RFK_WC(0x0700, 0x07000000),
RTW89_DECL_RFK_WC(0x5864, 0x20000000),
RTW89_DECL_RFK_WC(0x0c3c, 0x00000200),
RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
RTW89_DECL_RFK_WC(0x58c8, 0x01000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_b[] = {
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
RTW89_DECL_RFK_WC(0x7864, 0xc0000000),
RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
RTW89_DECL_RFK_WC(0x2c1c, 0x00000004),
RTW89_DECL_RFK_WC(0x2700, 0x08000000),
RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
RTW89_DECL_RFK_WC(0x32a0, 0x000ff000),
RTW89_DECL_RFK_WC(0x2700, 0x07000000),
RTW89_DECL_RFK_WC(0x7864, 0x20000000),
RTW89_DECL_RFK_WC(0x2c3c, 0x00000200),
RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
RTW89_DECL_RFK_WC(0x78c8, 0x01000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_ab[] = {
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
RTW89_DECL_RFK_WC(0x5864, 0xc0000000),
RTW89_DECL_RFK_WC(0x7864, 0xc0000000),
RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
RTW89_DECL_RFK_WC(0x0c1c, 0x00000004),
RTW89_DECL_RFK_WC(0x0700, 0x08000000),
RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
RTW89_DECL_RFK_WC(0x12a0, 0x000ff000),
RTW89_DECL_RFK_WC(0x32a0, 0x000ff000),
RTW89_DECL_RFK_WC(0x0700, 0x07000000),
RTW89_DECL_RFK_WC(0x5864, 0x20000000),
RTW89_DECL_RFK_WC(0x7864, 0x20000000),
RTW89_DECL_RFK_WC(0x0c3c, 0x00000200),
RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
RTW89_DECL_RFK_WC(0x58c8, 0x01000000),
RTW89_DECL_RFK_WC(0x78c8, 0x01000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_ab);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_f[] = {
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0000000f),
RTW89_DECL_RFK_DELAY(1),
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000003),
RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a001),
RTW89_DECL_RFK_DELAY(1),
RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a041),
RTW89_DECL_RFK_WS(0x8074, 0x80000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_f);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_r[] = {
RTW89_DECL_RFK_WC(0x8074, 0x80000000),
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0000001f),
RTW89_DECL_RFK_DELAY(1),
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
RTW89_DECL_RFK_DELAY(1),
RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_r);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_pas_read_defs[] = {
RTW89_DECL_RFK_WM(0x80d4, 0x00ff0000, 0x00000006),
RTW89_DECL_RFK_WC(0x80bc, 0x00004000),
RTW89_DECL_RFK_WM(0x80c0, 0x00ff0000, 0x00000008),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_pas_read_defs);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_nondbcc_path01[] = {
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
RTW89_DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
RTW89_DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
RTW89_DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
RTW89_DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
RTW89_DECL_RFK_WM(0x2344, 0x80000000, 0x00000001),
RTW89_DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
RTW89_DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
RTW89_DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
RTW89_DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_nondbcc_path01);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path0[] = {
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
RTW89_DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
RTW89_DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
RTW89_DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
RTW89_DECL_RFK_WM(0x2320, 0x00000001, 0x00000001),
RTW89_DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
RTW89_DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
RTW89_DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path0);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path1[] = {
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
RTW89_DECL_RFK_WM(0x32b8, 0x10000000, 0x00000001),
RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
RTW89_DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000001),
RTW89_DECL_RFK_WM(0x2700, 0x08000000, 0x00000001),
RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
RTW89_DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
RTW89_DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000001),
RTW89_DECL_RFK_WM(0x6490, 0x80000000, 0x00000001),
RTW89_DECL_RFK_WM(0x32a0, 0x00007000, 0x00000007),
RTW89_DECL_RFK_WM(0x32a0, 0x00008000, 0x00000001),
RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
RTW89_DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
RTW89_DECL_RFK_WM(0x2700, 0x01000000, 0x00000001),
RTW89_DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path1);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_nondbcc_path01[] = {
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
RTW89_DECL_RFK_WM(0x2320, 0x00000001, 0x00000000),
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_nondbcc_path01);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path0[] = {
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
RTW89_DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path0);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path1[] = {
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
RTW89_DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000000),
RTW89_DECL_RFK_WM(0x2700, 0x08000000, 0x00000000),
RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
RTW89_DECL_RFK_WM(0x2700, 0x07000000, 0x00000000),
RTW89_DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
RTW89_DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000000),
RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
};
RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path1);
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Spreadtrum Communications Inc.
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/sprd-dma.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#define SPRD_SPI_TXD 0x0
#define SPRD_SPI_CLKD 0x4
#define SPRD_SPI_CTL0 0x8
#define SPRD_SPI_CTL1 0xc
#define SPRD_SPI_CTL2 0x10
#define SPRD_SPI_CTL3 0x14
#define SPRD_SPI_CTL4 0x18
#define SPRD_SPI_CTL5 0x1c
#define SPRD_SPI_INT_EN 0x20
#define SPRD_SPI_INT_CLR 0x24
#define SPRD_SPI_INT_RAW_STS 0x28
#define SPRD_SPI_INT_MASK_STS 0x2c
#define SPRD_SPI_STS1 0x30
#define SPRD_SPI_STS2 0x34
#define SPRD_SPI_DSP_WAIT 0x38
#define SPRD_SPI_STS3 0x3c
#define SPRD_SPI_CTL6 0x40
#define SPRD_SPI_STS4 0x44
#define SPRD_SPI_FIFO_RST 0x48
#define SPRD_SPI_CTL7 0x4c
#define SPRD_SPI_STS5 0x50
#define SPRD_SPI_CTL8 0x54
#define SPRD_SPI_CTL9 0x58
#define SPRD_SPI_CTL10 0x5c
#define SPRD_SPI_CTL11 0x60
#define SPRD_SPI_CTL12 0x64
#define SPRD_SPI_STS6 0x68
#define SPRD_SPI_STS7 0x6c
#define SPRD_SPI_STS8 0x70
#define SPRD_SPI_STS9 0x74
/* Bits & mask definition for register CTL0 */
#define SPRD_SPI_SCK_REV BIT(13)
#define SPRD_SPI_NG_TX BIT(1)
#define SPRD_SPI_NG_RX BIT(0)
#define SPRD_SPI_CHNL_LEN_MASK GENMASK(4, 0)
#define SPRD_SPI_CSN_MASK GENMASK(11, 8)
#define SPRD_SPI_CS0_VALID BIT(8)
/* Bits & mask definition for register SPI_INT_EN */
#define SPRD_SPI_TX_END_INT_EN BIT(8)
#define SPRD_SPI_RX_END_INT_EN BIT(9)
/* Bits & mask definition for register SPI_INT_RAW_STS */
#define SPRD_SPI_TX_END_RAW BIT(8)
#define SPRD_SPI_RX_END_RAW BIT(9)
/* Bits & mask definition for register SPI_INT_CLR */
#define SPRD_SPI_TX_END_CLR BIT(8)
#define SPRD_SPI_RX_END_CLR BIT(9)
/* Bits & mask definition for register INT_MASK_STS */
#define SPRD_SPI_MASK_RX_END BIT(9)
#define SPRD_SPI_MASK_TX_END BIT(8)
/* Bits & mask definition for register STS2 */
#define SPRD_SPI_TX_BUSY BIT(8)
/* Bits & mask definition for register CTL1 */
#define SPRD_SPI_RX_MODE BIT(12)
#define SPRD_SPI_TX_MODE BIT(13)
#define SPRD_SPI_RTX_MD_MASK GENMASK(13, 12)
/* Bits & mask definition for register CTL2 */
#define SPRD_SPI_DMA_EN BIT(6)
/* Bits & mask definition for register CTL4 */
#define SPRD_SPI_START_RX BIT(9)
#define SPRD_SPI_ONLY_RECV_MASK GENMASK(8, 0)
/* Bits & mask definition for register SPI_INT_CLR */
#define SPRD_SPI_RX_END_INT_CLR BIT(9)
#define SPRD_SPI_TX_END_INT_CLR BIT(8)
/* Bits & mask definition for register SPI_INT_RAW */
#define SPRD_SPI_RX_END_IRQ BIT(9)
#define SPRD_SPI_TX_END_IRQ BIT(8)
/* Bits & mask definition for register CTL12 */
#define SPRD_SPI_SW_RX_REQ BIT(0)
#define SPRD_SPI_SW_TX_REQ BIT(1)
/* Bits & mask definition for register CTL7 */
#define SPRD_SPI_DATA_LINE2_EN BIT(15)
#define SPRD_SPI_MODE_MASK GENMASK(5, 3)
#define SPRD_SPI_MODE_OFFSET 3
#define SPRD_SPI_3WIRE_MODE 4
#define SPRD_SPI_4WIRE_MODE 0
/* Bits & mask definition for register CTL8 */
#define SPRD_SPI_TX_MAX_LEN_MASK GENMASK(19, 0)
#define SPRD_SPI_TX_LEN_H_MASK GENMASK(3, 0)
#define SPRD_SPI_TX_LEN_H_OFFSET 16
/* Bits & mask definition for register CTL9 */
#define SPRD_SPI_TX_LEN_L_MASK GENMASK(15, 0)
/* Bits & mask definition for register CTL10 */
#define SPRD_SPI_RX_MAX_LEN_MASK GENMASK(19, 0)
#define SPRD_SPI_RX_LEN_H_MASK GENMASK(3, 0)
#define SPRD_SPI_RX_LEN_H_OFFSET 16
/* Bits & mask definition for register CTL11 */
#define SPRD_SPI_RX_LEN_L_MASK GENMASK(15, 0)
/* Default & maximum word delay cycles */
#define SPRD_SPI_MIN_DELAY_CYCLE 14
#define SPRD_SPI_MAX_DELAY_CYCLE 130
#define SPRD_SPI_FIFO_SIZE 32
#define SPRD_SPI_CHIP_CS_NUM 0x4
#define SPRD_SPI_CHNL_LEN 2
#define SPRD_SPI_DEFAULT_SOURCE 26000000
#define SPRD_SPI_MAX_SPEED_HZ 48000000
#define SPRD_SPI_AUTOSUSPEND_DELAY 100
#define SPRD_SPI_DMA_STEP 8
enum sprd_spi_dma_channel {
SPRD_SPI_RX,
SPRD_SPI_TX,
SPRD_SPI_MAX,
};
struct sprd_spi_dma {
bool enable;
struct dma_chan *dma_chan[SPRD_SPI_MAX];
enum dma_slave_buswidth width;
u32 fragmens_len;
u32 rx_len;
};
struct sprd_spi {
void __iomem *base;
phys_addr_t phy_base;
struct device *dev;
struct clk *clk;
int irq;
u32 src_clk;
u32 hw_mode;
u32 trans_len;
u32 trans_mode;
u32 word_delay;
u32 hw_speed_hz;
u32 len;
int status;
struct sprd_spi_dma dma;
struct completion xfer_completion;
const void *tx_buf;
void *rx_buf;
int (*read_bufs)(struct sprd_spi *ss, u32 len);
int (*write_bufs)(struct sprd_spi *ss, u32 len);
};
static u32 sprd_spi_transfer_max_timeout(struct sprd_spi *ss,
struct spi_transfer *t)
{
/*
* The time spent on transmission of the full FIFO data is the maximum
* SPI transmission time.
*/
u32 size = t->bits_per_word * SPRD_SPI_FIFO_SIZE;
u32 bit_time_us = DIV_ROUND_UP(USEC_PER_SEC, ss->hw_speed_hz);
u32 total_time_us = size * bit_time_us;
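	/*
	 * Illustrative example: with 8 bits per word at 1 MHz, size is
	 * 8 * 32 = 256 bits and bit_time_us is 1, so total_time_us is 256 us.
	 */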
	/*
	 * Our SPI hardware inserts an interval between data words, so the
	 * total transmission time must also include that interval time.
	 */
u32 interval_cycle = SPRD_SPI_FIFO_SIZE * ss->word_delay;
u32 interval_time_us = DIV_ROUND_UP(interval_cycle * USEC_PER_SEC,
ss->src_clk);
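	/*
	 * Illustrative example: with a 26 MHz source clock and a 14-cycle
	 * word delay, interval_cycle is 32 * 14 = 448 and interval_time_us
	 * rounds up to 18 us.
	 */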
return total_time_us + interval_time_us;
}
static int sprd_spi_wait_for_tx_end(struct sprd_spi *ss, struct spi_transfer *t)
{
u32 val, us;
int ret;
us = sprd_spi_transfer_max_timeout(ss, t);
ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_INT_RAW_STS, val,
val & SPRD_SPI_TX_END_IRQ, 0, us);
if (ret) {
dev_err(ss->dev, "SPI error, spi send timeout!\n");
return ret;
}
ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_STS2, val,
!(val & SPRD_SPI_TX_BUSY), 0, us);
if (ret) {
dev_err(ss->dev, "SPI error, spi busy timeout!\n");
return ret;
}
writel_relaxed(SPRD_SPI_TX_END_INT_CLR, ss->base + SPRD_SPI_INT_CLR);
return 0;
}
static int sprd_spi_wait_for_rx_end(struct sprd_spi *ss, struct spi_transfer *t)
{
u32 val, us;
int ret;
us = sprd_spi_transfer_max_timeout(ss, t);
ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_INT_RAW_STS, val,
val & SPRD_SPI_RX_END_IRQ, 0, us);
if (ret) {
dev_err(ss->dev, "SPI error, spi rx timeout!\n");
return ret;
}
writel_relaxed(SPRD_SPI_RX_END_INT_CLR, ss->base + SPRD_SPI_INT_CLR);
return 0;
}
static void sprd_spi_tx_req(struct sprd_spi *ss)
{
writel_relaxed(SPRD_SPI_SW_TX_REQ, ss->base + SPRD_SPI_CTL12);
}
static void sprd_spi_rx_req(struct sprd_spi *ss)
{
writel_relaxed(SPRD_SPI_SW_RX_REQ, ss->base + SPRD_SPI_CTL12);
}
static void sprd_spi_enter_idle(struct sprd_spi *ss)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL1);
val &= ~SPRD_SPI_RTX_MD_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL1);
}
static void sprd_spi_set_transfer_bits(struct sprd_spi *ss, u32 bits)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
/* Set the valid bits for every transaction */
val &= ~(SPRD_SPI_CHNL_LEN_MASK << SPRD_SPI_CHNL_LEN);
val |= bits << SPRD_SPI_CHNL_LEN;
writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
}
static void sprd_spi_set_tx_length(struct sprd_spi *ss, u32 length)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL8);
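	/*
	 * The 20-bit TX length is split across two registers: bits 19:16
	 * go into CTL8 and bits 15:0 into CTL9.
	 */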
length &= SPRD_SPI_TX_MAX_LEN_MASK;
val &= ~SPRD_SPI_TX_LEN_H_MASK;
val |= length >> SPRD_SPI_TX_LEN_H_OFFSET;
writel_relaxed(val, ss->base + SPRD_SPI_CTL8);
val = length & SPRD_SPI_TX_LEN_L_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL9);
}
static void sprd_spi_set_rx_length(struct sprd_spi *ss, u32 length)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL10);
length &= SPRD_SPI_RX_MAX_LEN_MASK;
val &= ~SPRD_SPI_RX_LEN_H_MASK;
val |= length >> SPRD_SPI_RX_LEN_H_OFFSET;
writel_relaxed(val, ss->base + SPRD_SPI_CTL10);
val = length & SPRD_SPI_RX_LEN_L_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL11);
}
static void sprd_spi_chipselect(struct spi_device *sdev, bool cs)
{
struct spi_controller *sctlr = sdev->controller;
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
u32 val;
val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
	/* The SPI controller pulls the CS pin down when cs is 0 */
if (!cs) {
val &= ~SPRD_SPI_CS0_VALID;
writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
} else {
val |= SPRD_SPI_CSN_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
}
}
static int sprd_spi_write_only_receive(struct sprd_spi *ss, u32 len)
{
u32 val;
/* Clear the start receive bit and reset receive data number */
val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
val &= ~(SPRD_SPI_START_RX | SPRD_SPI_ONLY_RECV_MASK);
writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
/* Set the receive data length */
val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
val |= len & SPRD_SPI_ONLY_RECV_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
/* Trigger to receive data */
val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
val |= SPRD_SPI_START_RX;
writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
return len;
}
static int sprd_spi_write_bufs_u8(struct sprd_spi *ss, u32 len)
{
u8 *tx_p = (u8 *)ss->tx_buf;
int i;
for (i = 0; i < len; i++)
writeb_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
ss->tx_buf += i;
return i;
}
static int sprd_spi_write_bufs_u16(struct sprd_spi *ss, u32 len)
{
u16 *tx_p = (u16 *)ss->tx_buf;
int i;
for (i = 0; i < len; i++)
writew_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
ss->tx_buf += i << 1;
return i << 1;
}
static int sprd_spi_write_bufs_u32(struct sprd_spi *ss, u32 len)
{
u32 *tx_p = (u32 *)ss->tx_buf;
int i;
for (i = 0; i < len; i++)
writel_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
ss->tx_buf += i << 2;
return i << 2;
}
static int sprd_spi_read_bufs_u8(struct sprd_spi *ss, u32 len)
{
u8 *rx_p = (u8 *)ss->rx_buf;
int i;
for (i = 0; i < len; i++)
rx_p[i] = readb_relaxed(ss->base + SPRD_SPI_TXD);
ss->rx_buf += i;
return i;
}
static int sprd_spi_read_bufs_u16(struct sprd_spi *ss, u32 len)
{
u16 *rx_p = (u16 *)ss->rx_buf;
int i;
for (i = 0; i < len; i++)
rx_p[i] = readw_relaxed(ss->base + SPRD_SPI_TXD);
ss->rx_buf += i << 1;
return i << 1;
}
static int sprd_spi_read_bufs_u32(struct sprd_spi *ss, u32 len)
{
u32 *rx_p = (u32 *)ss->rx_buf;
int i;
for (i = 0; i < len; i++)
rx_p[i] = readl_relaxed(ss->base + SPRD_SPI_TXD);
ss->rx_buf += i << 2;
return i << 2;
}
static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t)
{
struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
u32 trans_len = ss->trans_len, len;
int ret, write_size = 0, read_size = 0;
while (trans_len) {
len = trans_len > SPRD_SPI_FIFO_SIZE ? SPRD_SPI_FIFO_SIZE :
trans_len;
if (ss->trans_mode & SPRD_SPI_TX_MODE) {
sprd_spi_set_tx_length(ss, len);
write_size += ss->write_bufs(ss, len);
			/*
			 * In 3-wire mode or dual TX line mode, we need to
			 * explicitly request the controller to transfer.
			 */
if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
sprd_spi_tx_req(ss);
ret = sprd_spi_wait_for_tx_end(ss, t);
} else {
sprd_spi_set_rx_length(ss, len);
			/*
			 * In 3-wire mode or dual TX line mode, we need to
			 * explicitly request the controller to read.
			 */
if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
sprd_spi_rx_req(ss);
else
write_size += ss->write_bufs(ss, len);
ret = sprd_spi_wait_for_rx_end(ss, t);
}
if (ret)
goto complete;
if (ss->trans_mode & SPRD_SPI_RX_MODE)
read_size += ss->read_bufs(ss, len);
trans_len -= len;
}
if (ss->trans_mode & SPRD_SPI_TX_MODE)
ret = write_size;
else
ret = read_size;
complete:
sprd_spi_enter_idle(ss);
return ret;
}
static void sprd_spi_irq_enable(struct sprd_spi *ss)
{
u32 val;
/* Clear interrupt status before enabling interrupt. */
writel_relaxed(SPRD_SPI_TX_END_CLR | SPRD_SPI_RX_END_CLR,
ss->base + SPRD_SPI_INT_CLR);
/* Enable SPI interrupt only in DMA mode. */
val = readl_relaxed(ss->base + SPRD_SPI_INT_EN);
writel_relaxed(val | SPRD_SPI_TX_END_INT_EN |
SPRD_SPI_RX_END_INT_EN,
ss->base + SPRD_SPI_INT_EN);
}
static void sprd_spi_irq_disable(struct sprd_spi *ss)
{
writel_relaxed(0, ss->base + SPRD_SPI_INT_EN);
}
static void sprd_spi_dma_enable(struct sprd_spi *ss, bool enable)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL2);
if (enable)
val |= SPRD_SPI_DMA_EN;
else
val &= ~SPRD_SPI_DMA_EN;
writel_relaxed(val, ss->base + SPRD_SPI_CTL2);
}
static int sprd_spi_dma_submit(struct dma_chan *dma_chan,
struct dma_slave_config *c,
struct sg_table *sg,
enum dma_transfer_direction dir)
{
struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;
unsigned long flags;
int ret;
ret = dmaengine_slave_config(dma_chan, c);
if (ret < 0)
return ret;
flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG,
SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
desc = dmaengine_prep_slave_sg(dma_chan, sg->sgl, sg->nents, dir, flags);
if (!desc)
return -ENODEV;
cookie = dmaengine_submit(desc);
if (dma_submit_error(cookie))
return dma_submit_error(cookie);
dma_async_issue_pending(dma_chan);
return 0;
}
static int sprd_spi_dma_rx_config(struct sprd_spi *ss, struct spi_transfer *t)
{
struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_RX];
struct dma_slave_config config = {
.src_addr = ss->phy_base,
.src_addr_width = ss->dma.width,
.dst_addr_width = ss->dma.width,
.dst_maxburst = ss->dma.fragmens_len,
};
int ret;
ret = sprd_spi_dma_submit(dma_chan, &config, &t->rx_sg, DMA_DEV_TO_MEM);
if (ret)
return ret;
return ss->dma.rx_len;
}
static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t)
{
struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_TX];
struct dma_slave_config config = {
.dst_addr = ss->phy_base,
.src_addr_width = ss->dma.width,
.dst_addr_width = ss->dma.width,
.src_maxburst = ss->dma.fragmens_len,
};
int ret;
ret = sprd_spi_dma_submit(dma_chan, &config, &t->tx_sg, DMA_MEM_TO_DEV);
if (ret)
return ret;
return t->len;
}
static int sprd_spi_dma_request(struct sprd_spi *ss)
{
ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn");
if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX]))
return dev_err_probe(ss->dev, PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]),
"request RX DMA channel failed!\n");
ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn");
if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) {
dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
return dev_err_probe(ss->dev, PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]),
"request TX DMA channel failed!\n");
}
return 0;
}
static void sprd_spi_dma_release(struct sprd_spi *ss)
{
if (ss->dma.dma_chan[SPRD_SPI_RX])
dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
if (ss->dma.dma_chan[SPRD_SPI_TX])
dma_release_channel(ss->dma.dma_chan[SPRD_SPI_TX]);
}
static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev,
struct spi_transfer *t)
{
struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
u32 trans_len = ss->trans_len;
int ret, write_size = 0;
reinit_completion(&ss->xfer_completion);
sprd_spi_irq_enable(ss);
if (ss->trans_mode & SPRD_SPI_TX_MODE) {
write_size = sprd_spi_dma_tx_config(ss, t);
sprd_spi_set_tx_length(ss, trans_len);
		/*
		 * In 3-wire mode or dual TX line mode, we need to
		 * explicitly request the controller to transfer.
		 */
if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
sprd_spi_tx_req(ss);
} else {
sprd_spi_set_rx_length(ss, trans_len);
		/*
		 * In 3-wire mode or dual TX line mode, we need to
		 * explicitly request the controller to read.
		 */
if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
sprd_spi_rx_req(ss);
else
write_size = ss->write_bufs(ss, trans_len);
}
if (write_size < 0) {
ret = write_size;
dev_err(ss->dev, "failed to write, ret = %d\n", ret);
goto trans_complete;
}
if (ss->trans_mode & SPRD_SPI_RX_MODE) {
		/*
		 * Set up the DMA receive data length, which must be an
		 * integral multiple of the fragment length. When the receive
		 * length is shorter than the fragment length, the DMA can be
		 * configured for the actual receive length instead; any
		 * remainder is picked up by PIO in the interrupt handler.
		 */
ss->dma.rx_len = t->len > ss->dma.fragmens_len ?
(t->len - t->len % ss->dma.fragmens_len) :
t->len;
ret = sprd_spi_dma_rx_config(ss, t);
if (ret < 0) {
dev_err(&sdev->dev,
"failed to configure rx DMA, ret = %d\n", ret);
goto trans_complete;
}
}
sprd_spi_dma_enable(ss, true);
wait_for_completion(&(ss->xfer_completion));
if (ss->trans_mode & SPRD_SPI_TX_MODE)
ret = write_size;
else
ret = ss->dma.rx_len;
trans_complete:
sprd_spi_dma_enable(ss, false);
sprd_spi_enter_idle(ss);
sprd_spi_irq_disable(ss);
return ret;
}
static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz)
{
	/*
	 * Per the SPI datasheet, the prescaler calculation formula is:
	 * prescale = SPI source clock / (2 * SPI_freq) - 1;
	 */
u32 clk_div = DIV_ROUND_UP(ss->src_clk, speed_hz << 1) - 1;
/* Save the real hardware speed */
ss->hw_speed_hz = (ss->src_clk >> 1) / (clk_div + 1);
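	/*
	 * Illustrative example: with a 26 MHz source clock and a requested
	 * 1 MHz, clk_div = DIV_ROUND_UP(26000000, 2000000) - 1 = 12, and
	 * the real rate is 13000000 / 13 = 1 MHz exactly.
	 */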
writel_relaxed(clk_div, ss->base + SPRD_SPI_CLKD);
}
static int sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
{
struct spi_delay *d = &t->word_delay;
u16 word_delay, interval;
u32 val;
if (d->unit != SPI_DELAY_UNIT_SCK)
return -EINVAL;
val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX);
/* Set default chip selection, clock phase and clock polarity */
val |= ss->hw_mode & SPI_CPHA ? SPRD_SPI_NG_RX : SPRD_SPI_NG_TX;
val |= ss->hw_mode & SPI_CPOL ? SPRD_SPI_SCK_REV : 0;
writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
	/*
	 * Set the interval between two SPI frames; per the datasheet, the
	 * interval calculation formula is:
	 * interval time (source clock cycles) = interval * 4 + 10.
	 */
*/
word_delay = clamp_t(u16, d->value, SPRD_SPI_MIN_DELAY_CYCLE,
SPRD_SPI_MAX_DELAY_CYCLE);
interval = DIV_ROUND_UP(word_delay - 10, 4);
ss->word_delay = interval * 4 + 10;
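	/*
	 * Illustrative example: a requested delay of 14 cycles gives
	 * interval = DIV_ROUND_UP(14 - 10, 4) = 1, so the effective word
	 * delay is 1 * 4 + 10 = 14 source clock cycles.
	 */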
writel_relaxed(interval, ss->base + SPRD_SPI_CTL5);
/* Reset SPI fifo */
writel_relaxed(1, ss->base + SPRD_SPI_FIFO_RST);
writel_relaxed(0, ss->base + SPRD_SPI_FIFO_RST);
/* Set SPI work mode */
val = readl_relaxed(ss->base + SPRD_SPI_CTL7);
val &= ~SPRD_SPI_MODE_MASK;
if (ss->hw_mode & SPI_3WIRE)
val |= SPRD_SPI_3WIRE_MODE << SPRD_SPI_MODE_OFFSET;
else
val |= SPRD_SPI_4WIRE_MODE << SPRD_SPI_MODE_OFFSET;
if (ss->hw_mode & SPI_TX_DUAL)
val |= SPRD_SPI_DATA_LINE2_EN;
else
val &= ~SPRD_SPI_DATA_LINE2_EN;
writel_relaxed(val, ss->base + SPRD_SPI_CTL7);
return 0;
}
static int sprd_spi_setup_transfer(struct spi_device *sdev,
struct spi_transfer *t)
{
struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
u8 bits_per_word = t->bits_per_word;
u32 val, mode = 0;
int ret;
ss->len = t->len;
ss->tx_buf = t->tx_buf;
ss->rx_buf = t->rx_buf;
ss->hw_mode = sdev->mode;
ret = sprd_spi_init_hw(ss, t);
if (ret)
return ret;
/* Set transfer speed and valid bits */
sprd_spi_set_speed(ss, t->speed_hz);
sprd_spi_set_transfer_bits(ss, bits_per_word);
if (bits_per_word > 16)
bits_per_word = round_up(bits_per_word, 16);
else
bits_per_word = round_up(bits_per_word, 8);
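	/*
	 * The switch below picks the 8-, 16- or 32-bit FIFO access width
	 * matching the rounded word size.
	 */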
switch (bits_per_word) {
case 8:
ss->trans_len = t->len;
ss->read_bufs = sprd_spi_read_bufs_u8;
ss->write_bufs = sprd_spi_write_bufs_u8;
ss->dma.width = DMA_SLAVE_BUSWIDTH_1_BYTE;
ss->dma.fragmens_len = SPRD_SPI_DMA_STEP;
break;
case 16:
ss->trans_len = t->len >> 1;
ss->read_bufs = sprd_spi_read_bufs_u16;
ss->write_bufs = sprd_spi_write_bufs_u16;
ss->dma.width = DMA_SLAVE_BUSWIDTH_2_BYTES;
ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 1;
break;
case 32:
ss->trans_len = t->len >> 2;
ss->read_bufs = sprd_spi_read_bufs_u32;
ss->write_bufs = sprd_spi_write_bufs_u32;
ss->dma.width = DMA_SLAVE_BUSWIDTH_4_BYTES;
ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 2;
break;
default:
return -EINVAL;
}
/* Set transfer read or write mode */
val = readl_relaxed(ss->base + SPRD_SPI_CTL1);
val &= ~SPRD_SPI_RTX_MD_MASK;
if (t->tx_buf)
mode |= SPRD_SPI_TX_MODE;
if (t->rx_buf)
mode |= SPRD_SPI_RX_MODE;
writel_relaxed(val | mode, ss->base + SPRD_SPI_CTL1);
ss->trans_mode = mode;
	/*
	 * In receive-only mode, we need to trigger the SPI controller to
	 * receive data automatically.
	 */
if (ss->trans_mode == SPRD_SPI_RX_MODE)
ss->write_bufs = sprd_spi_write_only_receive;
return 0;
}
static int sprd_spi_transfer_one(struct spi_controller *sctlr,
struct spi_device *sdev,
struct spi_transfer *t)
{
int ret;
ret = sprd_spi_setup_transfer(sdev, t);
if (ret)
goto setup_err;
if (sctlr->can_dma(sctlr, sdev, t))
ret = sprd_spi_dma_txrx_bufs(sdev, t);
else
ret = sprd_spi_txrx_bufs(sdev, t);
if (ret == t->len)
ret = 0;
else if (ret >= 0)
ret = -EREMOTEIO;
setup_err:
spi_finalize_current_transfer(sctlr);
return ret;
}
static irqreturn_t sprd_spi_handle_irq(int irq, void *data)
{
struct sprd_spi *ss = (struct sprd_spi *)data;
u32 val = readl_relaxed(ss->base + SPRD_SPI_INT_MASK_STS);
if (val & SPRD_SPI_MASK_TX_END) {
writel_relaxed(SPRD_SPI_TX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
if (!(ss->trans_mode & SPRD_SPI_RX_MODE))
complete(&ss->xfer_completion);
return IRQ_HANDLED;
}
if (val & SPRD_SPI_MASK_RX_END) {
writel_relaxed(SPRD_SPI_RX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
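		/*
		 * The DMA transfer is fragment aligned; read any remaining
		 * tail bytes out of the FIFO by PIO.
		 */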
if (ss->dma.rx_len < ss->len) {
ss->rx_buf += ss->dma.rx_len;
ss->dma.rx_len +=
ss->read_bufs(ss, ss->len - ss->dma.rx_len);
}
complete(&ss->xfer_completion);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss)
{
int ret;
ss->irq = platform_get_irq(pdev, 0);
if (ss->irq < 0)
return ss->irq;
ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq,
0, pdev->name, ss);
if (ret)
dev_err(&pdev->dev, "failed to request spi irq %d, ret = %d\n",
ss->irq, ret);
return ret;
}
static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
{
struct clk *clk_spi, *clk_parent;
clk_spi = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(clk_spi)) {
dev_warn(&pdev->dev, "can't get the spi clock\n");
clk_spi = NULL;
}
clk_parent = devm_clk_get(&pdev->dev, "source");
if (IS_ERR(clk_parent)) {
dev_warn(&pdev->dev, "can't get the source clock\n");
clk_parent = NULL;
}
ss->clk = devm_clk_get(&pdev->dev, "enable");
if (IS_ERR(ss->clk)) {
dev_err(&pdev->dev, "can't get the enable clock\n");
return PTR_ERR(ss->clk);
}
if (!clk_set_parent(clk_spi, clk_parent))
ss->src_clk = clk_get_rate(clk_spi);
else
ss->src_clk = SPRD_SPI_DEFAULT_SOURCE;
return 0;
}
static bool sprd_spi_can_dma(struct spi_controller *sctlr,
struct spi_device *spi, struct spi_transfer *t)
{
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
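	/* DMA is only worthwhile for transfers larger than the FIFO. */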
return ss->dma.enable && (t->len > SPRD_SPI_FIFO_SIZE);
}
static int sprd_spi_dma_init(struct platform_device *pdev, struct sprd_spi *ss)
{
int ret;
ret = sprd_spi_dma_request(ss);
if (ret) {
if (ret == -EPROBE_DEFER)
return ret;
dev_warn(&pdev->dev,
"failed to request dma, enter no dma mode, ret = %d\n",
ret);
return 0;
}
ss->dma.enable = true;
return 0;
}
static int sprd_spi_probe(struct platform_device *pdev)
{
struct spi_controller *sctlr;
struct resource *res;
struct sprd_spi *ss;
int ret;
pdev->id = of_alias_get_id(pdev->dev.of_node, "spi");
sctlr = spi_alloc_host(&pdev->dev, sizeof(*ss));
if (!sctlr)
return -ENOMEM;
ss = spi_controller_get_devdata(sctlr);
ss->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ss->base)) {
ret = PTR_ERR(ss->base);
goto free_controller;
}
ss->phy_base = res->start;
ss->dev = &pdev->dev;
sctlr->dev.of_node = pdev->dev.of_node;
sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL;
sctlr->bus_num = pdev->id;
sctlr->set_cs = sprd_spi_chipselect;
sctlr->transfer_one = sprd_spi_transfer_one;
sctlr->can_dma = sprd_spi_can_dma;
sctlr->auto_runtime_pm = true;
	init_completion(&ss->xfer_completion);
	platform_set_drvdata(pdev, sctlr);
	ret = sprd_spi_clk_init(pdev, ss);
	if (ret)
		goto free_controller;
	/* ss->src_clk is only valid after clock init, so clamp the rate here. */
	sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1,
				    SPRD_SPI_MAX_SPEED_HZ);
ret = sprd_spi_irq_init(pdev, ss);
if (ret)
goto free_controller;
ret = sprd_spi_dma_init(pdev, ss);
if (ret)
goto free_controller;
ret = clk_prepare_enable(ss->clk);
if (ret)
goto release_dma;
ret = pm_runtime_set_active(&pdev->dev);
if (ret < 0)
goto disable_clk;
pm_runtime_set_autosuspend_delay(&pdev->dev,
SPRD_SPI_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to resume SPI controller\n");
goto err_rpm_put;
}
ret = devm_spi_register_controller(&pdev->dev, sctlr);
if (ret)
goto err_rpm_put;
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
err_rpm_put:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
disable_clk:
clk_disable_unprepare(ss->clk);
release_dma:
sprd_spi_dma_release(ss);
free_controller:
spi_controller_put(sctlr);
return ret;
}
static void sprd_spi_remove(struct platform_device *pdev)
{
struct spi_controller *sctlr = platform_get_drvdata(pdev);
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
int ret;
ret = pm_runtime_get_sync(ss->dev);
if (ret < 0)
dev_err(ss->dev, "failed to resume SPI controller\n");
spi_controller_suspend(sctlr);
if (ret >= 0) {
if (ss->dma.enable)
sprd_spi_dma_release(ss);
clk_disable_unprepare(ss->clk);
}
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
static int __maybe_unused sprd_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *sctlr = dev_get_drvdata(dev);
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
if (ss->dma.enable)
sprd_spi_dma_release(ss);
clk_disable_unprepare(ss->clk);
return 0;
}
static int __maybe_unused sprd_spi_runtime_resume(struct device *dev)
{
struct spi_controller *sctlr = dev_get_drvdata(dev);
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
int ret;
ret = clk_prepare_enable(ss->clk);
if (ret)
return ret;
if (!ss->dma.enable)
return 0;
ret = sprd_spi_dma_request(ss);
if (ret)
clk_disable_unprepare(ss->clk);
return ret;
}
static const struct dev_pm_ops sprd_spi_pm_ops = {
SET_RUNTIME_PM_OPS(sprd_spi_runtime_suspend,
sprd_spi_runtime_resume, NULL)
};
static const struct of_device_id sprd_spi_of_match[] = {
{ .compatible = "sprd,sc9860-spi", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sprd_spi_of_match);
static struct platform_driver sprd_spi_driver = {
.driver = {
.name = "sprd-spi",
.of_match_table = sprd_spi_of_match,
.pm = &sprd_spi_pm_ops,
},
.probe = sprd_spi_probe,
.remove = sprd_spi_remove,
};
module_platform_driver(sprd_spi_driver);
MODULE_DESCRIPTION("Spreadtrum SPI Controller driver");
MODULE_AUTHOR("Lanqing Liu <[email protected]>");
MODULE_LICENSE("GPL v2");
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers.c
*
* Copyright (c) 1999 The Puffin Group
* Copyright (c) 2001 Matthew Wilcox for Hewlett Packard
* Copyright (c) 2001-2023 Helge Deller <[email protected]>
* Copyright (c) 2001,2002 Ryan Bradetich
* Copyright (c) 2004-2005 Thibaut VARENE <[email protected]>
*
* The file handles registering devices and drivers, then matching them.
* It's the closest we get to a dating agency.
*
* If you're thinking about modifying this file, here are some gotchas to
* bear in mind:
* - 715/Mirage device paths have a dummy device between Lasi and its children
* - The EISA adapter may show up as a sibling or child of Wax
* - Dino has an optionally functional serial port. If firmware enables it,
* it shows up as a child of Dino. If firmware disables it, the buswalk
* finds it and it shows up as a child of Cujo
* - Dino has both parisc and pci devices as children
* - parisc devices are discovered in a random order, including children
* before parents in some cases.
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/dma-map-ops.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/pdc.h>
#include <asm/parisc-device.h>
#include <asm/ropes.h>
/* See comments in include/asm-parisc/pci.h */
const struct dma_map_ops *hppa_dma_ops __ro_after_init;
EXPORT_SYMBOL(hppa_dma_ops);
static struct device root = {
.init_name = "parisc",
};
static inline int check_dev(struct device *dev)
{
if (dev->bus == &parisc_bus_type) {
struct parisc_device *pdev;
pdev = to_parisc_device(dev);
return pdev->id.hw_type != HPHW_FAULTY;
}
return 1;
}
static struct device *
parse_tree_node(struct device *parent, int index, struct hardware_path *modpath);
struct recurse_struct {
void * obj;
int (*fn)(struct device *, void *);
};
static int descend_children(struct device * dev, void * data)
{
struct recurse_struct * recurse_data = (struct recurse_struct *)data;
if (recurse_data->fn(dev, recurse_data->obj))
return 1;
else
return device_for_each_child(dev, recurse_data, descend_children);
}
/**
* for_each_padev - Iterate over all devices in the tree
* @fn: Function to call for each device.
* @data: Data to pass to the called function.
*
* This performs a depth-first traversal of the tree, calling the
* function passed for each node. It calls the function for parents
* before children.
*/
static int for_each_padev(int (*fn)(struct device *, void *), void * data)
{
struct recurse_struct recurse_data = {
.obj = data,
.fn = fn,
};
return device_for_each_child(&root, &recurse_data, descend_children);
}
/**
* match_device - Report whether this driver can handle this device
* @driver: the PA-RISC driver to try
* @dev: the PA-RISC device to try
*/
static int match_device(const struct parisc_driver *driver, struct parisc_device *dev)
{
const struct parisc_device_id *ids;
for (ids = driver->id_table; ids->sversion; ids++) {
if ((ids->sversion != SVERSION_ANY_ID) &&
(ids->sversion != dev->id.sversion))
continue;
if ((ids->hw_type != HWTYPE_ANY_ID) &&
(ids->hw_type != dev->id.hw_type))
continue;
if ((ids->hversion != HVERSION_ANY_ID) &&
(ids->hversion != dev->id.hversion))
continue;
return 1;
}
return 0;
}
static int parisc_driver_probe(struct device *dev)
{
int rc;
struct parisc_device *pa_dev = to_parisc_device(dev);
struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
rc = pa_drv->probe(pa_dev);
if (!rc)
pa_dev->driver = pa_drv;
return rc;
}
static void __exit parisc_driver_remove(struct device *dev)
{
struct parisc_device *pa_dev = to_parisc_device(dev);
struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
if (pa_drv->remove)
pa_drv->remove(pa_dev);
}
/**
* register_parisc_driver - Register this driver if it can handle a device
* @driver: the PA-RISC driver to try
*/
int register_parisc_driver(struct parisc_driver *driver)
{
/* FIXME: we need this because apparently the sti
* driver can be registered twice */
if (driver->drv.name) {
pr_warn("BUG: skipping previously registered driver %s\n",
driver->name);
return 1;
}
if (!driver->probe) {
pr_warn("BUG: driver %s has no probe routine\n", driver->name);
return 1;
}
driver->drv.bus = &parisc_bus_type;
/* We install our own probe and remove routines */
WARN_ON(driver->drv.probe != NULL);
WARN_ON(driver->drv.remove != NULL);
driver->drv.name = driver->name;
return driver_register(&driver->drv);
}
EXPORT_SYMBOL(register_parisc_driver);
struct match_count {
struct parisc_driver * driver;
int count;
};
static int match_and_count(struct device * dev, void * data)
{
struct match_count * m = data;
struct parisc_device * pdev = to_parisc_device(dev);
if (check_dev(dev)) {
if (match_device(m->driver, pdev))
m->count++;
}
return 0;
}
/**
* count_parisc_driver - count # of devices this driver would match
* @driver: the PA-RISC driver to try
*
 * Used by IOMMU support to "guess" the right IOPdir size.
* Formula is something like memsize/(num_iommu * entry_size).
*/
int __init count_parisc_driver(struct parisc_driver *driver)
{
struct match_count m = {
.driver = driver,
.count = 0,
};
for_each_padev(match_and_count, &m);
return m.count;
}
/**
* unregister_parisc_driver - Unregister this driver from the list of drivers
* @driver: the PA-RISC driver to unregister
*/
int unregister_parisc_driver(struct parisc_driver *driver)
{
driver_unregister(&driver->drv);
return 0;
}
EXPORT_SYMBOL(unregister_parisc_driver);
struct find_data {
unsigned long hpa;
struct parisc_device * dev;
};
static int find_device(struct device * dev, void * data)
{
struct parisc_device * pdev = to_parisc_device(dev);
struct find_data * d = (struct find_data*)data;
if (check_dev(dev)) {
if (pdev->hpa.start == d->hpa) {
d->dev = pdev;
return 1;
}
}
return 0;
}
static struct parisc_device *find_device_by_addr(unsigned long hpa)
{
struct find_data d = {
.hpa = hpa,
};
int ret;
ret = for_each_padev(find_device, &d);
return ret ? d.dev : NULL;
}
static int __init is_IKE_device(struct device *dev, void *data)
{
struct parisc_device *pdev = to_parisc_device(dev);
if (!check_dev(dev))
return 0;
if (pdev->id.hw_type != HPHW_BCPORT)
return 0;
if (IS_IKE(pdev) ||
(pdev->id.hversion == REO_MERCED_PORT) ||
(pdev->id.hversion == REOG_MERCED_PORT)) {
return 1;
}
return 0;
}
int __init machine_has_merced_bus(void)
{
int ret;
ret = for_each_padev(is_IKE_device, NULL);
return ret ? 1 : 0;
}
/**
* find_pa_parent_type - Find a parent of a specific type
* @padev: The device to start searching from
* @type: The device type to search for.
*
* Walks up the device tree looking for a device of the specified type.
* If it finds it, it returns it. If not, it returns NULL.
*/
const struct parisc_device *
find_pa_parent_type(const struct parisc_device *padev, int type)
{
const struct device *dev = &padev->dev;
while (dev != &root) {
struct parisc_device *candidate = to_parisc_device(dev);
if (candidate->id.hw_type == type)
return candidate;
dev = dev->parent;
}
return NULL;
}
/*
* get_node_path fills in @path with the firmware path to the device.
* Note that if @node is a parisc device, we don't fill in the 'mod' field.
* This is because both callers pass the parent and fill in the mod
* themselves. If @node is a PCI device, we do fill it in, even though this
* is inconsistent.
*/
static void get_node_path(struct device *dev, struct hardware_path *path)
{
int i = 5;
memset(&path->bc, -1, 6);
if (dev_is_pci(dev)) {
unsigned int devfn = to_pci_dev(dev)->devfn;
path->mod = PCI_FUNC(devfn);
path->bc[i--] = PCI_SLOT(devfn);
dev = dev->parent;
}
while (dev != &root) {
if (dev_is_pci(dev)) {
unsigned int devfn = to_pci_dev(dev)->devfn;
path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5);
} else if (dev->bus == &parisc_bus_type) {
path->bc[i--] = to_parisc_device(dev)->hw_path;
}
dev = dev->parent;
}
}
static char *print_hwpath(struct hardware_path *path, char *output)
{
int i;
for (i = 0; i < 6; i++) {
if (path->bc[i] == -1)
continue;
output += sprintf(output, "%u/", (unsigned char) path->bc[i]);
}
output += sprintf(output, "%u", (unsigned char) path->mod);
return output;
}
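/*
 * For example, a path with bc = { -1, -1, -1, -1, 8, 0 } and mod = 5 is
 * printed as "8/0/5".
 */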
/**
* print_pa_hwpath - Returns hardware path for PA devices
* @dev: The device to return the path for
* @output: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the output array with a human-readable path
* to a PA device. This string is compatible with that used by PDC, and
* may be printed on the outside of the box.
*/
char *print_pa_hwpath(struct parisc_device *dev, char *output)
{
struct hardware_path path;
get_node_path(dev->dev.parent, &path);
path.mod = dev->hw_path;
return print_hwpath(&path, output);
}
EXPORT_SYMBOL(print_pa_hwpath);
#if defined(CONFIG_PCI) || defined(CONFIG_ISA)
/**
* get_pci_node_path - Determines the hardware path for a PCI device
* @pdev: The device to return the path for
* @path: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the hardware_path structure with the route to
* the specified PCI device. This structure is suitable for passing to
* PDC calls.
*/
void get_pci_node_path(struct pci_dev *pdev, struct hardware_path *path)
{
get_node_path(&pdev->dev, path);
}
EXPORT_SYMBOL(get_pci_node_path);
/**
* print_pci_hwpath - Returns hardware path for PCI devices
* @dev: The device to return the path for
* @output: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the output array with a human-readable path
* to a PCI device. This string is compatible with that used by PDC, and
* may be printed on the outside of the box.
*/
char *print_pci_hwpath(struct pci_dev *dev, char *output)
{
struct hardware_path path;
get_pci_node_path(dev, &path);
return print_hwpath(&path, output);
}
EXPORT_SYMBOL(print_pci_hwpath);
#endif /* defined(CONFIG_PCI) || defined(CONFIG_ISA) */
static void setup_bus_id(struct parisc_device *padev)
{
struct hardware_path path;
char name[28];
char *output = name;
int i;
get_node_path(padev->dev.parent, &path);
for (i = 0; i < 6; i++) {
if (path.bc[i] == -1)
continue;
output += sprintf(output, "%u:", (unsigned char) path.bc[i]);
}
sprintf(output, "%u", (unsigned char) padev->hw_path);
dev_set_name(&padev->dev, name);
}
static struct parisc_device * __init create_tree_node(char id,
struct device *parent)
{
struct parisc_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
dev->hw_path = id;
dev->id.hw_type = HPHW_FAULTY;
dev->dev.parent = parent;
setup_bus_id(dev);
dev->dev.bus = &parisc_bus_type;
dev->dma_mask = 0xffffffffUL; /* PARISC devices are 32-bit */
/* make the generic dma mask a pointer to the parisc one */
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.coherent_dma_mask = dev->dma_mask;
if (device_register(&dev->dev)) {
kfree(dev);
return NULL;
}
return dev;
}
struct match_id_data {
char id;
struct parisc_device * dev;
};
static int match_by_id(struct device * dev, void * data)
{
struct parisc_device * pdev = to_parisc_device(dev);
struct match_id_data * d = data;
if (pdev->hw_path == d->id) {
d->dev = pdev;
return 1;
}
return 0;
}
/**
* alloc_tree_node - returns a device entry in the iotree
* @parent: the parent node in the tree
* @id: the element of the module path for this entry
*
* Checks all the children of @parent for a matching @id. If none
* found, it allocates a new device and returns it.
*/
static struct parisc_device * __init alloc_tree_node(
struct device *parent, char id)
{
struct match_id_data d = {
.id = id,
};
if (device_for_each_child(parent, &d, match_by_id))
return d.dev;
else
return create_tree_node(id, parent);
}
static struct parisc_device *create_parisc_device(struct hardware_path *modpath)
{
int i;
struct device *parent = &root;
for (i = 0; i < 6; i++) {
if (modpath->bc[i] == -1)
continue;
parent = &alloc_tree_node(parent, modpath->bc[i])->dev;
}
return alloc_tree_node(parent, modpath->mod);
}
struct parisc_device * __init
alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
{
int status;
unsigned long bytecnt;
u8 iodc_data[32];
struct parisc_device *dev;
const char *name;
/* Check to make sure this device has not already been added - Ryan */
if (find_device_by_addr(hpa) != NULL)
return NULL;
status = pdc_iodc_read(&bytecnt, hpa, 0, &iodc_data, 32);
if (status != PDC_OK)
return NULL;
	dev = create_parisc_device(mod_path);
	if (!dev)
		return NULL;
	if (dev->id.hw_type != HPHW_FAULTY) {
pr_err("Two devices have hardware path [%s]. IODC data for second device: %7phN\n"
"Rearranging GSC cards sometimes helps\n",
parisc_pathname(dev), iodc_data);
return NULL;
}
dev->id.hw_type = iodc_data[3] & 0x1f;
dev->id.hversion = (iodc_data[0] << 4) | ((iodc_data[1] & 0xf0) >> 4);
dev->id.hversion_rev = iodc_data[1] & 0x0f;
dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) |
(iodc_data[5] << 8) | iodc_data[6];
dev->hpa.start = hpa;
/* This is awkward. The STI spec says that gfx devices may occupy
* 32MB or 64MB. Unfortunately, we don't know how to tell whether
* it's the former or the latter. Assumptions either way can hurt us.
*/
if (hpa == 0xf4000000 || hpa == 0xf8000000) {
dev->hpa.end = hpa + 0x03ffffff;
} else if (hpa == 0xf6000000 || hpa == 0xfa000000) {
dev->hpa.end = hpa + 0x01ffffff;
} else {
dev->hpa.end = hpa + 0xfff;
}
dev->hpa.flags = IORESOURCE_MEM;
dev->hpa.name = dev->name;
name = parisc_hardware_description(&dev->id) ? : "unknown";
snprintf(dev->name, sizeof(dev->name), "%s [%s]",
name, parisc_pathname(dev));
/* Silently fail things like mouse ports which are subsumed within
* the keyboard controller
*/
if ((hpa & 0xfff) == 0 && insert_resource(&iomem_resource, &dev->hpa))
pr_warn("Unable to claim HPA %lx for device %s\n", hpa, name);
return dev;
}
static int parisc_generic_match(struct device *dev, const struct device_driver *drv)
{
return match_device(to_parisc_driver(drv), to_parisc_device(dev));
}
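/* Build the modalias string (parisc:t..hv..rev..sv..) used for module
 * autoloading.
 */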
static ssize_t make_modalias(const struct device *dev, char *buf)
{
const struct parisc_device *padev = to_parisc_device(dev);
const struct parisc_device_id *id = &padev->id;
return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
(u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
(u32)id->sversion);
}
static int parisc_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct parisc_device *padev;
char modalias[40];
if (!dev)
return -ENODEV;
padev = to_parisc_device(dev);
if (!padev)
return -ENODEV;
if (add_uevent_var(env, "PARISC_NAME=%s", padev->name))
return -ENOMEM;
make_modalias(dev, modalias);
if (add_uevent_var(env, "MODALIAS=%s", modalias))
return -ENOMEM;
return 0;
}
#define pa_dev_attr(name, field, format_string) \
static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct parisc_device *padev = to_parisc_device(dev); \
return sprintf(buf, format_string, padev->field); \
} \
static DEVICE_ATTR_RO(name);
#define pa_dev_attr_id(field, format) pa_dev_attr(field, id.field, format)
pa_dev_attr(irq, irq, "%u\n");
pa_dev_attr_id(hw_type, "0x%02x\n");
pa_dev_attr(rev, id.hversion_rev, "0x%x\n");
pa_dev_attr_id(hversion, "0x%03x\n");
pa_dev_attr_id(sversion, "0x%05x\n");
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return make_modalias(dev, buf);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *parisc_device_attrs[] = {
&dev_attr_irq.attr,
&dev_attr_hw_type.attr,
&dev_attr_rev.attr,
&dev_attr_hversion.attr,
&dev_attr_sversion.attr,
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(parisc_device);
const struct bus_type parisc_bus_type = {
.name = "parisc",
.match = parisc_generic_match,
.uevent = parisc_uevent,
.dev_groups = parisc_device_groups,
.probe = parisc_driver_probe,
.remove = __exit_p(parisc_driver_remove),
};
/**
* register_parisc_device - Locate a driver to manage this device.
* @dev: The parisc device.
*
* Search the driver list for a driver that is willing to manage
* this device.
*/
int __init register_parisc_device(struct parisc_device *dev)
{
if (!dev)
return 0;
if (dev->driver)
return 1;
return 0;
}
/**
* match_pci_device - Matches a pci device against a given hardware path
* entry.
* @dev: the generic device (known to be contained by a pci_dev).
* @index: the current BC index
* @modpath: the hardware path.
* @return: true if the device matches the hardware path.
*/
static int match_pci_device(struct device *dev, int index,
struct hardware_path *modpath)
{
struct pci_dev *pdev = to_pci_dev(dev);
int id;
if (index == 5) {
/* we are at the end of the path, and on the actual device */
unsigned int devfn = pdev->devfn;
return ((modpath->bc[5] == PCI_SLOT(devfn)) &&
(modpath->mod == PCI_FUNC(devfn)));
}
/* index might be out of bounds for bc[] */
if (index >= 6)
return 0;
id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
return (modpath->bc[index] == id);
}
/**
* match_parisc_device - Matches a parisc device against a given hardware
* path entry.
* @dev: the generic device (known to be contained by a parisc_device).
* @index: the current BC index
* @modpath: the hardware path.
* @return: true if the device matches the hardware path.
*/
static int match_parisc_device(struct device *dev, int index,
struct hardware_path *modpath)
{
struct parisc_device *curr = to_parisc_device(dev);
char id = (index == 6) ? modpath->mod : modpath->bc[index];
return (curr->hw_path == id);
}
struct parse_tree_data {
int index;
struct hardware_path * modpath;
struct device * dev;
};
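/* device_for_each_child() callback used by parse_tree_node(): match the
 * child against the current hardware-path element, descending through
 * bus-less bridge devices. Iteration stops once d->dev is set.
 */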
static int check_parent(struct device * dev, void * data)
{
struct parse_tree_data * d = data;
if (check_dev(dev)) {
if (dev->bus == &parisc_bus_type) {
if (match_parisc_device(dev, d->index, d->modpath))
d->dev = dev;
} else if (dev_is_pci(dev)) {
if (match_pci_device(dev, d->index, d->modpath))
d->dev = dev;
} else if (dev->bus == NULL) {
/* we are on a bus bridge */
struct device *new = parse_tree_node(dev, d->index, d->modpath);
if (new)
d->dev = new;
}
}
return d->dev != NULL;
}
/**
* parse_tree_node - returns a device entry in the iotree
* @parent: the parent node in the tree
* @index: the current BC index
* @modpath: the hardware_path struct to match a device against
* @return: The corresponding device if found, NULL otherwise.
*
* Checks all the children of @parent for a matching @id. If none
* found, it returns NULL.
*/
static struct device *
parse_tree_node(struct device *parent, int index, struct hardware_path *modpath)
{
struct parse_tree_data d = {
.index = index,
.modpath = modpath,
};
struct recurse_struct recurse_data = {
.obj = &d,
.fn = check_parent,
};
if (device_for_each_child(parent, &recurse_data, descend_children))
{ /* nothing */ }
return d.dev;
}
/**
* hwpath_to_device - Finds the generic device corresponding to a given hardware path.
* @modpath: the hardware path.
* @return: The target device, NULL if not found.
*/
struct device *hwpath_to_device(struct hardware_path *modpath)
{
int i;
struct device *parent = &root;
for (i = 0; i < 6; i++) {
if (modpath->bc[i] == -1)
continue;
parent = parse_tree_node(parent, i, modpath);
if (!parent)
return NULL;
}
if (dev_is_pci(parent)) /* pci devices already parse MOD */
return parent;
else
return parse_tree_node(parent, 6, modpath);
}
EXPORT_SYMBOL(hwpath_to_device);
/**
* device_to_hwpath - Populates the hwpath corresponding to the given device.
* @dev: the target device
* @path: pointer to a previously allocated hwpath struct to be filled in
*/
void device_to_hwpath(struct device *dev, struct hardware_path *path)
{
struct parisc_device *padev;
if (dev->bus == &parisc_bus_type) {
padev = to_parisc_device(dev);
get_node_path(dev->parent, path);
path->mod = padev->hw_path;
} else if (dev_is_pci(dev)) {
get_node_path(dev, path);
}
}
EXPORT_SYMBOL(device_to_hwpath);
#define BC_PORT_MASK 0x8
#define BC_LOWER_PORT 0x8
#define BUS_CONVERTER(dev) \
((dev->id.hw_type == HPHW_IOA) || (dev->id.hw_type == HPHW_BCPORT))
#define IS_LOWER_PORT(dev) \
((gsc_readl(dev->hpa.start + offsetof(struct bc_module, io_status)) \
& BC_PORT_MASK) == BC_LOWER_PORT)
#define MAX_NATIVE_DEVICES 64
#define NATIVE_DEVICE_OFFSET 0x1000
#define FLEX_MASK F_EXTEND(0xfffc0000)
#define IO_IO_LOW offsetof(struct bc_module, io_io_low)
#define IO_IO_HIGH offsetof(struct bc_module, io_io_high)
#define READ_IO_IO_LOW(dev) (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_LOW)
#define READ_IO_IO_HIGH(dev) (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_HIGH)
static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
struct device *parent);
static void __init walk_lower_bus(struct parisc_device *dev)
{
unsigned long io_io_low, io_io_high;
if (!BUS_CONVERTER(dev) || IS_LOWER_PORT(dev))
return;
if (dev->id.hw_type == HPHW_IOA) {
io_io_low = (unsigned long)(signed int)(READ_IO_IO_LOW(dev) << 16);
io_io_high = io_io_low + MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET;
} else {
io_io_low = (READ_IO_IO_LOW(dev) + ~FLEX_MASK) & FLEX_MASK;
io_io_high = (READ_IO_IO_HIGH(dev) + ~FLEX_MASK) & FLEX_MASK;
}
walk_native_bus(io_io_low, io_io_high, &dev->dev);
}
/**
* walk_native_bus -- Probe a bus for devices
* @io_io_low: Base address of this bus.
* @io_io_high: Last address of this bus.
* @parent: The parent bus device.
*
* A native bus (eg Runway or GSC) may have up to 64 devices on it,
* spaced at intervals of 0x1000 bytes. PDC may not inform us of these
* devices, so we have to probe for them. Unfortunately, we may find
* devices which are not physically connected (such as extra serial &
* keyboard ports). This problem is not yet solved.
*/
static void __init walk_native_bus(unsigned long io_io_low,
unsigned long io_io_high, struct device *parent)
{
int i, devices_found = 0;
unsigned long hpa = io_io_low;
struct hardware_path path;
get_node_path(parent, &path);
do {
for (i = 0; i < MAX_NATIVE_DEVICES; i++, hpa += NATIVE_DEVICE_OFFSET) {
struct parisc_device *dev;
/* Was the device already added by Firmware? */
dev = find_device_by_addr(hpa);
if (!dev) {
path.mod = i;
dev = alloc_pa_dev(hpa, &path);
if (!dev)
continue;
register_parisc_device(dev);
devices_found++;
}
walk_lower_bus(dev);
}
} while (!devices_found && hpa < io_io_high);
}
#define CENTRAL_BUS_ADDR F_EXTEND(0xfff80000)
/**
* walk_central_bus - Find devices attached to the central bus
*
* PDC doesn't tell us about all devices in the system. This routine
* finds devices connected to the central bus.
*/
void __init walk_central_bus(void)
{
walk_native_bus(CENTRAL_BUS_ADDR,
CENTRAL_BUS_ADDR + (MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET),
&root);
}
static __init void print_parisc_device(struct parisc_device *dev)
{
static int count __initdata;
pr_info("%d. %s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }",
++count, dev->name, &(dev->hpa.start), dev->id.hw_type,
dev->id.hversion, dev->id.sversion, dev->id.hversion_rev);
if (dev->num_addrs) {
int k;
pr_cont(", additional addresses: ");
for (k = 0; k < dev->num_addrs; k++)
pr_cont("0x%lx ", dev->addr[k]);
}
pr_cont("\n");
}
/**
* init_parisc_bus - Some preparation to be done before inventory
*/
void __init init_parisc_bus(void)
{
if (bus_register(&parisc_bus_type))
panic("Could not register PA-RISC bus type\n");
if (device_register(&root))
panic("Could not register PA-RISC root device\n");
get_device(&root);
}
static __init void qemu_header(void)
{
int num;
unsigned long *p;
pr_info("--- cut here ---\n");
pr_info("/* AUTO-GENERATED HEADER FILE FOR SEABIOS FIRMWARE */\n");
pr_cont("/* generated with Linux kernel */\n");
pr_cont("/* search for PARISC_QEMU_MACHINE_HEADER in Linux */\n\n");
pr_info("#define PARISC_MODEL \"%s\"\n\n",
boot_cpu_data.pdc.sys_model_name);
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
"0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9]);
#undef p
pr_info("#define PARISC_PDC_VERSION 0x%04lx\n\n",
boot_cpu_data.pdc.versions);
pr_info("#define PARISC_PDC_CPUID 0x%04lx\n\n",
boot_cpu_data.pdc.cpuid);
pr_info("#define PARISC_PDC_CAPABILITIES 0x%04lx\n\n",
boot_cpu_data.pdc.capabilities);
pr_info("#define PARISC_PDC_ENTRY_ORG 0x%04lx\n\n",
#ifdef CONFIG_64BIT
(unsigned long)(PAGE0->mem_pdc_hi) << 32 |
#endif
(unsigned long)PAGE0->mem_pdc);
pr_info("#define PARISC_PDC_CACHE_INFO");
p = (unsigned long *) &cache_info;
for (num = 0; num < sizeof(cache_info); num += sizeof(unsigned long)) {
if ((num % 5) == 0) {
pr_cont(" \\\n");
pr_info("\t");
}
pr_cont("%s0x%04lx",
num?", ":"", *p++);
}
pr_cont("\n\n");
}
static __init int qemu_print_hpa(struct device *lin_dev, void *data)
{
struct parisc_device *dev = to_parisc_device(lin_dev);
unsigned long hpa = dev->hpa.start;
pr_cont("\t{\t.hpa = 0x%08lx,\\\n", hpa);
pr_cont("\t\t.iodc = &iodc_data_hpa_%08lx,\\\n", hpa);
pr_cont("\t\t.mod_info = &mod_info_hpa_%08lx,\\\n", hpa);
pr_cont("\t\t.mod_path = &mod_path_hpa_%08lx,\\\n", hpa);
pr_cont("\t\t.num_addr = HPA_%08lx_num_addr,\\\n", hpa);
pr_cont("\t\t.add_addr = { HPA_%08lx_add_addr } },\\\n", hpa);
return 0;
}
static __init void qemu_footer(void)
{
pr_info("\n\n#define PARISC_DEVICE_LIST \\\n");
for_each_padev(qemu_print_hpa, NULL);
pr_cont("\t{ 0, }\n");
pr_info("--- cut here ---\n");
}
/* print iodc data of the various hpa modules for qemu inclusion */
static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
{
struct parisc_device *dev = to_parisc_device(lin_dev);
unsigned long count;
unsigned long hpa = dev->hpa.start;
int status;
struct pdc_iodc iodc_data;
int mod_index;
struct pdc_system_map_mod_info pdc_mod_info;
struct pdc_module_path mod_path;
status = pdc_iodc_read(&count, hpa, 0,
&iodc_data, sizeof(iodc_data));
if (status != PDC_OK) {
pr_info("No IODC data for hpa 0x%08lx\n", hpa);
return 0;
}
pr_info("\n");
/* Prevent hung task messages when printing on serial console */
cond_resched();
pr_info("#define HPA_%08lx_DESCRIPTION \"%s\"\n",
hpa, parisc_hardware_description(&dev->id));
mod_index = 0;
do {
status = pdc_system_map_find_mods(&pdc_mod_info,
&mod_path, mod_index++);
} while (status == PDC_OK && pdc_mod_info.mod_addr != hpa);
pr_info("static struct pdc_system_map_mod_info"
" mod_info_hpa_%08lx = {\n", hpa);
#define DO(member) \
pr_cont("\t." #member " = 0x%x,\n", \
(unsigned int)pdc_mod_info.member)
DO(mod_addr);
DO(mod_pgs);
DO(add_addrs);
pr_cont("};\n");
#undef DO
pr_info("static struct pdc_module_path "
"mod_path_hpa_%08lx = {\n", hpa);
pr_cont("\t.path = { ");
pr_cont(".flags = 0x%x, ", mod_path.path.flags);
pr_cont(".bc = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }, ",
(unsigned char)mod_path.path.bc[0],
(unsigned char)mod_path.path.bc[1],
(unsigned char)mod_path.path.bc[2],
(unsigned char)mod_path.path.bc[3],
(unsigned char)mod_path.path.bc[4],
(unsigned char)mod_path.path.bc[5]);
pr_cont(".mod = 0x%x ", mod_path.path.mod);
pr_cont(" },\n");
pr_cont("\t.layers = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n",
mod_path.layers[0], mod_path.layers[1], mod_path.layers[2],
mod_path.layers[3], mod_path.layers[4], mod_path.layers[5]);
pr_cont("};\n");
pr_info("static struct pdc_iodc iodc_data_hpa_%08lx = {\n", hpa);
#define DO(member) \
pr_cont("\t." #member " = 0x%04lx,\n", \
(unsigned long)iodc_data.member)
DO(hversion_model);
DO(hversion);
DO(spa);
DO(type);
DO(sversion_rev);
DO(sversion_model);
DO(sversion_opt);
DO(rev);
DO(dep);
DO(features);
DO(checksum);
DO(length);
#undef DO
pr_cont("\t/* pad: 0x%04x, 0x%04x */\n",
iodc_data.pad[0], iodc_data.pad[1]);
pr_cont("};\n");
pr_info("#define HPA_%08lx_num_addr %d\n", hpa, dev->num_addrs);
pr_info("#define HPA_%08lx_add_addr ", hpa);
count = 0;
if (dev->num_addrs == 0)
pr_cont("0");
while (count < dev->num_addrs) {
pr_cont("0x%08lx, ", dev->addr[count]);
count++;
}
pr_cont("\n\n");
return 0;
}
static __init int print_one_device(struct device * dev, void * data)
{
struct parisc_device * pdev = to_parisc_device(dev);
if (check_dev(dev))
print_parisc_device(pdev);
return 0;
}
/**
* print_parisc_devices - Print out a list of devices found in this system
*/
void __init print_parisc_devices(void)
{
for_each_padev(print_one_device, NULL);
#define PARISC_QEMU_MACHINE_HEADER 0
if (PARISC_QEMU_MACHINE_HEADER) {
qemu_header();
for_each_padev(qemu_print_iodc_data, NULL);
qemu_footer();
}
}
|
// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_1516.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
* Project manager: Eric Stolz
*
* ADDI-DATA GmbH
* Dieselstrasse 3
* D-77833 Ottersweier
* Tel: +49(0)7223/9493-0
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* [email protected]
*/
#include <linux/module.h>
#include <linux/comedi/comedi_pci.h>
#include "addi_watchdog.h"
/*
* PCI bar 1 I/O Register map - Digital input/output
*/
#define APCI1516_DI_REG 0x00
#define APCI1516_DO_REG 0x04
/*
* PCI bar 2 I/O Register map - Watchdog (APCI-1516 and APCI-2016)
*/
#define APCI1516_WDOG_REG 0x00
enum apci1516_boardid {
BOARD_APCI1016,
BOARD_APCI1516,
BOARD_APCI2016,
};
struct apci1516_boardinfo {
const char *name;
int di_nchan;
int do_nchan;
int has_wdog;
};
static const struct apci1516_boardinfo apci1516_boardtypes[] = {
[BOARD_APCI1016] = {
.name = "apci1016",
.di_nchan = 16,
},
[BOARD_APCI1516] = {
.name = "apci1516",
.di_nchan = 8,
.do_nchan = 8,
.has_wdog = 1,
},
[BOARD_APCI2016] = {
.name = "apci2016",
.do_nchan = 16,
.has_wdog = 1,
},
};
struct apci1516_private {
unsigned long wdog_iobase;
};
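/* Read the instantaneous state of all digital input channels. */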
static int apci1516_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
data[1] = inw(dev->iobase + APCI1516_DI_REG);
return insn->n;
}
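/* Update the digital outputs; the current state is read back from the
 * register first so that unmodified bits are preserved.
 */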
static int apci1516_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
s->state = inw(dev->iobase + APCI1516_DO_REG);
if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + APCI1516_DO_REG);
data[1] = s->state;
return insn->n;
}
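/* For boards with a watchdog, clear the outputs and reset the watchdog
 * to a known state.
 */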
static int apci1516_reset(struct comedi_device *dev)
{
const struct apci1516_boardinfo *board = dev->board_ptr;
struct apci1516_private *devpriv = dev->private;
if (!board->has_wdog)
return 0;
outw(0x0, dev->iobase + APCI1516_DO_REG);
addi_watchdog_reset(devpriv->wdog_iobase);
return 0;
}
static int apci1516_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct apci1516_boardinfo *board = NULL;
struct apci1516_private *devpriv;
struct comedi_subdevice *s;
int ret;
if (context < ARRAY_SIZE(apci1516_boardtypes))
board = &apci1516_boardtypes[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
dev->iobase = pci_resource_start(pcidev, 1);
devpriv->wdog_iobase = pci_resource_start(pcidev, 2);
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
return ret;
/* Initialize the digital input subdevice */
s = &dev->subdevices[0];
if (board->di_nchan) {
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = board->di_nchan;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = apci1516_di_insn_bits;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* Initialize the digital output subdevice */
s = &dev->subdevices[1];
if (board->do_nchan) {
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = board->do_nchan;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = apci1516_do_insn_bits;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* Initialize the watchdog subdevice */
s = &dev->subdevices[2];
if (board->has_wdog) {
ret = addi_watchdog_init(s, devpriv->wdog_iobase);
if (ret)
return ret;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
apci1516_reset(dev);
return 0;
}
static void apci1516_detach(struct comedi_device *dev)
{
if (dev->iobase)
apci1516_reset(dev);
comedi_pci_detach(dev);
}
static struct comedi_driver apci1516_driver = {
.driver_name = "addi_apci_1516",
.module = THIS_MODULE,
.auto_attach = apci1516_auto_attach,
.detach = apci1516_detach,
};
static int apci1516_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &apci1516_driver, id->driver_data);
}
static const struct pci_device_id apci1516_pci_table[] = {
{ PCI_VDEVICE(ADDIDATA, 0x1000), BOARD_APCI1016 },
{ PCI_VDEVICE(ADDIDATA, 0x1001), BOARD_APCI1516 },
{ PCI_VDEVICE(ADDIDATA, 0x1002), BOARD_APCI2016 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, apci1516_pci_table);
static struct pci_driver apci1516_pci_driver = {
.name = "addi_apci_1516",
.id_table = apci1516_pci_table,
.probe = apci1516_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci1516_driver, apci1516_pci_driver);
MODULE_DESCRIPTION("ADDI-DATA APCI-1016/1516/2016, 16 channel DIO boards");
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-only
#include "msm8916-samsung-a2015-common.dtsi"
/ {
haptic {
compatible = "regulator-haptic";
haptic-supply = <&reg_motor_vdd>;
min-microvolt = <3300000>;
max-microvolt = <3300000>;
};
i2c-muic {
/* SM5504 MUIC instead of SM5502 */
/delete-node/ extcon@25;
muic: extcon@14 {
compatible = "siliconmitus,sm5504-muic";
reg = <0x14>;
interrupt-parent = <&tlmm>;
interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
pinctrl-names = "default";
pinctrl-0 = <&muic_int_default>;
usb_con: connector {
compatible = "usb-b-connector";
label = "micro-USB";
type = "micro";
};
};
};
reg_touch_key: regulator-touch-key {
compatible = "regulator-fixed";
regulator-name = "touch_key";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
gpio = <&tlmm 97 GPIO_ACTIVE_HIGH>;
enable-active-high;
pinctrl-names = "default";
pinctrl-0 = <&tkey_en_default>;
};
};
&blsp_i2c2 {
/* lis2hh12 accelerometer instead of BMC150 */
/delete-node/ accelerometer@10;
/delete-node/ magnetometer@12;
accelerometer@1d {
compatible = "st,lis2hh12";
reg = <0x1d>;
interrupt-parent = <&tlmm>;
interrupts = <115 IRQ_TYPE_LEVEL_HIGH>;
vdd-supply = <&pm8916_l5>;
vddio-supply = <&pm8916_l5>;
st,drdy-int-pin = <1>;
mount-matrix = "1", "0", "0",
"0", "-1", "0",
"0", "0", "1";
pinctrl-0 = <&accel_int_default>;
pinctrl-names = "default";
};
};
&mpss_mem {
reg = <0x0 0x86800000 0x0 0x5a00000>;
};
&reg_motor_vdd {
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
&touchkey {
vcc-supply = <&reg_touch_key>;
vdd-supply = <&reg_touch_key>;
};
&wcnss {
status = "okay";
};
&wcnss_iris {
compatible = "qcom,wcn3620";
};
&wcnss_mem {
status = "okay";
};
&tlmm {
tkey_en_default: tkey-en-default-state {
pins = "gpio97";
function = "gpio";
drive-strength = <2>;
bias-disable;
};
};
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation
#include <linux/device.h>
#include "ipu3.h"
#include "ipu3-css-pool.h"
#include "ipu3-dmamap.h"
int imgu_css_dma_buffer_resize(struct imgu_device *imgu,
struct imgu_css_map *map, size_t size)
{
if (map->size < size && map->vaddr) {
dev_warn(&imgu->pci_dev->dev, "dma buf resized from %zu to %zu",
map->size, size);
imgu_dmamap_free(imgu, map);
if (!imgu_dmamap_alloc(imgu, map, size))
return -ENOMEM;
}
return 0;
}
void imgu_css_pool_cleanup(struct imgu_device *imgu, struct imgu_css_pool *pool)
{
unsigned int i;
for (i = 0; i < IPU3_CSS_POOL_SIZE; i++)
imgu_dmamap_free(imgu, &pool->entry[i].param);
}
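/*
 * Allocate IPU3_CSS_POOL_SIZE DMA-mapped parameter buffers of @size
 * bytes each. A @size of 0 creates an empty pool of NULL entries.
 */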
int imgu_css_pool_init(struct imgu_device *imgu, struct imgu_css_pool *pool,
size_t size)
{
unsigned int i;
for (i = 0; i < IPU3_CSS_POOL_SIZE; i++) {
pool->entry[i].valid = false;
if (size == 0) {
pool->entry[i].param.vaddr = NULL;
continue;
}
if (!imgu_dmamap_alloc(imgu, &pool->entry[i].param, size))
goto fail;
}
pool->last = IPU3_CSS_POOL_SIZE;
return 0;
fail:
imgu_css_pool_cleanup(imgu, pool);
return -ENOMEM;
}
/*
* Allocate a new parameter by recycling the oldest entry in the pool.
*/
void imgu_css_pool_get(struct imgu_css_pool *pool)
{
/* Get the oldest entry */
u32 n = (pool->last + 1) % IPU3_CSS_POOL_SIZE;
pool->entry[n].valid = true;
pool->last = n;
}
/*
* Undo, for all practical purposes, the effect of pool_get().
*/
void imgu_css_pool_put(struct imgu_css_pool *pool)
{
pool->entry[pool->last].valid = false;
pool->last = (pool->last + IPU3_CSS_POOL_SIZE - 1) % IPU3_CSS_POOL_SIZE;
}
/**
* imgu_css_pool_last - Retrieve the nth pool entry from last
*
* @pool: a pointer to &struct imgu_css_pool.
* @n: the distance to the last index.
*
* Returns:
* The nth entry from the last, or the null map if no frame is stored.
*/
const struct imgu_css_map *
imgu_css_pool_last(struct imgu_css_pool *pool, unsigned int n)
{
static const struct imgu_css_map null_map = { 0 };
int i = (pool->last + IPU3_CSS_POOL_SIZE - n) % IPU3_CSS_POOL_SIZE;
WARN_ON(n >= IPU3_CSS_POOL_SIZE);
if (!pool->entry[i].valid)
return &null_map;
return &pool->entry[i].param;
}
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Driver for Microsemi VSC85xx PHYs
*
* Author: Nagaraju Lakkaraju
* License: Dual MIT/GPL
* Copyright (c) 2016 Microsemi Corporation
*/
#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/netdevice.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>
#include "mscc_serdes.h"
#include "mscc.h"
static const struct vsc85xx_hw_stat vsc85xx_hw_stats[] = {
{
.string = "phy_receive_errors",
.reg = MSCC_PHY_ERR_RX_CNT,
.page = MSCC_PHY_PAGE_STANDARD,
.mask = ERR_CNT_MASK,
}, {
.string = "phy_false_carrier",
.reg = MSCC_PHY_ERR_FALSE_CARRIER_CNT,
.page = MSCC_PHY_PAGE_STANDARD,
.mask = ERR_CNT_MASK,
}, {
.string = "phy_cu_media_link_disconnect",
.reg = MSCC_PHY_ERR_LINK_DISCONNECT_CNT,
.page = MSCC_PHY_PAGE_STANDARD,
.mask = ERR_CNT_MASK,
}, {
.string = "phy_cu_media_crc_good_count",
.reg = MSCC_PHY_CU_MEDIA_CRC_VALID_CNT,
.page = MSCC_PHY_PAGE_EXTENDED,
.mask = VALID_CRC_CNT_CRC_MASK,
}, {
.string = "phy_cu_media_crc_error_count",
.reg = MSCC_PHY_EXT_PHY_CNTL_4,
.page = MSCC_PHY_PAGE_EXTENDED,
.mask = ERR_CNT_MASK,
},
};
static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = {
{
.string = "phy_receive_errors",
.reg = MSCC_PHY_ERR_RX_CNT,
.page = MSCC_PHY_PAGE_STANDARD,
.mask = ERR_CNT_MASK,
}, {
.string = "phy_false_carrier",
.reg = MSCC_PHY_ERR_FALSE_CARRIER_CNT,
.page = MSCC_PHY_PAGE_STANDARD,
.mask = ERR_CNT_MASK,
}, {
.string = "phy_cu_media_link_disconnect",
.reg = MSCC_PHY_ERR_LINK_DISCONNECT_CNT,
.page = MSCC_PHY_PAGE_STANDARD,
.mask = ERR_CNT_MASK,
}, {
.string = "phy_cu_media_crc_good_count",
.reg = MSCC_PHY_CU_MEDIA_CRC_VALID_CNT,
.page = MSCC_PHY_PAGE_EXTENDED,
.mask = VALID_CRC_CNT_CRC_MASK,
}, {
.string = "phy_cu_media_crc_error_count",
.reg = MSCC_PHY_EXT_PHY_CNTL_4,
.page = MSCC_PHY_PAGE_EXTENDED,
.mask = ERR_CNT_MASK,
}, {
.string = "phy_serdes_tx_good_pkt_count",
.reg = MSCC_PHY_SERDES_TX_VALID_CNT,
.page = MSCC_PHY_PAGE_EXTENDED_3,
.mask = VALID_CRC_CNT_CRC_MASK,
}, {
.string = "phy_serdes_tx_bad_crc_count",
.reg = MSCC_PHY_SERDES_TX_CRC_ERR_CNT,
.page = MSCC_PHY_PAGE_EXTENDED_3,
.mask = ERR_CNT_MASK,
}, {
.string = "phy_serdes_rx_good_pkt_count",
.reg = MSCC_PHY_SERDES_RX_VALID_CNT,
.page = MSCC_PHY_PAGE_EXTENDED_3,
.mask = VALID_CRC_CNT_CRC_MASK,
}, {
.string = "phy_serdes_rx_bad_crc_count",
.reg = MSCC_PHY_SERDES_RX_CRC_ERR_CNT,
.page = MSCC_PHY_PAGE_EXTENDED_3,
.mask = ERR_CNT_MASK,
},
};
#if IS_ENABLED(CONFIG_OF_MDIO)
static const struct vsc8531_edge_rate_table edge_table[] = {
{MSCC_VDDMAC_3300, { 0, 2, 4, 7, 10, 17, 29, 53} },
{MSCC_VDDMAC_2500, { 0, 3, 6, 10, 14, 23, 37, 63} },
{MSCC_VDDMAC_1800, { 0, 5, 9, 16, 23, 35, 52, 76} },
{MSCC_VDDMAC_1500, { 0, 6, 14, 21, 29, 42, 58, 77} },
};
#endif
static const int vsc85xx_internal_delay[] = {200, 800, 1100, 1700, 2000, 2300,
2600, 3400};
static int vsc85xx_phy_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, MSCC_EXT_PAGE_ACCESS);
}
static int vsc85xx_phy_write_page(struct phy_device *phydev, int page)
{
return __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page);
}
static int vsc85xx_get_sset_count(struct phy_device *phydev)
{
struct vsc8531_private *priv = phydev->priv;
if (!priv)
return 0;
return priv->nstats;
}
static void vsc85xx_get_strings(struct phy_device *phydev, u8 *data)
{
struct vsc8531_private *priv = phydev->priv;
int i;
if (!priv)
return;
for (i = 0; i < priv->nstats; i++)
ethtool_puts(&data, priv->hw_stats[i].string);
}
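/* Read one hardware counter, mask the valid bits and accumulate the
 * value into the driver's 64-bit running total. Returns U64_MAX if the
 * register read fails.
 */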
static u64 vsc85xx_get_stat(struct phy_device *phydev, int i)
{
struct vsc8531_private *priv = phydev->priv;
int val;
val = phy_read_paged(phydev, priv->hw_stats[i].page,
priv->hw_stats[i].reg);
if (val < 0)
return U64_MAX;
val = val & priv->hw_stats[i].mask;
priv->stats[i] += val;
return priv->stats[i];
}
static void vsc85xx_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data)
{
struct vsc8531_private *priv = phydev->priv;
int i;
if (!priv)
return;
for (i = 0; i < priv->nstats; i++)
data[i] = vsc85xx_get_stat(phydev, i);
}
static int vsc85xx_led_cntl_set(struct phy_device *phydev,
u8 led_num,
u8 mode)
{
int rc;
u16 reg_val;
mutex_lock(&phydev->lock);
reg_val = phy_read(phydev, MSCC_PHY_LED_MODE_SEL);
reg_val &= ~LED_MODE_SEL_MASK(led_num);
reg_val |= LED_MODE_SEL(led_num, (u16)mode);
rc = phy_write(phydev, MSCC_PHY_LED_MODE_SEL, reg_val);
mutex_unlock(&phydev->lock);
return rc;
}
static int vsc85xx_mdix_get(struct phy_device *phydev, u8 *mdix)
{
u16 reg_val;
reg_val = phy_read(phydev, MSCC_PHY_DEV_AUX_CNTL);
if (reg_val & HP_AUTO_MDIX_X_OVER_IND_MASK)
*mdix = ETH_TP_MDI_X;
else
*mdix = ETH_TP_MDI;
return 0;
}
static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix)
{
int rc;
u16 reg_val;
reg_val = phy_read(phydev, MSCC_PHY_BYPASS_CONTROL);
if (mdix == ETH_TP_MDI || mdix == ETH_TP_MDI_X) {
reg_val |= (DISABLE_PAIR_SWAP_CORR_MASK |
DISABLE_POLARITY_CORR_MASK |
DISABLE_HP_AUTO_MDIX_MASK);
} else {
reg_val &= ~(DISABLE_PAIR_SWAP_CORR_MASK |
DISABLE_POLARITY_CORR_MASK |
DISABLE_HP_AUTO_MDIX_MASK);
}
rc = phy_write(phydev, MSCC_PHY_BYPASS_CONTROL, reg_val);
if (rc)
return rc;
reg_val = 0;
if (mdix == ETH_TP_MDI)
reg_val = FORCE_MDI_CROSSOVER_MDI;
else if (mdix == ETH_TP_MDI_X)
reg_val = FORCE_MDI_CROSSOVER_MDIX;
rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
MSCC_PHY_EXT_MODE_CNTL, FORCE_MDI_CROSSOVER_MASK,
reg_val);
if (rc < 0)
return rc;
return genphy_restart_aneg(phydev);
}
static int vsc85xx_downshift_get(struct phy_device *phydev, u8 *count)
{
int reg_val;
reg_val = phy_read_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
MSCC_PHY_ACTIPHY_CNTL);
if (reg_val < 0)
return reg_val;
reg_val &= DOWNSHIFT_CNTL_MASK;
if (!(reg_val & DOWNSHIFT_EN))
*count = DOWNSHIFT_DEV_DISABLE;
else
*count = ((reg_val & ~DOWNSHIFT_EN) >> DOWNSHIFT_CNTL_POS) + 2;
return 0;
}
static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count)
{
if (count == DOWNSHIFT_DEV_DEFAULT_COUNT) {
/* Default downshift count 3 (i.e. Bit3:2 = 0b01) */
count = ((1 << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN);
} else if (count > DOWNSHIFT_COUNT_MAX || count == 1) {
phydev_err(phydev, "Downshift count should be 2,3,4 or 5\n");
return -ERANGE;
} else if (count) {
/* Downshift count is either 2,3,4 or 5 */
count = (((count - 2) << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN);
}
return phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
MSCC_PHY_ACTIPHY_CNTL, DOWNSHIFT_CNTL_MASK,
count);
}
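/* Program the Wake-on-LAN MAC address and SecureOn password registers,
 * then enable or disable the WoL interrupt to match the requested modes.
 */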
static int vsc85xx_wol_set(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
const u8 *mac_addr = phydev->attached_dev->dev_addr;
int rc;
u16 reg_val;
u8 i;
u16 pwd[3] = {0, 0, 0};
struct ethtool_wolinfo *wol_conf = wol;
rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
if (rc < 0)
return phy_restore_page(phydev, rc, rc);
if (wol->wolopts & WAKE_MAGIC) {
/* Store the device address for the magic packet */
for (i = 0; i < ARRAY_SIZE(pwd); i++)
pwd[i] = mac_addr[5 - (i * 2 + 1)] << 8 |
mac_addr[5 - i * 2];
__phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]);
__phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]);
__phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]);
} else {
__phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0);
__phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0);
__phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0);
}
if (wol_conf->wolopts & WAKE_MAGICSECURE) {
for (i = 0; i < ARRAY_SIZE(pwd); i++)
pwd[i] = wol_conf->sopass[5 - (i * 2 + 1)] << 8 |
wol_conf->sopass[5 - i * 2];
__phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]);
__phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]);
__phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]);
} else {
__phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0);
__phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0);
__phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0);
}
reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
if (wol_conf->wolopts & WAKE_MAGICSECURE)
reg_val |= SECURE_ON_ENABLE;
else
reg_val &= ~SECURE_ON_ENABLE;
__phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
if (rc < 0)
return rc;
if (wol->wolopts & WAKE_MAGIC) {
/* Enable the WOL interrupt */
reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
reg_val |= MII_VSC85XX_INT_MASK_WOL;
rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
if (rc)
return rc;
} else {
/* Disable the WOL interrupt */
reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
reg_val &= (~MII_VSC85XX_INT_MASK_WOL);
rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
if (rc)
return rc;
}
/* Clear WOL interrupt status */
reg_val = phy_read(phydev, MII_VSC85XX_INT_STATUS);
return 0;
}
static void vsc85xx_wol_get(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
int rc;
u16 reg_val;
u8 i;
u16 pwd[3] = {0, 0, 0};
struct ethtool_wolinfo *wol_conf = wol;
rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
if (rc < 0)
goto out_restore_page;
reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
if (reg_val & SECURE_ON_ENABLE)
wol_conf->wolopts |= WAKE_MAGICSECURE;
if (wol_conf->wolopts & WAKE_MAGICSECURE) {
pwd[0] = __phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD);
pwd[1] = __phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD);
pwd[2] = __phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD);
for (i = 0; i < ARRAY_SIZE(pwd); i++) {
wol_conf->sopass[5 - i * 2] = pwd[i] & 0x00ff;
wol_conf->sopass[5 - (i * 2 + 1)] = (pwd[i] & 0xff00)
>> 8;
}
}
out_restore_page:
phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
}
#if IS_ENABLED(CONFIG_OF_MDIO)
static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
{
u32 vdd, sd;
int i, j;
struct device *dev = &phydev->mdio.dev;
struct device_node *of_node = dev->of_node;
u8 sd_array_size = ARRAY_SIZE(edge_table[0].slowdown);
if (!of_node)
return -ENODEV;
if (of_property_read_u32(of_node, "vsc8531,vddmac", &vdd))
vdd = MSCC_VDDMAC_3300;
if (of_property_read_u32(of_node, "vsc8531,edge-slowdown", &sd))
sd = 0;
for (i = 0; i < ARRAY_SIZE(edge_table); i++)
if (edge_table[i].vddmac == vdd)
for (j = 0; j < sd_array_size; j++)
if (edge_table[i].slowdown[j] == sd)
return (sd_array_size - j - 1);
return -EINVAL;
}
static int vsc85xx_dt_led_mode_get(struct phy_device *phydev,
char *led,
u32 default_mode)
{
struct vsc8531_private *priv = phydev->priv;
struct device *dev = &phydev->mdio.dev;
struct device_node *of_node = dev->of_node;
u32 led_mode;
int err;
if (!of_node)
return -ENODEV;
led_mode = default_mode;
err = of_property_read_u32(of_node, led, &led_mode);
if (!err && !(BIT(led_mode) & priv->supp_led_modes)) {
phydev_err(phydev, "DT %s invalid\n", led);
return -EINVAL;
}
return led_mode;
}
#else
static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
{
return 0;
}
static int vsc85xx_dt_led_mode_get(struct phy_device *phydev,
char *led,
u8 default_mode)
{
return default_mode;
}
#endif /* CONFIG_OF_MDIO */
static int vsc85xx_dt_led_modes_get(struct phy_device *phydev,
u32 *default_mode)
{
struct vsc8531_private *priv = phydev->priv;
char led_dt_prop[28];
int i, ret;
for (i = 0; i < priv->nleds; i++) {
ret = sprintf(led_dt_prop, "vsc8531,led-%d-mode", i);
if (ret < 0)
return ret;
ret = vsc85xx_dt_led_mode_get(phydev, led_dt_prop,
default_mode[i]);
if (ret < 0)
return ret;
priv->leds_mode[i] = ret;
}
return 0;
}
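/* Set the MAC interface edge-rate (slew) control field. */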
static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev, u8 edge_rate)
{
int rc;
mutex_lock(&phydev->lock);
rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
MSCC_PHY_WOL_MAC_CONTROL, EDGE_RATE_CNTL_MASK,
edge_rate << EDGE_RATE_CNTL_POS);
mutex_unlock(&phydev->lock);
return rc;
}
static int vsc85xx_mac_if_set(struct phy_device *phydev,
phy_interface_t interface)
{
int rc;
u16 reg_val;
mutex_lock(&phydev->lock);
reg_val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
reg_val &= ~(MAC_IF_SELECTION_MASK);
switch (interface) {
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
reg_val |= (MAC_IF_SELECTION_RGMII << MAC_IF_SELECTION_POS);
break;
case PHY_INTERFACE_MODE_RMII:
reg_val |= (MAC_IF_SELECTION_RMII << MAC_IF_SELECTION_POS);
break;
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
reg_val |= (MAC_IF_SELECTION_GMII << MAC_IF_SELECTION_POS);
break;
default:
rc = -EINVAL;
goto out_unlock;
}
rc = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, reg_val);
if (rc)
goto out_unlock;
rc = genphy_soft_reset(phydev);
out_unlock:
mutex_unlock(&phydev->lock);
return rc;
}
/* Set the RGMII RX and TX clock skews individually, according to the PHY
* interface type, to:
* * 0.2 ns (their default, and lowest, hardware value) if delays should
* not be enabled
* * 2.0 ns (which causes the data to be sampled at exactly half way between
* clock transitions at 1000 Mbps) if delays should be enabled
*/
static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl,
u16 rgmii_rx_delay_mask,
u16 rgmii_tx_delay_mask)
{
u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1;
u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1;
int delay_size = ARRAY_SIZE(vsc85xx_internal_delay);
struct device *dev = &phydev->mdio.dev;
u16 reg_val = 0;
u16 mask = 0;
s32 rx_delay;
s32 tx_delay;
int rc = 0;
/* For traffic to pass, the VSC8502 family needs the RX_CLK disable bit
* to be unset for all PHY modes, so do that as part of the paged
* register modification.
* For some family members (like VSC8530/31/40/41) this bit is reserved
* and read-only, and the RX clock is enabled by default.
*/
if (rgmii_cntl == VSC8502_RGMII_CNTL)
mask |= VSC8502_RGMII_RX_CLK_DISABLE;
if (phy_interface_is_rgmii(phydev))
mask |= rgmii_rx_delay_mask | rgmii_tx_delay_mask;
rx_delay = phy_get_internal_delay(phydev, dev, vsc85xx_internal_delay,
delay_size, true);
if (rx_delay < 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
rx_delay = RGMII_CLK_DELAY_2_0_NS;
else
rx_delay = RGMII_CLK_DELAY_0_2_NS;
}
tx_delay = phy_get_internal_delay(phydev, dev, vsc85xx_internal_delay,
delay_size, false);
if (tx_delay < 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
tx_delay = RGMII_CLK_DELAY_2_0_NS;
else
tx_delay = RGMII_CLK_DELAY_0_2_NS;
}
reg_val |= rx_delay << rgmii_rx_delay_pos;
reg_val |= tx_delay << rgmii_tx_delay_pos;
if (mask)
rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
rgmii_cntl, mask, reg_val);
return rc;
}
static int vsc85xx_default_config(struct phy_device *phydev)
{
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
return vsc85xx_update_rgmii_cntl(phydev, VSC8502_RGMII_CNTL,
VSC8502_RGMII_RX_DELAY_MASK,
VSC8502_RGMII_TX_DELAY_MASK);
}
static int vsc85xx_get_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, void *data)
{
switch (tuna->id) {
case ETHTOOL_PHY_DOWNSHIFT:
return vsc85xx_downshift_get(phydev, (u8 *)data);
default:
return -EINVAL;
}
}
static int vsc85xx_set_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna,
const void *data)
{
switch (tuna->id) {
case ETHTOOL_PHY_DOWNSHIFT:
return vsc85xx_downshift_set(phydev, *(u8 *)data);
default:
return -EINVAL;
}
}
/* mdiobus lock should be locked when using this function */
static void vsc85xx_tr_write(struct phy_device *phydev, u16 addr, u32 val)
{
__phy_write(phydev, MSCC_PHY_TR_MSB, val >> 16);
__phy_write(phydev, MSCC_PHY_TR_LSB, val & GENMASK(15, 0));
__phy_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr));
}
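/* Apply the token-ring (TR) page pre-init sequence; SMI broadcast is
 * enabled first so the writes reach all PHYs in the package.
 */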
static int vsc8531_pre_init_seq_set(struct phy_device *phydev)
{
int rc;
static const struct reg_val init_seq[] = {
{0x0f90, 0x00688980},
{0x0696, 0x00000003},
{0x07fa, 0x0050100f},
{0x1686, 0x00000004},
};
unsigned int i;
int oldpage;
rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_STANDARD,
MSCC_PHY_EXT_CNTL_STATUS, SMI_BROADCAST_WR_EN,
SMI_BROADCAST_WR_EN);
if (rc < 0)
return rc;
rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_TEST,
MSCC_PHY_TEST_PAGE_24, 0, 0x0400);
if (rc < 0)
return rc;
rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_TEST,
MSCC_PHY_TEST_PAGE_5, 0x0a00, 0x0e00);
if (rc < 0)
return rc;
rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_TEST,
MSCC_PHY_TEST_PAGE_8, TR_CLK_DISABLE, TR_CLK_DISABLE);
if (rc < 0)
return rc;
mutex_lock(&phydev->lock);
oldpage = phy_select_page(phydev, MSCC_PHY_PAGE_TR);
if (oldpage < 0)
goto out_unlock;
for (i = 0; i < ARRAY_SIZE(init_seq); i++)
vsc85xx_tr_write(phydev, init_seq[i].reg, init_seq[i].val);
out_unlock:
oldpage = phy_restore_page(phydev, oldpage, oldpage);
mutex_unlock(&phydev->lock);
return oldpage;
}
static int vsc85xx_eee_init_seq_set(struct phy_device *phydev)
{
static const struct reg_val init_eee[] = {
{0x0f82, 0x0012b00a},
{0x1686, 0x00000004},
{0x168c, 0x00d2c46f},
{0x17a2, 0x00000620},
{0x16a0, 0x00eeffdd},
{0x16a6, 0x00071448},
{0x16a4, 0x0013132f},
{0x16a8, 0x00000000},
{0x0ffc, 0x00c0a028},
{0x0fe8, 0x0091b06c},
{0x0fea, 0x00041600},
{0x0f80, 0x00000af4},
{0x0fec, 0x00901809},
{0x0fee, 0x0000a6a1},
{0x0ffe, 0x00b01007},
{0x16b0, 0x00eeff00},
{0x16b2, 0x00007000},
{0x16b4, 0x00000814},
};
unsigned int i;
int oldpage;
mutex_lock(&phydev->lock);
oldpage = phy_select_page(phydev, MSCC_PHY_PAGE_TR);
if (oldpage < 0)
goto out_unlock;
for (i = 0; i < ARRAY_SIZE(init_eee); i++)
vsc85xx_tr_write(phydev, init_eee[i].reg, init_eee[i].val);
out_unlock:
oldpage = phy_restore_page(phydev, oldpage, oldpage);
mutex_unlock(&phydev->lock);
return oldpage;
}
/* phydev->bus->mdio_lock should be locked when using this function */
int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
{
if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
dump_stack();
}
return __phy_package_write(phydev, VSC88XX_BASE_ADDR, regnum, val);
}
/* phydev->bus->mdio_lock should be locked when using this function */
int phy_base_read(struct phy_device *phydev, u32 regnum)
{
if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
dump_stack();
}
return __phy_package_read(phydev, VSC88XX_BASE_ADDR, regnum);
}
u32 vsc85xx_csr_read(struct phy_device *phydev,
enum csr_target target, u32 reg)
{
unsigned long deadline;
u32 val, val_l, val_h;
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
/* CSR registers are grouped under different Target IDs.
* 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
* MSCC_EXT_PAGE_CSR_CNTL_19 registers.
* Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
* and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
*/
/* Setup the Target ID */
phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
if ((target >> 2 == 0x1) || (target >> 2 == 0x3))
/* non-MACsec access */
target &= 0x3;
else
target = 0;
/* Trigger CSR Action - Read into the CSR's */
phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
MSCC_PHY_CSR_CNTL_19_CMD | MSCC_PHY_CSR_CNTL_19_READ |
MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
MSCC_PHY_CSR_CNTL_19_TARGET(target));
/* Wait for register access */
deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
do {
usleep_range(500, 1000);
val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
} while (time_before(jiffies, deadline) &&
!(val & MSCC_PHY_CSR_CNTL_19_CMD));
if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
return 0xffffffff;
/* Read the Least Significant Word (LSW) (17) */
val_l = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_17);
/* Read the Most Significant Word (MSW) (18) */
val_h = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_18);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_STANDARD);
return (val_h << 16) | val_l;
}
int vsc85xx_csr_write(struct phy_device *phydev,
enum csr_target target, u32 reg, u32 val)
{
unsigned long deadline;
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
/* CSR registers are grouped under different Target IDs.
* 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
* MSCC_EXT_PAGE_CSR_CNTL_19 registers.
* Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
* and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
*/
/* Setup the Target ID */
phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
/* Write the Least Significant Word (LSW) (17) */
phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_17, (u16)val);
/* Write the Most Significant Word (MSW) (18) */
phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_18, (u16)(val >> 16));
if ((target >> 2 == 0x1) || (target >> 2 == 0x3))
/* non-MACsec access */
target &= 0x3;
else
target = 0;
/* Trigger CSR Action - Write into the CSR's */
phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
MSCC_PHY_CSR_CNTL_19_CMD |
MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
MSCC_PHY_CSR_CNTL_19_TARGET(target));
/* Wait for register access */
deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
do {
usleep_range(500, 1000);
val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
} while (time_before(jiffies, deadline) &&
!(val & MSCC_PHY_CSR_CNTL_19_CMD));
if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
return -ETIMEDOUT;
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_STANDARD);
return 0;
}
/* bus->mdio_lock should be locked when using this function */
static void vsc8584_csr_write(struct phy_device *phydev, u16 addr, u32 val)
{
phy_base_write(phydev, MSCC_PHY_TR_MSB, val >> 16);
phy_base_write(phydev, MSCC_PHY_TR_LSB, val & GENMASK(15, 0));
phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr));
}
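/* Issue a command to the 8051 micro and poll until it completes, fails
 * or times out.
 */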
/* bus->mdio_lock should be locked when using this function */
int vsc8584_cmd(struct phy_device *phydev, u16 val)
{
unsigned long deadline;
u16 reg_val;
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_NCOMPLETED | val);
deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
do {
reg_val = phy_base_read(phydev, MSCC_PHY_PROC_CMD);
} while (time_before(jiffies, deadline) &&
(reg_val & PROC_CMD_NCOMPLETED) &&
!(reg_val & PROC_CMD_FAILED));
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
if (reg_val & PROC_CMD_FAILED)
return -EIO;
if (reg_val & PROC_CMD_NCOMPLETED)
return -ETIMEDOUT;
return 0;
}
/* bus->mdio_lock should be locked when using this function */
static int vsc8584_micro_deassert_reset(struct phy_device *phydev,
bool patch_en)
{
u32 enable, release;
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
enable = RUN_FROM_INT_ROM | MICRO_CLK_EN | DW8051_CLK_EN;
release = MICRO_NSOFT_RESET | RUN_FROM_INT_ROM | DW8051_CLK_EN |
MICRO_CLK_EN;
if (patch_en) {
enable |= MICRO_PATCH_EN;
release |= MICRO_PATCH_EN;
/* Clear all patches */
phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_RAM);
}
/* Enable 8051 Micro clock; CLEAR/SET patch present; disable PRAM clock
* override and addr. auto-incr; operate at 125 MHz
*/
phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, enable);
/* Release 8051 Micro SW reset */
phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, release);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
return 0;
}
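/* Quiesce the 8051 micro and hold it in soft reset, trapping execution
 * at ROM address 0x005b first so it stops at a known location.
 */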
/* bus->mdio_lock should be locked when using this function */
static int vsc8584_micro_assert_reset(struct phy_device *phydev)
{
int ret;
u16 reg;
ret = vsc8584_cmd(phydev, PROC_CMD_NOP);
if (ret)
return ret;
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
reg &= ~EN_PATCH_RAM_TRAP_ADDR(4);
phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(4), 0x005b);
phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(4), 0x005b);
reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
reg |= EN_PATCH_RAM_TRAP_ADDR(4);
phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_NOP);
reg = phy_base_read(phydev, MSCC_DW8051_CNTL_STATUS);
reg &= ~MICRO_NSOFT_RESET;
phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, reg);
phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_MCB_ACCESS_MAC_CONF |
PROC_CMD_SGMII_PORT(0) | PROC_CMD_NO_MAC_CONF |
PROC_CMD_READ);
reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
reg &= ~EN_PATCH_RAM_TRAP_ADDR(4);
phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
return 0;
}
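/* Ask the 8051 micro to compute a CRC-16 over @size bytes of firmware
 * starting at @start; the result is returned through @crc.
 */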
/* bus->mdio_lock should be locked when using this function */
static int vsc8584_get_fw_crc(struct phy_device *phydev, u16 start, u16 size,
u16 *crc)
{
int ret;
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
phy_base_write(phydev, MSCC_PHY_VERIPHY_CNTL_2, start);
phy_base_write(phydev, MSCC_PHY_VERIPHY_CNTL_3, size);
/* Start Micro command */
ret = vsc8584_cmd(phydev, PROC_CMD_CRC16);
if (ret)
goto out;
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
*crc = phy_base_read(phydev, MSCC_PHY_VERIPHY_CNTL_2);
out:
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
return ret;
}
/* bus->mdio_lock should be locked when using this function */
static int vsc8584_patch_fw(struct phy_device *phydev,
const struct firmware *fw)
{
int i, ret;
ret = vsc8584_micro_assert_reset(phydev);
if (ret) {
dev_err(&phydev->mdio.dev,
"%s: failed to assert reset of micro\n", __func__);
return ret;
}
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
/* Hold 8051 Micro in SW reset, enable auto-incr address and patch
* clock; disable the 8051 Micro clock
*/
phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, RUN_FROM_INT_ROM |
AUTOINC_ADDR | PATCH_RAM_CLK | MICRO_CLK_EN |
MICRO_CLK_DIVIDE(2));
phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_PRAM | INT_MEM_WRITE_EN |
INT_MEM_DATA(2));
phy_base_write(phydev, MSCC_INT_MEM_ADDR, 0x0000);
for (i = 0; i < fw->size; i++)
phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_PRAM |
INT_MEM_WRITE_EN | fw->data[i]);
/* Clear internal memory access */
phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_RAM);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
return 0;
}
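/* Check whether the 8051 trap/patch registers already carry the SerDes
 * init patch (trap 0x3eb7, patch 0x4012) and the micro is running from
 * internal ROM with its clock enabled.
 */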
/* bus->mdio_lock should be locked when using this function */
static bool vsc8574_is_serdes_init(struct phy_device *phydev)
{
u16 reg;
bool ret;
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
reg = phy_base_read(phydev, MSCC_TRAP_ROM_ADDR(1));
if (reg != 0x3eb7) {
ret = false;
goto out;
}
reg = phy_base_read(phydev, MSCC_PATCH_RAM_ADDR(1));
if (reg != 0x4012) {
ret = false;
goto out;
}
reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
if (reg != EN_PATCH_RAM_TRAP_ADDR(1)) {
ret = false;
goto out;
}
reg = phy_base_read(phydev, MSCC_DW8051_CNTL_STATUS);
if ((MICRO_NSOFT_RESET | RUN_FROM_INT_ROM | DW8051_CLK_EN |
MICRO_CLK_EN) != (reg & MSCC_DW8051_VLD_MASK)) {
ret = false;
goto out;
}
ret = true;
out:
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
return ret;
}
/* bus->mdio_lock should be locked when using this function */
static int vsc8574_config_pre_init(struct phy_device *phydev)
{
static const struct reg_val pre_init1[] = {
{0x0fae, 0x000401bd},
{0x0fac, 0x000f000f},
{0x17a0, 0x00a0f147},
{0x0fe4, 0x00052f54},
{0x1792, 0x0027303d},
{0x07fe, 0x00000704},
{0x0fe0, 0x00060150},
{0x0f82, 0x0012b00a},
{0x0f80, 0x00000d74},
{0x02e0, 0x00000012},
{0x03a2, 0x00050208},
{0x03b2, 0x00009186},
{0x0fb0, 0x000e3700},
{0x1688, 0x00049f81},
{0x0fd2, 0x0000ffff},
{0x168a, 0x00039fa2},
{0x1690, 0x0020640b},
{0x0258, 0x00002220},
{0x025a, 0x00002a20},
{0x025c, 0x00003060},
{0x025e, 0x00003fa0},
{0x03a6, 0x0000e0f0},
{0x0f92, 0x00001489},
{0x16a2, 0x00007000},
{0x16a6, 0x00071448},
{0x16a0, 0x00eeffdd},
{0x0fe8, 0x0091b06c},
{0x0fea, 0x00041600},
{0x16b0, 0x00eeff00},
{0x16b2, 0x00007000},
{0x16b4, 0x00000814},
{0x0f90, 0x00688980},
{0x03a4, 0x0000d8f0},
{0x0fc0, 0x00000400},
{0x07fa, 0x0050100f},
{0x0796, 0x00000003},
{0x07f8, 0x00c3ff98},
{0x0fa4, 0x0018292a},
{0x168c, 0x00d2c46f},
{0x17a2, 0x00000620},
{0x16a4, 0x0013132f},
{0x16a8, 0x00000000},
{0x0ffc, 0x00c0a028},
{0x0fec, 0x00901c09},
{0x0fee, 0x0004a6a1},
{0x0ffe, 0x00b01807},
};
static const struct reg_val pre_init2[] = {
{0x0486, 0x0008a518},
{0x0488, 0x006dc696},
{0x048a, 0x00000912},
{0x048e, 0x00000db6},
{0x049c, 0x00596596},
{0x049e, 0x00000514},
{0x04a2, 0x00410280},
{0x04a4, 0x00000000},
{0x04a6, 0x00000000},
{0x04a8, 0x00000000},
{0x04aa, 0x00000000},
{0x04ae, 0x007df7dd},
{0x04b0, 0x006d95d4},
{0x04b2, 0x00492410},
};
struct device *dev = &phydev->mdio.dev;
const struct firmware *fw;
unsigned int i;
u16 crc, reg;
bool serdes_init;
int ret;
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
/* all writes below are broadcasted to all PHYs in the same package */
reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
reg |= SMI_BROADCAST_WR_EN;
phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
phy_base_write(phydev, MII_VSC85XX_INT_MASK, 0);
/* The below register writes are tweaking analog and electrical
* configuration that were determined through characterization by PHY
* engineers. These don't mean anything more than "these are the best
* values".
*/
phy_base_write(phydev, MSCC_PHY_EXT_PHY_CNTL_2, 0x0040);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
phy_base_write(phydev, MSCC_PHY_TEST_PAGE_20, 0x4320);
phy_base_write(phydev, MSCC_PHY_TEST_PAGE_24, 0x0c00);
phy_base_write(phydev, MSCC_PHY_TEST_PAGE_9, 0x18ca);
phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1b20);
reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
reg |= TR_CLK_DISABLE;
phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
for (i = 0; i < ARRAY_SIZE(pre_init1); i++)
vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2);
phy_base_write(phydev, MSCC_PHY_CU_PMD_TX_CNTL, 0x028e);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
for (i = 0; i < ARRAY_SIZE(pre_init2); i++)
vsc8584_csr_write(phydev, pre_init2[i].reg, pre_init2[i].val);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
reg &= ~TR_CLK_DISABLE;
phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
/* end of write broadcasting */
reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
reg &= ~SMI_BROADCAST_WR_EN;
phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
ret = request_firmware(&fw, MSCC_VSC8574_REVB_INT8051_FW, dev);
if (ret) {
dev_err(dev, "failed to load firmware %s, ret: %d\n",
MSCC_VSC8574_REVB_INT8051_FW, ret);
return ret;
}
/* Add one byte to size for the one added by the patch_fw function */
ret = vsc8584_get_fw_crc(phydev,
MSCC_VSC8574_REVB_INT8051_FW_START_ADDR,
fw->size + 1, &crc);
if (ret)
goto out;
if (crc == MSCC_VSC8574_REVB_INT8051_FW_CRC) {
serdes_init = vsc8574_is_serdes_init(phydev);
if (!serdes_init) {
ret = vsc8584_micro_assert_reset(phydev);
if (ret) {
dev_err(dev,
"%s: failed to assert reset of micro\n",
__func__);
goto out;
}
}
} else {
dev_dbg(dev, "FW CRC is not the expected one, patching FW\n");
serdes_init = false;
if (vsc8584_patch_fw(phydev, fw))
dev_warn(dev,
"failed to patch FW, expect non-optimal device\n");
}
if (!serdes_init) {
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), 0x3eb7);
phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), 0x4012);
phy_base_write(phydev, MSCC_INT_MEM_CNTL,
EN_PATCH_RAM_TRAP_ADDR(1));
vsc8584_micro_deassert_reset(phydev, false);
/* Add one byte to size for the one added by the patch_fw
* function
*/
ret = vsc8584_get_fw_crc(phydev,
MSCC_VSC8574_REVB_INT8051_FW_START_ADDR,
fw->size + 1, &crc);
if (ret)
goto out;
if (crc != MSCC_VSC8574_REVB_INT8051_FW_CRC)
dev_warn(dev,
"FW CRC after patching is not the expected one, expect non-optimal device\n");
}
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
ret = vsc8584_cmd(phydev, PROC_CMD_1588_DEFAULT_INIT |
PROC_CMD_PHY_INIT);
out:
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
release_firmware(fw);
return ret;
}
/* Access LCPLL Cfg_2 */
static void vsc8584_pll5g_cfg2_wr(struct phy_device *phydev,
bool disable_fsm)
{
u32 rd_dat;
rd_dat = vsc85xx_csr_read(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2);
rd_dat &= ~BIT(PHY_S6G_CFG2_FSM_DIS);
rd_dat |= (disable_fsm << PHY_S6G_CFG2_FSM_DIS);
vsc85xx_csr_write(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2, rd_dat);
}
/* Trigger a read of the specified MCB */
static int vsc8584_mcb_rd_trig(struct phy_device *phydev,
u32 mcb_reg_addr, u8 mcb_slave_num)
{
u32 rd_dat = 0;
/* read MCB */
vsc85xx_csr_write(phydev, MACRO_CTRL, mcb_reg_addr,
(0x40000000 | (1L << mcb_slave_num)));
return read_poll_timeout(vsc85xx_csr_read, rd_dat,
!(rd_dat & 0x40000000),
4000, 200000, 0,
phydev, MACRO_CTRL, mcb_reg_addr);
}
/* Trigger a write to the specified MCB */
static int vsc8584_mcb_wr_trig(struct phy_device *phydev,
u32 mcb_reg_addr,
u8 mcb_slave_num)
{
u32 rd_dat = 0;
/* write back MCB */
vsc85xx_csr_write(phydev, MACRO_CTRL, mcb_reg_addr,
(0x80000000 | (1L << mcb_slave_num)));
return read_poll_timeout(vsc85xx_csr_read, rd_dat,
!(rd_dat & 0x80000000),
4000, 200000, 0,
phydev, MACRO_CTRL, mcb_reg_addr);
}
/* Sequence to Reset LCPLL for the VIPER and ELISE PHY */
static int vsc8584_pll5g_reset(struct phy_device *phydev)
{
bool dis_fsm;
int ret = 0;
ret = vsc8584_mcb_rd_trig(phydev, 0x11, 0);
if (ret < 0)
goto done;
dis_fsm = 1;
/* Reset LCPLL */
vsc8584_pll5g_cfg2_wr(phydev, dis_fsm);
/* write back LCPLL MCB */
ret = vsc8584_mcb_wr_trig(phydev, 0x11, 0);
if (ret < 0)
goto done;
/* 10 ms sleep while the LCPLL is held in reset */
usleep_range(10000, 20000);
/* read LCPLL MCB into CSRs */
ret = vsc8584_mcb_rd_trig(phydev, 0x11, 0);
if (ret < 0)
goto done;
dis_fsm = 0;
/* Release the Reset of LCPLL */
vsc8584_pll5g_cfg2_wr(phydev, dis_fsm);
/* write back LCPLL MCB */
ret = vsc8584_mcb_wr_trig(phydev, 0x11, 0);
if (ret < 0)
goto done;
usleep_range(110000, 200000);
done:
return ret;
}
/* bus->mdio_lock should be locked when using this function */
static int vsc8584_config_pre_init(struct phy_device *phydev)
{
static const struct reg_val pre_init1[] = {
{0x07fa, 0x0050100f},
{0x1688, 0x00049f81},
{0x0f90, 0x00688980},
{0x03a4, 0x0000d8f0},
{0x0fc0, 0x00000400},
{0x0f82, 0x0012b002},
{0x1686, 0x00000004},
{0x168c, 0x00d2c46f},
{0x17a2, 0x00000620},
{0x16a0, 0x00eeffdd},
{0x16a6, 0x00071448},
{0x16a4, 0x0013132f},
{0x16a8, 0x00000000},
{0x0ffc, 0x00c0a028},
{0x0fe8, 0x0091b06c},
{0x0fea, 0x00041600},
{0x0f80, 0x00fffaff},
{0x0fec, 0x00901809},
{0x0ffe, 0x00b01007},
{0x16b0, 0x00eeff00},
{0x16b2, 0x00007000},
{0x16b4, 0x00000814},
};
static const struct reg_val pre_init2[] = {
{0x0486, 0x0008a518},
{0x0488, 0x006dc696},
{0x048a, 0x00000912},
};
const struct firmware *fw;
struct device *dev = &phydev->mdio.dev;
unsigned int i;
u16 crc, reg;
int ret;
ret = vsc8584_pll5g_reset(phydev);
if (ret < 0) {
dev_err(dev, "failed LCPLL reset, ret: %d\n", ret);
return ret;
}
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
/* all writes below are broadcasted to all PHYs in the same package */
reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
reg |= SMI_BROADCAST_WR_EN;
phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
phy_base_write(phydev, MII_VSC85XX_INT_MASK, 0);
reg = phy_base_read(phydev, MSCC_PHY_BYPASS_CONTROL);
reg |= PARALLEL_DET_IGNORE_ADVERTISED;
phy_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, reg);
/* The register writes below tweak analog and electrical configuration
 * values that were determined through characterization by PHY
 * engineers. They don't mean anything more than "these are the best
 * values".
 */
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_3);
phy_base_write(phydev, MSCC_PHY_SERDES_TX_CRC_ERR_CNT, 0x2000);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1f20);
reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
reg |= TR_CLK_DISABLE;
phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(0x2fa4));
reg = phy_base_read(phydev, MSCC_PHY_TR_MSB);
reg &= ~0x007f;
reg |= 0x0019;
phy_base_write(phydev, MSCC_PHY_TR_MSB, reg);
phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(0x0fa4));
for (i = 0; i < ARRAY_SIZE(pre_init1); i++)
vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2);
phy_base_write(phydev, MSCC_PHY_CU_PMD_TX_CNTL, 0x028e);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
for (i = 0; i < ARRAY_SIZE(pre_init2); i++)
vsc8584_csr_write(phydev, pre_init2[i].reg, pre_init2[i].val);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
reg &= ~TR_CLK_DISABLE;
phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
/* end of write broadcasting */
reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
reg &= ~SMI_BROADCAST_WR_EN;
phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
ret = request_firmware(&fw, MSCC_VSC8584_REVB_INT8051_FW, dev);
if (ret) {
dev_err(dev, "failed to load firmware %s, ret: %d\n",
MSCC_VSC8584_REVB_INT8051_FW, ret);
return ret;
}
/* Add one byte to size for the one added by the patch_fw function */
ret = vsc8584_get_fw_crc(phydev,
MSCC_VSC8584_REVB_INT8051_FW_START_ADDR,
fw->size + 1, &crc);
if (ret)
goto out;
if (crc != MSCC_VSC8584_REVB_INT8051_FW_CRC) {
dev_dbg(dev, "FW CRC is not the expected one, patching FW\n");
if (vsc8584_patch_fw(phydev, fw))
dev_warn(dev,
"failed to patch FW, expect non-optimal device\n");
}
vsc8584_micro_deassert_reset(phydev, false);
/* Add one byte to size for the one added by the patch_fw function */
ret = vsc8584_get_fw_crc(phydev,
MSCC_VSC8584_REVB_INT8051_FW_START_ADDR,
fw->size + 1, &crc);
if (ret)
goto out;
if (crc != MSCC_VSC8584_REVB_INT8051_FW_CRC)
dev_warn(dev,
"FW CRC after patching is not the expected one, expect non-optimal device\n");
ret = vsc8584_micro_assert_reset(phydev);
if (ret)
goto out;
/* Write patch vector 0, to skip IB cal polling */
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_GPIO);
reg = MSCC_ROM_TRAP_SERDES_6G_CFG; /* ROM address to trap, for patch vector 0 */
ret = phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), reg);
if (ret)
goto out;
reg = MSCC_RAM_TRAP_SERDES_6G_CFG; /* RAM address to jump to, when patch vector 0 enabled */
ret = phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), reg);
if (ret)
goto out;
reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
reg |= PATCH_VEC_ZERO_EN; /* bit 8, enable patch vector 0 */
ret = phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
if (ret)
goto out;
vsc8584_micro_deassert_reset(phydev, true);
out:
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
release_firmware(fw);
return ret;
}
static void vsc8584_get_base_addr(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
u16 val, addr;
phy_lock_mdio_bus(phydev);
__phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
addr = __phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_4);
addr >>= PHY_CNTL_4_ADDR_POS;
val = __phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
__phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
phy_unlock_mdio_bus(phydev);
/* In the package, there are two pairs of PHYs (PHY0 + PHY2 and
* PHY1 + PHY3). The first PHY of each pair (PHY0 and PHY1) is
* the base PHY for timestamping operations.
*/
vsc8531->ts_base_addr = phydev->mdio.addr;
vsc8531->ts_base_phy = addr;
if (val & PHY_ADDR_REVERSED) {
vsc8531->base_addr = phydev->mdio.addr + addr;
if (addr > 1) {
vsc8531->ts_base_addr += 2;
vsc8531->ts_base_phy += 2;
}
} else {
vsc8531->base_addr = phydev->mdio.addr - addr;
if (addr > 1) {
vsc8531->ts_base_addr -= 2;
vsc8531->ts_base_phy -= 2;
}
}
vsc8531->addr = addr;
}
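/* Worked example of the address math above (hypothetical addresses,
 * chosen purely for illustration): assume this PHY reports an in-package
 * address of 2 and the package's base PHY sits at MDIO address 4.
 * - normal ordering: mdio.addr = 6, so base_addr = 6 - 2 = 4
 * - reversed ordering (PHY_ADDR_REVERSED set): mdio.addr = 2, so
 *   base_addr = 2 + 2 = 4
 * In both cases addr > 1, so the timestamping base is adjusted by two
 * so that it points at the first PHY of the pair (PHY0 or PHY1).
 */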
static void vsc85xx_coma_mode_release(struct phy_device *phydev)
{
/* The coma mode (pin or reg) provides an optional feature that
* may be used to control when the PHYs become active.
* Alternatively the COMA_MODE pin may be connected low
* so that the PHYs are fully active once out of reset.
*/
/* Enable output (mode=0) and write zero to it */
vsc85xx_phy_write_page(phydev, MSCC_PHY_PAGE_EXTENDED_GPIO);
__phy_modify(phydev, MSCC_PHY_GPIO_CONTROL_2,
MSCC_PHY_COMA_MODE | MSCC_PHY_COMA_OUTPUT, 0);
vsc85xx_phy_write_page(phydev, MSCC_PHY_PAGE_STANDARD);
}
static int vsc8584_config_host_serdes(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
int ret;
u16 val;
ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
if (ret)
return ret;
val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
val &= ~MAC_CFG_MASK;
if (phydev->interface == PHY_INTERFACE_MODE_QSGMII) {
val |= MAC_CFG_QSGMII;
} else if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
val |= MAC_CFG_SGMII;
} else {
ret = -EINVAL;
return ret;
}
ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
if (ret)
return ret;
ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_STANDARD);
if (ret)
return ret;
val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
PROC_CMD_READ_MOD_WRITE_PORT;
if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
val |= PROC_CMD_QSGMII_MAC;
else
val |= PROC_CMD_SGMII_MAC;
ret = vsc8584_cmd(phydev, val);
if (ret)
return ret;
usleep_range(10000, 20000);
/* Disable SerDes for 100Base-FX */
ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
PROC_CMD_FIBER_PORT(vsc8531->addr) |
PROC_CMD_FIBER_DISABLE |
PROC_CMD_READ_MOD_WRITE_PORT |
PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX);
if (ret)
return ret;
/* Disable SerDes for 1000Base-X */
ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
PROC_CMD_FIBER_PORT(vsc8531->addr) |
PROC_CMD_FIBER_DISABLE |
PROC_CMD_READ_MOD_WRITE_PORT |
PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X);
if (ret)
return ret;
return vsc85xx_sd6g_config_v2(phydev);
}
static int vsc8574_config_host_serdes(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
int ret;
u16 val;
ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
if (ret)
return ret;
val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
val &= ~MAC_CFG_MASK;
if (phydev->interface == PHY_INTERFACE_MODE_QSGMII) {
val |= MAC_CFG_QSGMII;
} else if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
val |= MAC_CFG_SGMII;
} else if (phy_interface_is_rgmii(phydev)) {
val |= MAC_CFG_RGMII;
} else {
ret = -EINVAL;
return ret;
}
ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
if (ret)
return ret;
ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_STANDARD);
if (ret)
return ret;
if (!phy_interface_is_rgmii(phydev)) {
val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
PROC_CMD_READ_MOD_WRITE_PORT;
if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
val |= PROC_CMD_QSGMII_MAC;
else
val |= PROC_CMD_SGMII_MAC;
ret = vsc8584_cmd(phydev, val);
if (ret)
return ret;
usleep_range(10000, 20000);
}
/* Disable SerDes for 100Base-FX */
ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
PROC_CMD_FIBER_PORT(vsc8531->addr) |
PROC_CMD_FIBER_DISABLE |
PROC_CMD_READ_MOD_WRITE_PORT |
PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX);
if (ret)
return ret;
/* Disable SerDes for 1000Base-X */
return vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
PROC_CMD_FIBER_PORT(vsc8531->addr) |
PROC_CMD_FIBER_DISABLE |
PROC_CMD_READ_MOD_WRITE_PORT |
PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X);
}
static int vsc8584_config_init(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
int ret, i;
u16 val;
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
phy_lock_mdio_bus(phydev);
/* Some parts of the init sequence are identical for every PHY in the
 * package. Some parts modify the GPIO register bank, which is a set of
 * registers affecting all PHYs, and a few reset the microprocessor
 * common to all PHYs. The CRC check responsible for checking the
 * firmware within the 8051 microprocessor can only be accessed via the
 * PHY whose internal address in the package is 0. All PHYs' interrupt
 * mask registers have to be zeroed before enabling any PHY's interrupt
 * in this register.
 * For all these reasons, the init sequence must be done once and only
 * once, by whichever PHY in the package happens to be initialized
 * first, and that PHY performs the correct init sequence for all
 * package-critical parts in the pre-init function.
 */
if (phy_package_init_once(phydev)) {
/* The following switch statement assumes that the lowest
* nibble of the phy_id_mask is always 0. This works because
* the lowest nibble of the PHY_ID's below are also 0.
*/
WARN_ON(phydev->drv->phy_id_mask & 0xf);
switch (phydev->phy_id & phydev->drv->phy_id_mask) {
case PHY_ID_VSC8504:
case PHY_ID_VSC8552:
case PHY_ID_VSC8572:
case PHY_ID_VSC8574:
ret = vsc8574_config_pre_init(phydev);
if (ret)
goto err;
ret = vsc8574_config_host_serdes(phydev);
if (ret)
goto err;
break;
case PHY_ID_VSC856X:
case PHY_ID_VSC8575:
case PHY_ID_VSC8582:
case PHY_ID_VSC8584:
ret = vsc8584_config_pre_init(phydev);
if (ret)
goto err;
ret = vsc8584_config_host_serdes(phydev);
if (ret)
goto err;
vsc85xx_coma_mode_release(phydev);
break;
default:
ret = -EINVAL;
break;
}
if (ret)
goto err;
}
phy_unlock_mdio_bus(phydev);
ret = vsc8584_macsec_init(phydev);
if (ret)
return ret;
ret = vsc8584_ptp_init(phydev);
if (ret)
return ret;
val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
val &= ~(MEDIA_OP_MODE_MASK | VSC8584_MAC_IF_SELECTION_MASK);
val |= (MEDIA_OP_MODE_COPPER << MEDIA_OP_MODE_POS) |
(VSC8584_MAC_IF_SELECTION_SGMII << VSC8584_MAC_IF_SELECTION_POS);
ret = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, val);
if (ret)
return ret;
ret = vsc85xx_update_rgmii_cntl(phydev, VSC8572_RGMII_CNTL,
VSC8572_RGMII_RX_DELAY_MASK,
VSC8572_RGMII_TX_DELAY_MASK);
if (ret)
return ret;
ret = genphy_soft_reset(phydev);
if (ret)
return ret;
for (i = 0; i < vsc8531->nleds; i++) {
ret = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]);
if (ret)
return ret;
}
return 0;
err:
phy_unlock_mdio_bus(phydev);
return ret;
}
static irqreturn_t vsc8584_handle_interrupt(struct phy_device *phydev)
{
irqreturn_t ret;
int irq_status;
irq_status = phy_read(phydev, MII_VSC85XX_INT_STATUS);
if (irq_status < 0)
return IRQ_NONE;
/* Timestamping IRQ does not set a bit in the global INT_STATUS, so
* irq_status would be 0.
*/
ret = vsc8584_handle_ts_interrupt(phydev);
if (!(irq_status & MII_VSC85XX_INT_MASK_MASK))
return ret;
if (irq_status & MII_VSC85XX_INT_MASK_EXT)
vsc8584_handle_macsec_interrupt(phydev);
if (irq_status & MII_VSC85XX_INT_MASK_LINK_CHG)
phy_trigger_machine(phydev);
return IRQ_HANDLED;
}
static int vsc85xx_config_init(struct phy_device *phydev)
{
int rc, i, phy_id;
struct vsc8531_private *vsc8531 = phydev->priv;
rc = vsc85xx_default_config(phydev);
if (rc)
return rc;
rc = vsc85xx_mac_if_set(phydev, phydev->interface);
if (rc)
return rc;
rc = vsc85xx_edge_rate_cntl_set(phydev, vsc8531->rate_magic);
if (rc)
return rc;
phy_id = phydev->drv->phy_id & phydev->drv->phy_id_mask;
if (PHY_ID_VSC8531 == phy_id || PHY_ID_VSC8541 == phy_id ||
PHY_ID_VSC8530 == phy_id || PHY_ID_VSC8540 == phy_id) {
rc = vsc8531_pre_init_seq_set(phydev);
if (rc)
return rc;
}
rc = vsc85xx_eee_init_seq_set(phydev);
if (rc)
return rc;
for (i = 0; i < vsc8531->nleds; i++) {
rc = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]);
if (rc)
return rc;
}
return 0;
}
static int __phy_write_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb,
u32 op)
{
unsigned long deadline;
u32 val;
int ret;
ret = vsc85xx_csr_write(phydev, PHY_MCB_TARGET, reg,
op | (1 << mcb));
if (ret)
return -EINVAL;
deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
do {
usleep_range(500, 1000);
val = vsc85xx_csr_read(phydev, PHY_MCB_TARGET, reg);
if (val == 0xffffffff)
return -EIO;
} while (time_before(jiffies, deadline) && (val & op));
if (val & op)
return -ETIMEDOUT;
return 0;
}
/* Trigger a read to the specified MCB */
int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
{
return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_READ);
}
/* Trigger a write to the specified MCB */
int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
{
return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_WRITE);
}
static int vsc8514_config_host_serdes(struct phy_device *phydev)
{
int ret;
u16 val;
ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
if (ret)
return ret;
val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
val &= ~MAC_CFG_MASK;
val |= MAC_CFG_QSGMII;
ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
if (ret)
return ret;
ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_STANDARD);
if (ret)
return ret;
ret = vsc8584_cmd(phydev, PROC_CMD_NOP);
if (ret)
return ret;
ret = vsc8584_cmd(phydev,
PROC_CMD_MCB_ACCESS_MAC_CONF |
PROC_CMD_RST_CONF_PORT |
PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_QSGMII_MAC);
if (ret) {
dev_err(&phydev->mdio.dev, "%s: QSGMII error: %d\n",
__func__, ret);
return ret;
}
/* Apply the 6G SerDes FOJI algorithm.
 * Initial condition requirements:
 * 1. hold the 8051 in reset
 * 2. disable patch vector 0, in order to allow IB cal polling during FOJI
 * 3. deassert the 8051 reset after changing the patch vector status
 * 4. proceed with FOJI (vsc85xx_sd6g_config_v2)
 */
vsc8584_micro_assert_reset(phydev);
val = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
/* clear bit 8, to disable patch vector 0 */
val &= ~PATCH_VEC_ZERO_EN;
ret = phy_base_write(phydev, MSCC_INT_MEM_CNTL, val);
/* Enable 8051 clock, don't set patch present, disable PRAM clock override */
vsc8584_micro_deassert_reset(phydev, false);
return vsc85xx_sd6g_config_v2(phydev);
}
static int vsc8514_config_pre_init(struct phy_device *phydev)
{
/* These settings override the silicon default values to account for
 * the hardware performance of the PHY. They are applied at power-on
 * and remain in effect until the PHY is reset.
 */
static const struct reg_val pre_init1[] = {
{0x0f90, 0x00688980},
{0x0786, 0x00000003},
{0x07fa, 0x0050100f},
{0x0f82, 0x0012b002},
{0x1686, 0x00000004},
{0x168c, 0x00d2c46f},
{0x17a2, 0x00000620},
{0x16a0, 0x00eeffdd},
{0x16a6, 0x00071448},
{0x16a4, 0x0013132f},
{0x16a8, 0x00000000},
{0x0ffc, 0x00c0a028},
{0x0fe8, 0x0091b06c},
{0x0fea, 0x00041600},
{0x0f80, 0x00fffaff},
{0x0fec, 0x00901809},
{0x0ffe, 0x00b01007},
{0x16b0, 0x00eeff00},
{0x16b2, 0x00007000},
{0x16b4, 0x00000814},
};
struct device *dev = &phydev->mdio.dev;
unsigned int i;
u16 reg;
int ret;
ret = vsc8584_pll5g_reset(phydev);
if (ret < 0) {
dev_err(dev, "failed LCPLL reset, ret: %d\n", ret);
return ret;
}
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
/* all writes below are broadcasted to all PHYs in the same package */
reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
reg |= SMI_BROADCAST_WR_EN;
phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
reg |= BIT(15);
phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
for (i = 0; i < ARRAY_SIZE(pre_init1); i++)
vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
reg &= ~BIT(15);
phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
reg &= ~SMI_BROADCAST_WR_EN;
phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
/* Add pre-patching commands to:
 * 1. enable the 8051 clock and operate it at 125 MHz instead of
 *    the HW default of 62.5 MHz
 * 2. write patch vector 0, to skip the IB cal polling executed
 *    as part of the 0x80E0 ROM command
 */
vsc8584_micro_deassert_reset(phydev, false);
vsc8584_micro_assert_reset(phydev);
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
/* ROM address to trap, for patch vector 0 */
reg = MSCC_ROM_TRAP_SERDES_6G_CFG;
ret = phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), reg);
if (ret)
goto err;
/* RAM address to jump to, when patch vector 0 enabled */
reg = MSCC_RAM_TRAP_SERDES_6G_CFG;
ret = phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), reg);
if (ret)
goto err;
reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
reg |= PATCH_VEC_ZERO_EN; /* bit 8, enable patch vector 0 */
ret = phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
if (ret)
goto err;
/* Enable 8051 clock, don't set patch present
* yet, disable PRAM clock override
*/
vsc8584_micro_deassert_reset(phydev, false);
return ret;
err:
/* restore the 8051 and bail out with the error */
vsc8584_micro_deassert_reset(phydev, false);
return ret;
}
static int vsc8514_config_init(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
int ret, i;
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
phy_lock_mdio_bus(phydev);
/* Some parts of the init sequence are identical for every PHY in the
 * package. Some parts modify the GPIO register bank, which is a set of
 * registers affecting all PHYs, and a few reset the microprocessor
 * common to all PHYs.
 * All PHYs' interrupt mask registers have to be zeroed before enabling
 * any PHY's interrupt in this register.
 * For all these reasons, the init sequence must be done once and only
 * once, by whichever PHY in the package happens to be initialized
 * first, and that PHY performs the correct init sequence for all
 * package-critical parts in the pre-init function.
 */
if (phy_package_init_once(phydev)) {
ret = vsc8514_config_pre_init(phydev);
if (ret)
goto err;
ret = vsc8514_config_host_serdes(phydev);
if (ret)
goto err;
vsc85xx_coma_mode_release(phydev);
}
phy_unlock_mdio_bus(phydev);
ret = phy_modify(phydev, MSCC_PHY_EXT_PHY_CNTL_1, MEDIA_OP_MODE_MASK,
MEDIA_OP_MODE_COPPER << MEDIA_OP_MODE_POS);
if (ret)
return ret;
ret = genphy_soft_reset(phydev);
if (ret)
return ret;
for (i = 0; i < vsc8531->nleds; i++) {
ret = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]);
if (ret)
return ret;
}
return ret;
err:
phy_unlock_mdio_bus(phydev);
return ret;
}
static int vsc85xx_ack_interrupt(struct phy_device *phydev)
{
int rc = 0;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
return (rc < 0) ? rc : 0;
}
static int vsc85xx_config_intr(struct phy_device *phydev)
{
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
rc = vsc85xx_ack_interrupt(phydev);
if (rc)
return rc;
vsc8584_config_macsec_intr(phydev);
vsc8584_config_ts_intr(phydev);
rc = phy_write(phydev, MII_VSC85XX_INT_MASK,
MII_VSC85XX_INT_MASK_MASK);
} else {
rc = phy_write(phydev, MII_VSC85XX_INT_MASK, 0);
if (rc < 0)
return rc;
rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
if (rc < 0)
return rc;
rc = vsc85xx_ack_interrupt(phydev);
}
return rc;
}
static irqreturn_t vsc85xx_handle_interrupt(struct phy_device *phydev)
{
int irq_status;
irq_status = phy_read(phydev, MII_VSC85XX_INT_STATUS);
if (irq_status < 0) {
phy_error(phydev);
return IRQ_NONE;
}
if (!(irq_status & MII_VSC85XX_INT_MASK_MASK))
return IRQ_NONE;
phy_trigger_machine(phydev);
return IRQ_HANDLED;
}
static int vsc85xx_config_aneg(struct phy_device *phydev)
{
int rc;
rc = vsc85xx_mdix_set(phydev, phydev->mdix_ctrl);
if (rc < 0)
return rc;
return genphy_config_aneg(phydev);
}
static int vsc85xx_read_status(struct phy_device *phydev)
{
int rc;
rc = vsc85xx_mdix_get(phydev, &phydev->mdix);
if (rc < 0)
return rc;
return genphy_read_status(phydev);
}
static int vsc8514_probe(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531;
u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
VSC8531_DUPLEX_COLLISION};
vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
if (!vsc8531)
return -ENOMEM;
phydev->priv = vsc8531;
vsc8584_get_base_addr(phydev);
devm_phy_package_join(&phydev->mdio.dev, phydev,
vsc8531->base_addr, 0);
vsc8531->nleds = 4;
vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
vsc8531->hw_stats = vsc85xx_hw_stats;
vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
return vsc85xx_dt_led_modes_get(phydev, default_mode);
}
static int vsc8574_probe(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531;
u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
VSC8531_DUPLEX_COLLISION};
vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
if (!vsc8531)
return -ENOMEM;
phydev->priv = vsc8531;
vsc8584_get_base_addr(phydev);
devm_phy_package_join(&phydev->mdio.dev, phydev,
vsc8531->base_addr, 0);
vsc8531->nleds = 4;
vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
vsc8531->hw_stats = vsc8584_hw_stats;
vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
return vsc85xx_dt_led_modes_get(phydev, default_mode);
}
static int vsc8584_probe(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531;
u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
VSC8531_DUPLEX_COLLISION};
int ret;
if ((phydev->phy_id & MSCC_DEV_REV_MASK) != VSC8584_REVB) {
dev_err(&phydev->mdio.dev, "Only VSC8584 revB is supported.\n");
return -ENOTSUPP;
}
vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
if (!vsc8531)
return -ENOMEM;
phydev->priv = vsc8531;
vsc8584_get_base_addr(phydev);
devm_phy_package_join(&phydev->mdio.dev, phydev, vsc8531->base_addr,
sizeof(struct vsc85xx_shared_private));
vsc8531->nleds = 4;
vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
vsc8531->hw_stats = vsc8584_hw_stats;
vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
if (phy_package_probe_once(phydev)) {
ret = vsc8584_ptp_probe_once(phydev);
if (ret)
return ret;
}
ret = vsc8584_ptp_probe(phydev);
if (ret)
return ret;
return vsc85xx_dt_led_modes_get(phydev, default_mode);
}
static int vsc85xx_probe(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531;
int rate_magic;
u32 default_mode[2] = {VSC8531_LINK_1000_ACTIVITY,
VSC8531_LINK_100_ACTIVITY};
rate_magic = vsc85xx_edge_rate_magic_get(phydev);
if (rate_magic < 0)
return rate_magic;
vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
if (!vsc8531)
return -ENOMEM;
phydev->priv = vsc8531;
vsc8531->rate_magic = rate_magic;
vsc8531->nleds = 2;
vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
vsc8531->hw_stats = vsc85xx_hw_stats;
vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
return vsc85xx_dt_led_modes_get(phydev, default_mode);
}
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
.phy_id = PHY_ID_VSC8501,
.name = "Microsemi GE VSC8501 SyncE",
.phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
.handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc85xx_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8502,
.name = "Microsemi GE VSC8502 SyncE",
.phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
.handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc85xx_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8504,
.name = "Microsemi GE VSC8504 SyncE",
.phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8514,
.name = "Microsemi GE VSC8514 SyncE",
.phy_id_mask = 0xfffffff0,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8514_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
.handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8514_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8530,
.name = "Microsemi FE VSC8530",
.phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
.handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc85xx_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8531,
.name = "Microsemi VSC8531",
.phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
.handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc85xx_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8540,
.name = "Microsemi FE VSC8540 SyncE",
.phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
.handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc85xx_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8541,
.name = "Microsemi VSC8541 SyncE",
.phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
.handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc85xx_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8552,
.name = "Microsemi GE VSC8552 SyncE",
.phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
.handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC856X,
.name = "Microsemi GE VSC856X SyncE",
.phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
.handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8572,
.name = "Microsemi GE VSC8572 SyncE",
.phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8574,
.name = "Microsemi GE VSC8574 SyncE",
.phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8575,
.name = "Microsemi GE VSC8575 SyncE",
.phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8582,
.name = "Microsemi GE VSC8582 SyncE",
.phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8584,
.name = "Microsemi GE VSC8584 SyncE",
.phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
.read_page = &vsc85xx_phy_read_page,
.write_page = &vsc85xx_phy_write_page,
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
.link_change_notify = &vsc85xx_link_change_notify,
}
};
module_phy_driver(vsc85xx_driver);
static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
{ PHY_ID_MATCH_VENDOR(PHY_VENDOR_MSCC) },
{ }
};
MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl);
MODULE_DESCRIPTION("Microsemi VSC85xx PHY driver");
MODULE_AUTHOR("Nagaraju Lakkaraju");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_FIRMWARE(MSCC_VSC8584_REVB_INT8051_FW);
MODULE_FIRMWARE(MSCC_VSC8574_REVB_INT8051_FW);
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2016 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <[email protected]>
*/
#ifndef __XFS_AG_RESV_H__
#define __XFS_AG_RESV_H__
void xfs_ag_resv_free(struct xfs_perag *pag);
int xfs_ag_resv_init(struct xfs_perag *pag, struct xfs_trans *tp);
bool xfs_ag_resv_critical(struct xfs_perag *pag, enum xfs_ag_resv_type type);
xfs_extlen_t xfs_ag_resv_needed(struct xfs_perag *pag,
enum xfs_ag_resv_type type);
void xfs_ag_resv_alloc_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
struct xfs_alloc_arg *args);
void xfs_ag_resv_free_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
struct xfs_trans *tp, xfs_extlen_t len);
static inline struct xfs_ag_resv *
xfs_perag_resv(
struct xfs_perag *pag,
enum xfs_ag_resv_type type)
{
switch (type) {
case XFS_AG_RESV_METADATA:
return &pag->pag_meta_resv;
case XFS_AG_RESV_RMAPBT:
return &pag->pag_rmapbt_resv;
default:
return NULL;
}
}
#endif /* __XFS_AG_RESV_H__ */
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI TRF7970a RFID/NFC Transceiver Driver
*
* Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
*
* Author: Erick Macias <[email protected]>
* Author: Felipe Balbi <[email protected]>
* Author: Mark A. Greer <[email protected]>
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/nfc.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <net/nfc/nfc.h>
#include <net/nfc/digital.h>
/* There are 3 ways the host can communicate with the trf7970a:
* parallel mode, SPI with Slave Select (SS) mode, and SPI without
* SS mode. The driver only supports the two SPI modes.
*
* The trf7970a is very timing sensitive and the VIN, EN2, and EN
* pins must be asserted in that order and with specific delays in between.
* The delays used in the driver were provided by TI and have been
* confirmed to work with this driver. There is a bug with the current
* version of the trf7970a that requires that EN2 remain low no matter
* what. If it goes high, it will generate an RF field even when in
* passive target mode. TI has indicated that the chip will work okay
* when EN2 is left low. The 'en2-rf-quirk' device tree property
* indicates that the trf7970a currently being used has the erratum and
* that EN2 must be kept low.
*
* Timeouts are implemented using the delayed workqueue kernel facility.
* Timeouts are required so things don't hang when there is no response
* from the trf7970a (or tag). Using this mechanism creates a race with
* interrupts, however. That is, an interrupt and a timeout could occur
* closely enough together that one is blocked by the mutex while the other
* executes. When the timeout handler executes first and blocks the
* interrupt handler, it will eventually set the state to IDLE so the
* interrupt handler will check the state and exit with no harm done.
* When the interrupt handler executes first and blocks the timeout handler,
* the cancel_delayed_work() call will know that it didn't cancel the
* work item (i.e., timeout) and will return zero. That return code is
* used by the timer handler to indicate that it should ignore the timeout
* once it's unblocked.
*
* Aborting an active command isn't as simple as it seems because the only
* way to abort a command that's already been sent to the tag is to turn
* off power to the tag. If we do that, though, we'd have to go through
* the entire anticollision procedure again but the digital layer doesn't
* support that. So, if an abort is received before trf7970a_send_cmd()
* has sent the command to the tag, it simply returns -ECANCELED. If the
* command has already been sent to the tag, then the driver continues
* normally and receives the response data (or error) but just before
* sending the data upstream, it frees the rx_skb and sends -ECANCELED
* upstream instead. If the command failed, that error will be sent
* upstream.
*
* When receiving data from a tag and the interrupt status register has
* only the SRX bit set, it means that all of the data has been received
* (once what's in the FIFO has been read). However, depending on timing,
* an interrupt status with only the SRX bit set may not be received. In
* those cases, the timeout mechanism is used to wait 20 ms in case more
* data arrives. After 20 ms, it is assumed that all of the data has been
* received and the accumulated rx data is sent upstream. The
* 'TRF7970A_ST_WAIT_FOR_RX_DATA_CONT' state is used for this purpose
* (i.e., it indicates that some data has been received but we're not sure
* if there is more coming so a timeout in this state means all data has
* been received and there isn't an error). The delay is 20 ms since delays
* of ~16 ms have been observed during testing.
*
* When transmitting a frame larger than the FIFO size (127 bytes), the
* driver will wait 20 ms for the FIFO to drain past the low-watermark
* and generate an interrupt. The low-watermark is set to 32 bytes so the
* interrupt should fire after 127 - 32 = 95 bytes have been sent. At
* the lowest possible bit rate (6.62 kbps for 15693), it will take up
* to ~14.35 ms so 20 ms is used for the timeout.
*
* Type 2 write and sector select commands respond with a 4-bit ACK or NACK.
* Having only 4 bits in the FIFO won't normally generate an interrupt so
* the driver enables the '4_bit_RX' bit of the Special Functions register 1
* to cause an interrupt in that case. Leaving that bit for a read command
* messes up the data returned so it is only enabled when the framing is
* 'NFC_DIGITAL_FRAMING_NFCA_T2T' and the command is not a read command.
* Unfortunately, that means that the driver has to peek into tx frames
* when the framing is 'NFC_DIGITAL_FRAMING_NFCA_T2T'. This is done by
* the trf7970a_per_cmd_config() routine.
*
* ISO/IEC 15693 frames specify whether to use single or double sub-carrier
* frequencies and whether to use low or high data rates in the flags byte
* of the frame. This means that the driver has to peek at all 15693 frames
* to determine what speed to set the communication to. In addition, write
* and lock commands use the OPTION flag to indicate that an EOF must be
* sent to the tag before it will send its response. So the driver has to
* examine all frames for that reason too.
*
* It is unclear how long to wait before sending the EOF. According to the
* Note under Table 1-1 in section 1.6 of
* http://www.ti.com/lit/ug/scbu011/scbu011.pdf, that wait should be at least
* 10 ms for TI Tag-it HF-I tags; however testing has shown that is not long
* enough so 20 ms is used. So the timer is set to 40 ms: 20 ms to drain
* up to 127 bytes from the FIFO at the lowest bit rate, plus another
* 20 ms to ensure the wait is long enough before sending the EOF. This seems to work
* reliably.
*/
#define TRF7970A_SUPPORTED_PROTOCOLS \
(NFC_PROTO_MIFARE_MASK | NFC_PROTO_ISO14443_MASK | \
NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_FELICA_MASK | \
NFC_PROTO_ISO15693_MASK | NFC_PROTO_NFC_DEP_MASK)
#define TRF7970A_AUTOSUSPEND_DELAY 30000 /* 30 seconds */
#define TRF7970A_13MHZ_CLOCK_FREQUENCY 13560000
#define TRF7970A_27MHZ_CLOCK_FREQUENCY 27120000
#define TRF7970A_RX_SKB_ALLOC_SIZE 256
#define TRF7970A_FIFO_SIZE 127
/* TX length is 3 nibbles long ==> 4KB - 1 bytes max */
#define TRF7970A_TX_MAX (4096 - 1)
#define TRF7970A_WAIT_FOR_TX_IRQ 20
#define TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT 20
#define TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT 20
#define TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF 40
/* Guard times for various RF technologies (in us) */
#define TRF7970A_GUARD_TIME_NFCA 5000
#define TRF7970A_GUARD_TIME_NFCB 5000
#define TRF7970A_GUARD_TIME_NFCF 20000
#define TRF7970A_GUARD_TIME_15693 1000
/* Quirks */
/* Erratum: When reading the IRQ Status register on the trf7970a, we must issue a
* read continuous command for IRQ Status and Collision Position registers.
*/
#define TRF7970A_QUIRK_IRQ_STATUS_READ BIT(0)
#define TRF7970A_QUIRK_EN2_MUST_STAY_LOW BIT(1)
/* Direct commands */
#define TRF7970A_CMD_IDLE 0x00
#define TRF7970A_CMD_SOFT_INIT 0x03
#define TRF7970A_CMD_RF_COLLISION 0x04
#define TRF7970A_CMD_RF_COLLISION_RESPONSE_N 0x05
#define TRF7970A_CMD_RF_COLLISION_RESPONSE_0 0x06
#define TRF7970A_CMD_FIFO_RESET 0x0f
#define TRF7970A_CMD_TRANSMIT_NO_CRC 0x10
#define TRF7970A_CMD_TRANSMIT 0x11
#define TRF7970A_CMD_DELAY_TRANSMIT_NO_CRC 0x12
#define TRF7970A_CMD_DELAY_TRANSMIT 0x13
#define TRF7970A_CMD_EOF 0x14
#define TRF7970A_CMD_CLOSE_SLOT 0x15
#define TRF7970A_CMD_BLOCK_RX 0x16
#define TRF7970A_CMD_ENABLE_RX 0x17
#define TRF7970A_CMD_TEST_INT_RF 0x18
#define TRF7970A_CMD_TEST_EXT_RF 0x19
#define TRF7970A_CMD_RX_GAIN_ADJUST 0x1a
/* Bits determining whether it's a direct command or a register R/W,
* whether to use a continuous SPI transaction or not, and the actual
* direct cmd opcode or register address.
*/
#define TRF7970A_CMD_BIT_CTRL BIT(7)
#define TRF7970A_CMD_BIT_RW BIT(6)
#define TRF7970A_CMD_BIT_CONTINUOUS BIT(5)
#define TRF7970A_CMD_BIT_OPCODE(opcode) ((opcode) & 0x1f)
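/* Worked example (derived from the definitions above and below): a
 * continuous-mode read of the IRQ Status register is encoded as
 *
 *	TRF7970A_IRQ_STATUS | TRF7970A_CMD_BIT_RW |
 *		TRF7970A_CMD_BIT_CONTINUOUS	(0x0c | 0x40 | 0x20 = 0x6c)
 *
 * while the Soft Init direct command is encoded as
 *
 *	TRF7970A_CMD_BIT_CTRL |
 *		TRF7970A_CMD_BIT_OPCODE(TRF7970A_CMD_SOFT_INIT)	(0x80 | 0x03 = 0x83)
 */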
/* Register addresses */
#define TRF7970A_CHIP_STATUS_CTRL 0x00
#define TRF7970A_ISO_CTRL 0x01
#define TRF7970A_ISO14443B_TX_OPTIONS 0x02
#define TRF7970A_ISO14443A_HIGH_BITRATE_OPTIONS 0x03
#define TRF7970A_TX_TIMER_SETTING_H_BYTE 0x04
#define TRF7970A_TX_TIMER_SETTING_L_BYTE 0x05
#define TRF7970A_TX_PULSE_LENGTH_CTRL 0x06
#define TRF7970A_RX_NO_RESPONSE_WAIT 0x07
#define TRF7970A_RX_WAIT_TIME 0x08
#define TRF7970A_MODULATOR_SYS_CLK_CTRL 0x09
#define TRF7970A_RX_SPECIAL_SETTINGS 0x0a
#define TRF7970A_REG_IO_CTRL 0x0b
#define TRF7970A_IRQ_STATUS 0x0c
#define TRF7970A_COLLISION_IRQ_MASK 0x0d
#define TRF7970A_COLLISION_POSITION 0x0e
#define TRF7970A_RSSI_OSC_STATUS 0x0f
#define TRF7970A_SPECIAL_FCN_REG1 0x10
#define TRF7970A_SPECIAL_FCN_REG2 0x11
#define TRF7970A_RAM1 0x12
#define TRF7970A_RAM2 0x13
#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS 0x14
#define TRF7970A_NFC_LOW_FIELD_LEVEL 0x16
#define TRF7970A_NFCID1 0x17
#define TRF7970A_NFC_TARGET_LEVEL 0x18
#define TRF79070A_NFC_TARGET_PROTOCOL 0x19
#define TRF7970A_TEST_REGISTER1 0x1a
#define TRF7970A_TEST_REGISTER2 0x1b
#define TRF7970A_FIFO_STATUS 0x1c
#define TRF7970A_TX_LENGTH_BYTE1 0x1d
#define TRF7970A_TX_LENGTH_BYTE2 0x1e
#define TRF7970A_FIFO_IO_REGISTER 0x1f
/* Chip Status Control Register Bits */
#define TRF7970A_CHIP_STATUS_VRS5_3 BIT(0)
#define TRF7970A_CHIP_STATUS_REC_ON BIT(1)
#define TRF7970A_CHIP_STATUS_AGC_ON BIT(2)
#define TRF7970A_CHIP_STATUS_PM_ON BIT(3)
#define TRF7970A_CHIP_STATUS_RF_PWR BIT(4)
#define TRF7970A_CHIP_STATUS_RF_ON BIT(5)
#define TRF7970A_CHIP_STATUS_DIRECT BIT(6)
#define TRF7970A_CHIP_STATUS_STBY BIT(7)
/* ISO Control Register Bits */
#define TRF7970A_ISO_CTRL_15693_SGL_1OF4_662 0x00
#define TRF7970A_ISO_CTRL_15693_SGL_1OF256_662 0x01
#define TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648 0x02
#define TRF7970A_ISO_CTRL_15693_SGL_1OF256_2648 0x03
#define TRF7970A_ISO_CTRL_15693_DBL_1OF4_667a 0x04
#define TRF7970A_ISO_CTRL_15693_DBL_1OF256_667 0x05
#define TRF7970A_ISO_CTRL_15693_DBL_1OF4_2669 0x06
#define TRF7970A_ISO_CTRL_15693_DBL_1OF256_2669 0x07
#define TRF7970A_ISO_CTRL_14443A_106 0x08
#define TRF7970A_ISO_CTRL_14443A_212 0x09
#define TRF7970A_ISO_CTRL_14443A_424 0x0a
#define TRF7970A_ISO_CTRL_14443A_848 0x0b
#define TRF7970A_ISO_CTRL_14443B_106 0x0c
#define TRF7970A_ISO_CTRL_14443B_212 0x0d
#define TRF7970A_ISO_CTRL_14443B_424 0x0e
#define TRF7970A_ISO_CTRL_14443B_848 0x0f
#define TRF7970A_ISO_CTRL_FELICA_212 0x1a
#define TRF7970A_ISO_CTRL_FELICA_424 0x1b
#define TRF7970A_ISO_CTRL_NFC_NFCA_106 0x01
#define TRF7970A_ISO_CTRL_NFC_NFCF_212 0x02
#define TRF7970A_ISO_CTRL_NFC_NFCF_424 0x03
#define TRF7970A_ISO_CTRL_NFC_CE_14443A 0x00
#define TRF7970A_ISO_CTRL_NFC_CE_14443B 0x01
#define TRF7970A_ISO_CTRL_NFC_CE BIT(2)
#define TRF7970A_ISO_CTRL_NFC_ACTIVE BIT(3)
#define TRF7970A_ISO_CTRL_NFC_INITIATOR BIT(4)
#define TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE BIT(5)
#define TRF7970A_ISO_CTRL_RFID BIT(5)
#define TRF7970A_ISO_CTRL_DIR_MODE BIT(6)
#define TRF7970A_ISO_CTRL_RX_CRC_N BIT(7) /* true == No CRC */
#define TRF7970A_ISO_CTRL_RFID_SPEED_MASK 0x1f
/* Modulator and SYS_CLK Control Register Bits */
#define TRF7970A_MODULATOR_DEPTH(n) ((n) & 0x7)
#define TRF7970A_MODULATOR_DEPTH_ASK10 (TRF7970A_MODULATOR_DEPTH(0))
#define TRF7970A_MODULATOR_DEPTH_OOK (TRF7970A_MODULATOR_DEPTH(1))
#define TRF7970A_MODULATOR_DEPTH_ASK7 (TRF7970A_MODULATOR_DEPTH(2))
#define TRF7970A_MODULATOR_DEPTH_ASK8_5 (TRF7970A_MODULATOR_DEPTH(3))
#define TRF7970A_MODULATOR_DEPTH_ASK13 (TRF7970A_MODULATOR_DEPTH(4))
#define TRF7970A_MODULATOR_DEPTH_ASK16 (TRF7970A_MODULATOR_DEPTH(5))
#define TRF7970A_MODULATOR_DEPTH_ASK22 (TRF7970A_MODULATOR_DEPTH(6))
#define TRF7970A_MODULATOR_DEPTH_ASK30 (TRF7970A_MODULATOR_DEPTH(7))
#define TRF7970A_MODULATOR_EN_ANA BIT(3)
#define TRF7970A_MODULATOR_CLK(n) (((n) & 0x3) << 4)
#define TRF7970A_MODULATOR_CLK_DISABLED (TRF7970A_MODULATOR_CLK(0))
#define TRF7970A_MODULATOR_CLK_3_6 (TRF7970A_MODULATOR_CLK(1))
#define TRF7970A_MODULATOR_CLK_6_13 (TRF7970A_MODULATOR_CLK(2))
#define TRF7970A_MODULATOR_CLK_13_27 (TRF7970A_MODULATOR_CLK(3))
#define TRF7970A_MODULATOR_EN_OOK BIT(6)
#define TRF7970A_MODULATOR_27MHZ BIT(7)
#define TRF7970A_RX_SPECIAL_SETTINGS_NO_LIM BIT(0)
#define TRF7970A_RX_SPECIAL_SETTINGS_AGCR BIT(1)
#define TRF7970A_RX_SPECIAL_SETTINGS_GD_0DB (0x0 << 2)
#define TRF7970A_RX_SPECIAL_SETTINGS_GD_5DB (0x1 << 2)
#define TRF7970A_RX_SPECIAL_SETTINGS_GD_10DB (0x2 << 2)
#define TRF7970A_RX_SPECIAL_SETTINGS_GD_15DB (0x3 << 2)
#define TRF7970A_RX_SPECIAL_SETTINGS_HBT BIT(4)
#define TRF7970A_RX_SPECIAL_SETTINGS_M848 BIT(5)
#define TRF7970A_RX_SPECIAL_SETTINGS_C424 BIT(6)
#define TRF7970A_RX_SPECIAL_SETTINGS_C212 BIT(7)
#define TRF7970A_REG_IO_CTRL_VRS(v) ((v) & 0x07)
#define TRF7970A_REG_IO_CTRL_IO_LOW BIT(5)
#define TRF7970A_REG_IO_CTRL_EN_EXT_PA BIT(6)
#define TRF7970A_REG_IO_CTRL_AUTO_REG BIT(7)
/* IRQ Status Register Bits */
#define TRF7970A_IRQ_STATUS_NORESP BIT(0) /* ISO15693 only */
#define TRF7970A_IRQ_STATUS_NFC_COL_ERROR BIT(0)
#define TRF7970A_IRQ_STATUS_COL BIT(1)
#define TRF7970A_IRQ_STATUS_FRAMING_EOF_ERROR BIT(2)
#define TRF7970A_IRQ_STATUS_NFC_RF BIT(2)
#define TRF7970A_IRQ_STATUS_PARITY_ERROR BIT(3)
#define TRF7970A_IRQ_STATUS_NFC_SDD BIT(3)
#define TRF7970A_IRQ_STATUS_CRC_ERROR BIT(4)
#define TRF7970A_IRQ_STATUS_NFC_PROTO_ERROR BIT(4)
#define TRF7970A_IRQ_STATUS_FIFO BIT(5)
#define TRF7970A_IRQ_STATUS_SRX BIT(6)
#define TRF7970A_IRQ_STATUS_TX BIT(7)
#define TRF7970A_IRQ_STATUS_ERROR \
(TRF7970A_IRQ_STATUS_COL | \
TRF7970A_IRQ_STATUS_FRAMING_EOF_ERROR | \
TRF7970A_IRQ_STATUS_PARITY_ERROR | \
TRF7970A_IRQ_STATUS_CRC_ERROR)
#define TRF7970A_RSSI_OSC_STATUS_RSSI_MASK (BIT(2) | BIT(1) | BIT(0))
#define TRF7970A_RSSI_OSC_STATUS_RSSI_X_MASK (BIT(5) | BIT(4) | BIT(3))
#define TRF7970A_RSSI_OSC_STATUS_RSSI_OSC_OK BIT(6)
#define TRF7970A_SPECIAL_FCN_REG1_COL_7_6 BIT(0)
#define TRF7970A_SPECIAL_FCN_REG1_14_ANTICOLL BIT(1)
#define TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX BIT(2)
#define TRF7970A_SPECIAL_FCN_REG1_SP_DIR_MODE BIT(3)
#define TRF7970A_SPECIAL_FCN_REG1_NEXT_SLOT_37US BIT(4)
#define TRF7970A_SPECIAL_FCN_REG1_PAR43 BIT(5)
#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_124 (0x0 << 2)
#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_120 (0x1 << 2)
#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_112 (0x2 << 2)
#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_96 (0x3 << 2)
#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_4 0x0
#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_8 0x1
#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_16 0x2
#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_32 0x3
#define TRF7970A_NFC_LOW_FIELD_LEVEL_RFDET(v) ((v) & 0x07)
#define TRF7970A_NFC_LOW_FIELD_LEVEL_CLEX_DIS BIT(7)
#define TRF7970A_NFC_TARGET_LEVEL_RFDET(v) ((v) & 0x07)
#define TRF7970A_NFC_TARGET_LEVEL_HI_RF BIT(3)
#define TRF7970A_NFC_TARGET_LEVEL_SDD_EN BIT(5)
#define TRF7970A_NFC_TARGET_LEVEL_LD_S_4BYTES (0x0 << 6)
#define TRF7970A_NFC_TARGET_LEVEL_LD_S_7BYTES (0x1 << 6)
#define TRF7970A_NFC_TARGET_LEVEL_LD_S_10BYTES (0x2 << 6)
#define TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_106 BIT(0)
#define TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_212 BIT(1)
#define TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_424 (BIT(0) | BIT(1))
#define TRF79070A_NFC_TARGET_PROTOCOL_PAS_14443B BIT(2)
#define TRF79070A_NFC_TARGET_PROTOCOL_PAS_106 BIT(3)
#define TRF79070A_NFC_TARGET_PROTOCOL_FELICA BIT(4)
#define TRF79070A_NFC_TARGET_PROTOCOL_RF_L BIT(6)
#define TRF79070A_NFC_TARGET_PROTOCOL_RF_H BIT(7)
#define TRF79070A_NFC_TARGET_PROTOCOL_106A \
(TRF79070A_NFC_TARGET_PROTOCOL_RF_H | \
TRF79070A_NFC_TARGET_PROTOCOL_RF_L | \
TRF79070A_NFC_TARGET_PROTOCOL_PAS_106 | \
TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_106)
#define TRF79070A_NFC_TARGET_PROTOCOL_106B \
(TRF79070A_NFC_TARGET_PROTOCOL_RF_H | \
TRF79070A_NFC_TARGET_PROTOCOL_RF_L | \
TRF79070A_NFC_TARGET_PROTOCOL_PAS_14443B | \
TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_106)
#define TRF79070A_NFC_TARGET_PROTOCOL_212F \
(TRF79070A_NFC_TARGET_PROTOCOL_RF_H | \
TRF79070A_NFC_TARGET_PROTOCOL_RF_L | \
TRF79070A_NFC_TARGET_PROTOCOL_FELICA | \
TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_212)
#define TRF79070A_NFC_TARGET_PROTOCOL_424F \
(TRF79070A_NFC_TARGET_PROTOCOL_RF_H | \
TRF79070A_NFC_TARGET_PROTOCOL_RF_L | \
TRF79070A_NFC_TARGET_PROTOCOL_FELICA | \
TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_424)
#define TRF7970A_FIFO_STATUS_OVERFLOW BIT(7)
/* NFC (ISO/IEC 14443A) Type 2 Tag commands */
#define NFC_T2T_CMD_READ 0x30
/* ISO 15693 command codes */
#define ISO15693_CMD_INVENTORY 0x01
#define ISO15693_CMD_READ_SINGLE_BLOCK 0x20
#define ISO15693_CMD_WRITE_SINGLE_BLOCK 0x21
#define ISO15693_CMD_LOCK_BLOCK 0x22
#define ISO15693_CMD_READ_MULTIPLE_BLOCK 0x23
#define ISO15693_CMD_WRITE_MULTIPLE_BLOCK 0x24
#define ISO15693_CMD_SELECT 0x25
#define ISO15693_CMD_RESET_TO_READY 0x26
#define ISO15693_CMD_WRITE_AFI 0x27
#define ISO15693_CMD_LOCK_AFI 0x28
#define ISO15693_CMD_WRITE_DSFID 0x29
#define ISO15693_CMD_LOCK_DSFID 0x2a
#define ISO15693_CMD_GET_SYSTEM_INFO 0x2b
#define ISO15693_CMD_GET_MULTIPLE_BLOCK_SECURITY_STATUS 0x2c
/* ISO 15693 request and response flags */
#define ISO15693_REQ_FLAG_SUB_CARRIER BIT(0)
#define ISO15693_REQ_FLAG_DATA_RATE BIT(1)
#define ISO15693_REQ_FLAG_INVENTORY BIT(2)
#define ISO15693_REQ_FLAG_PROTOCOL_EXT BIT(3)
#define ISO15693_REQ_FLAG_SELECT BIT(4)
#define ISO15693_REQ_FLAG_AFI BIT(4)
#define ISO15693_REQ_FLAG_ADDRESS BIT(5)
#define ISO15693_REQ_FLAG_NB_SLOTS BIT(5)
#define ISO15693_REQ_FLAG_OPTION BIT(6)
#define ISO15693_REQ_FLAG_SPEED_MASK \
(ISO15693_REQ_FLAG_SUB_CARRIER | ISO15693_REQ_FLAG_DATA_RATE)
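/* Editorial sketch (an assumption, not a driver helper): per the header
 * comment, 15693 write and lock commands carrying the OPTION flag need a
 * trailing EOF before the tag responds. Byte 0 of a request is the flags
 * byte and byte 1 the command code, so such a check could look like:
 *
 *	bool needs_eof = (req[0] & ISO15693_REQ_FLAG_OPTION) &&
 *			 (req[1] == ISO15693_CMD_WRITE_SINGLE_BLOCK ||
 *			  req[1] == ISO15693_CMD_LOCK_BLOCK);
 */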
enum trf7970a_state {
TRF7970A_ST_PWR_OFF,
TRF7970A_ST_RF_OFF,
TRF7970A_ST_IDLE,
TRF7970A_ST_IDLE_RX_BLOCKED,
TRF7970A_ST_WAIT_FOR_TX_FIFO,
TRF7970A_ST_WAIT_FOR_RX_DATA,
TRF7970A_ST_WAIT_FOR_RX_DATA_CONT,
TRF7970A_ST_WAIT_TO_ISSUE_EOF,
TRF7970A_ST_LISTENING,
TRF7970A_ST_LISTENING_MD,
TRF7970A_ST_MAX
};
struct trf7970a {
enum trf7970a_state state;
struct device *dev;
struct spi_device *spi;
struct regulator *vin_regulator;
struct regulator *vddio_regulator;
struct nfc_digital_dev *ddev;
u32 quirks;
bool is_initiator;
bool aborting;
struct sk_buff *tx_skb;
struct sk_buff *rx_skb;
nfc_digital_cmd_complete_t cb;
void *cb_arg;
u8 chip_status_ctrl;
u8 iso_ctrl;
u8 iso_ctrl_tech;
u8 modulator_sys_clk_ctrl;
u8 special_fcn_reg1;
u8 io_ctrl;
unsigned int guard_time;
int technology;
int framing;
u8 md_rf_tech;
u8 tx_cmd;
bool issue_eof;
struct gpio_desc *en_gpiod;
struct gpio_desc *en2_gpiod;
struct mutex lock;
unsigned int timeout;
bool ignore_timeout;
struct delayed_work timeout_work;
};
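/* Editorial sketch (illustrative only) of the timeout-vs-interrupt
 * convention described in the header comment: when the interrupt handler
 * runs first it cancels the pending timeout, and a zero return from
 * cancel_delayed_work() means the timeout handler is already running, so
 * ignore_timeout tells it to bail once it's unblocked:
 *
 *	trf->ignore_timeout = !cancel_delayed_work(&trf->timeout_work);
 */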
static int trf7970a_cmd(struct trf7970a *trf, u8 opcode)
{
u8 cmd = TRF7970A_CMD_BIT_CTRL | TRF7970A_CMD_BIT_OPCODE(opcode);
int ret;
dev_dbg(trf->dev, "cmd: 0x%x\n", cmd);
ret = spi_write(trf->spi, &cmd, 1);
if (ret)
dev_err(trf->dev, "%s - cmd: 0x%x, ret: %d\n", __func__, cmd,
ret);
return ret;
}
static int trf7970a_read(struct trf7970a *trf, u8 reg, u8 *val)
{
u8 addr = TRF7970A_CMD_BIT_RW | reg;
int ret;
ret = spi_write_then_read(trf->spi, &addr, 1, val, 1);
	if (ret)
		dev_err(trf->dev, "%s - addr: 0x%x, ret: %d\n", __func__, addr,
			ret);
	else
		dev_dbg(trf->dev, "read(0x%x): 0x%x\n", addr, *val);
return ret;
}
static int trf7970a_read_cont(struct trf7970a *trf, u8 reg, u8 *buf,
size_t len)
{
u8 addr = reg | TRF7970A_CMD_BIT_RW | TRF7970A_CMD_BIT_CONTINUOUS;
struct spi_transfer t[2];
struct spi_message m;
int ret;
dev_dbg(trf->dev, "read_cont(0x%x, %zd)\n", addr, len);
spi_message_init(&m);
memset(&t, 0, sizeof(t));
t[0].tx_buf = &addr;
t[0].len = sizeof(addr);
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
t[1].len = len;
spi_message_add_tail(&t[1], &m);
ret = spi_sync(trf->spi, &m);
if (ret)
dev_err(trf->dev, "%s - addr: 0x%x, ret: %d\n", __func__, addr,
ret);
return ret;
}
static int trf7970a_write(struct trf7970a *trf, u8 reg, u8 val)
{
u8 buf[2] = { reg, val };
int ret;
dev_dbg(trf->dev, "write(0x%x): 0x%x\n", reg, val);
ret = spi_write(trf->spi, buf, 2);
if (ret)
dev_err(trf->dev, "%s - write: 0x%x 0x%x, ret: %d\n", __func__,
buf[0], buf[1], ret);
return ret;
}
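/* Read the IRQ Status register.  Chips with the IRQ status read quirk
 * require an extra byte to be clocked out after the status byte, so
 * two bytes are read and the second one is discarded.
 */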
static int trf7970a_read_irqstatus(struct trf7970a *trf, u8 *status)
{
int ret;
u8 buf[2];
u8 addr;
addr = TRF7970A_IRQ_STATUS | TRF7970A_CMD_BIT_RW;
if (trf->quirks & TRF7970A_QUIRK_IRQ_STATUS_READ) {
addr |= TRF7970A_CMD_BIT_CONTINUOUS;
ret = spi_write_then_read(trf->spi, &addr, 1, buf, 2);
} else {
ret = spi_write_then_read(trf->spi, &addr, 1, buf, 1);
}
if (ret)
dev_err(trf->dev, "%s - irqstatus: Status read failed: %d\n",
__func__, ret);
else
*status = buf[0];
return ret;
}
static int trf7970a_read_target_proto(struct trf7970a *trf, u8 *target_proto)
{
int ret;
u8 buf[2];
u8 addr;
addr = TRF79070A_NFC_TARGET_PROTOCOL | TRF7970A_CMD_BIT_RW |
TRF7970A_CMD_BIT_CONTINUOUS;
ret = spi_write_then_read(trf->spi, &addr, 1, buf, 2);
if (ret)
dev_err(trf->dev, "%s - target_proto: Read failed: %d\n",
__func__, ret);
else
*target_proto = buf[0];
return ret;
}
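/* Figure out which RF technology the remote initiator is using by
 * examining the NFC Target Protocol register.
 */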
static int trf7970a_mode_detect(struct trf7970a *trf, u8 *rf_tech)
{
int ret;
u8 target_proto, tech;
ret = trf7970a_read_target_proto(trf, &target_proto);
if (ret)
return ret;
switch (target_proto) {
case TRF79070A_NFC_TARGET_PROTOCOL_106A:
tech = NFC_DIGITAL_RF_TECH_106A;
break;
case TRF79070A_NFC_TARGET_PROTOCOL_106B:
tech = NFC_DIGITAL_RF_TECH_106B;
break;
case TRF79070A_NFC_TARGET_PROTOCOL_212F:
tech = NFC_DIGITAL_RF_TECH_212F;
break;
case TRF79070A_NFC_TARGET_PROTOCOL_424F:
tech = NFC_DIGITAL_RF_TECH_424F;
break;
default:
dev_dbg(trf->dev, "%s - mode_detect: target_proto: 0x%x\n",
__func__, target_proto);
return -EIO;
}
*rf_tech = tech;
return ret;
}
static void trf7970a_send_upstream(struct trf7970a *trf)
{
dev_kfree_skb_any(trf->tx_skb);
trf->tx_skb = NULL;
if (trf->rx_skb && !IS_ERR(trf->rx_skb) && !trf->aborting)
print_hex_dump_debug("trf7970a rx data: ", DUMP_PREFIX_NONE,
16, 1, trf->rx_skb->data, trf->rx_skb->len,
false);
trf->state = TRF7970A_ST_IDLE;
if (trf->aborting) {
dev_dbg(trf->dev, "Abort process complete\n");
if (!IS_ERR(trf->rx_skb)) {
kfree_skb(trf->rx_skb);
trf->rx_skb = ERR_PTR(-ECANCELED);
}
trf->aborting = false;
}
trf->cb(trf->ddev, trf->cb_arg, trf->rx_skb);
trf->rx_skb = NULL;
}
static void trf7970a_send_err_upstream(struct trf7970a *trf, int errno)
{
dev_dbg(trf->dev, "Error - state: %d, errno: %d\n", trf->state, errno);
cancel_delayed_work(&trf->timeout_work);
kfree_skb(trf->rx_skb);
trf->rx_skb = ERR_PTR(errno);
trf7970a_send_upstream(trf);
}
static int trf7970a_transmit(struct trf7970a *trf, struct sk_buff *skb,
unsigned int len, const u8 *prefix,
unsigned int prefix_len)
{
struct spi_transfer t[2];
struct spi_message m;
unsigned int timeout;
int ret;
print_hex_dump_debug("trf7970a tx data: ", DUMP_PREFIX_NONE,
16, 1, skb->data, len, false);
spi_message_init(&m);
memset(&t, 0, sizeof(t));
t[0].tx_buf = prefix;
t[0].len = prefix_len;
spi_message_add_tail(&t[0], &m);
t[1].tx_buf = skb->data;
t[1].len = len;
spi_message_add_tail(&t[1], &m);
ret = spi_sync(trf->spi, &m);
if (ret) {
dev_err(trf->dev, "%s - Can't send tx data: %d\n", __func__,
ret);
return ret;
}
skb_pull(skb, len);
if (skb->len > 0) {
trf->state = TRF7970A_ST_WAIT_FOR_TX_FIFO;
timeout = TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT;
} else {
if (trf->issue_eof) {
trf->state = TRF7970A_ST_WAIT_TO_ISSUE_EOF;
timeout = TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF;
} else {
trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA;
if (!trf->timeout)
timeout = TRF7970A_WAIT_FOR_TX_IRQ;
else
timeout = trf->timeout;
}
}
dev_dbg(trf->dev, "Setting timeout for %d ms, state: %d\n", timeout,
trf->state);
schedule_delayed_work(&trf->timeout_work, msecs_to_jiffies(timeout));
return 0;
}
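/* Top up the chip's TX FIFO with as much of the remaining frame as
 * fits; if the FIFO hasn't drained at all yet, just rearm the drain
 * timeout and wait for the next FIFO interrupt.
 */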
static void trf7970a_fill_fifo(struct trf7970a *trf)
{
struct sk_buff *skb = trf->tx_skb;
unsigned int len;
int ret;
u8 fifo_bytes;
u8 prefix;
ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes);
if (ret) {
trf7970a_send_err_upstream(trf, ret);
return;
}
dev_dbg(trf->dev, "Filling FIFO - fifo_bytes: 0x%x\n", fifo_bytes);
fifo_bytes &= ~TRF7970A_FIFO_STATUS_OVERFLOW;
/* Calculate how much more data can be written to the fifo */
len = TRF7970A_FIFO_SIZE - fifo_bytes;
if (!len) {
schedule_delayed_work(&trf->timeout_work,
msecs_to_jiffies(TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT));
return;
}
len = min(skb->len, len);
prefix = TRF7970A_CMD_BIT_CONTINUOUS | TRF7970A_FIFO_IO_REGISTER;
ret = trf7970a_transmit(trf, skb, len, &prefix, sizeof(prefix));
if (ret)
trf7970a_send_err_upstream(trf, ret);
}
static void trf7970a_drain_fifo(struct trf7970a *trf, u8 status)
{
struct sk_buff *skb = trf->rx_skb;
int ret;
u8 fifo_bytes;
if (status & TRF7970A_IRQ_STATUS_ERROR) {
trf7970a_send_err_upstream(trf, -EIO);
return;
}
ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes);
if (ret) {
trf7970a_send_err_upstream(trf, ret);
return;
}
dev_dbg(trf->dev, "Draining FIFO - fifo_bytes: 0x%x\n", fifo_bytes);
fifo_bytes &= ~TRF7970A_FIFO_STATUS_OVERFLOW;
if (!fifo_bytes)
goto no_rx_data;
if (fifo_bytes > skb_tailroom(skb)) {
skb = skb_copy_expand(skb, skb_headroom(skb),
max_t(int, fifo_bytes,
TRF7970A_RX_SKB_ALLOC_SIZE),
GFP_KERNEL);
if (!skb) {
trf7970a_send_err_upstream(trf, -ENOMEM);
return;
}
kfree_skb(trf->rx_skb);
trf->rx_skb = skb;
}
ret = trf7970a_read_cont(trf, TRF7970A_FIFO_IO_REGISTER,
skb_put(skb, fifo_bytes), fifo_bytes);
if (ret) {
trf7970a_send_err_upstream(trf, ret);
return;
}
/* If received Type 2 ACK/NACK, shift right 4 bits and pass up */
if ((trf->framing == NFC_DIGITAL_FRAMING_NFCA_T2T) && (skb->len == 1) &&
(trf->special_fcn_reg1 == TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX)) {
skb->data[0] >>= 4;
status = TRF7970A_IRQ_STATUS_SRX;
} else {
trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA_CONT;
ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes);
if (ret) {
trf7970a_send_err_upstream(trf, ret);
return;
}
fifo_bytes &= ~TRF7970A_FIFO_STATUS_OVERFLOW;
		/* If there are bytes in the FIFO, set status to '0' so
		 * the if statement below doesn't fire and the driver will
		 * wait for the trf7970a to generate another RX interrupt.
		 */
if (fifo_bytes)
status = 0;
}
no_rx_data:
if (status == TRF7970A_IRQ_STATUS_SRX) { /* Receive complete */
trf7970a_send_upstream(trf);
return;
}
dev_dbg(trf->dev, "Setting timeout for %d ms\n",
TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT);
schedule_delayed_work(&trf->timeout_work,
msecs_to_jiffies(TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT));
}
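/* Threaded interrupt handler - all register accesses go over SPI and
 * can sleep, so the work is done here under the driver mutex and
 * dispatched on the current state machine state.
 */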
static irqreturn_t trf7970a_irq(int irq, void *dev_id)
{
struct trf7970a *trf = dev_id;
int ret;
u8 status, fifo_bytes, iso_ctrl;
mutex_lock(&trf->lock);
if (trf->state == TRF7970A_ST_RF_OFF) {
mutex_unlock(&trf->lock);
return IRQ_NONE;
}
ret = trf7970a_read_irqstatus(trf, &status);
if (ret) {
mutex_unlock(&trf->lock);
return IRQ_NONE;
}
dev_dbg(trf->dev, "IRQ - state: %d, status: 0x%x\n", trf->state,
status);
if (!status) {
mutex_unlock(&trf->lock);
return IRQ_NONE;
}
switch (trf->state) {
case TRF7970A_ST_IDLE:
case TRF7970A_ST_IDLE_RX_BLOCKED:
/* If initiator and getting interrupts caused by RF noise,
* turn off the receiver to avoid unnecessary interrupts.
* It will be turned back on in trf7970a_send_cmd() when
* the next command is issued.
*/
if (trf->is_initiator && (status & TRF7970A_IRQ_STATUS_ERROR)) {
trf7970a_cmd(trf, TRF7970A_CMD_BLOCK_RX);
trf->state = TRF7970A_ST_IDLE_RX_BLOCKED;
}
trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
break;
case TRF7970A_ST_WAIT_FOR_TX_FIFO:
if (status & TRF7970A_IRQ_STATUS_TX) {
trf->ignore_timeout =
!cancel_delayed_work(&trf->timeout_work);
trf7970a_fill_fifo(trf);
} else {
trf7970a_send_err_upstream(trf, -EIO);
}
break;
case TRF7970A_ST_WAIT_FOR_RX_DATA:
case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
if (status & TRF7970A_IRQ_STATUS_SRX) {
trf->ignore_timeout =
!cancel_delayed_work(&trf->timeout_work);
trf7970a_drain_fifo(trf, status);
} else if (status & TRF7970A_IRQ_STATUS_FIFO) {
ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS,
&fifo_bytes);
fifo_bytes &= ~TRF7970A_FIFO_STATUS_OVERFLOW;
if (ret)
trf7970a_send_err_upstream(trf, ret);
else if (!fifo_bytes)
trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
} else if ((status == TRF7970A_IRQ_STATUS_TX) ||
(!trf->is_initiator &&
(status == (TRF7970A_IRQ_STATUS_TX |
TRF7970A_IRQ_STATUS_NFC_RF)))) {
trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
if (!trf->timeout) {
trf->ignore_timeout =
!cancel_delayed_work(&trf->timeout_work);
trf->rx_skb = ERR_PTR(0);
trf7970a_send_upstream(trf);
break;
}
if (trf->is_initiator)
break;
iso_ctrl = trf->iso_ctrl;
switch (trf->framing) {
case NFC_DIGITAL_FRAMING_NFCA_STANDARD:
trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC;
iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
trf->iso_ctrl = 0xff; /* Force ISO_CTRL write */
break;
case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A:
trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N;
trf->iso_ctrl = 0xff; /* Force ISO_CTRL write */
break;
case NFC_DIGITAL_FRAMING_NFCA_ANTICOL_COMPLETE:
ret = trf7970a_write(trf,
TRF7970A_SPECIAL_FCN_REG1,
TRF7970A_SPECIAL_FCN_REG1_14_ANTICOLL);
if (ret)
goto err_unlock_exit;
trf->special_fcn_reg1 =
TRF7970A_SPECIAL_FCN_REG1_14_ANTICOLL;
break;
default:
break;
}
if (iso_ctrl != trf->iso_ctrl) {
ret = trf7970a_write(trf, TRF7970A_ISO_CTRL,
iso_ctrl);
if (ret)
goto err_unlock_exit;
trf->iso_ctrl = iso_ctrl;
}
} else {
trf7970a_send_err_upstream(trf, -EIO);
}
break;
case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
if (status != TRF7970A_IRQ_STATUS_TX)
trf7970a_send_err_upstream(trf, -EIO);
break;
case TRF7970A_ST_LISTENING:
if (status & TRF7970A_IRQ_STATUS_SRX) {
trf->ignore_timeout =
!cancel_delayed_work(&trf->timeout_work);
trf7970a_drain_fifo(trf, status);
} else if (!(status & TRF7970A_IRQ_STATUS_NFC_RF)) {
trf7970a_send_err_upstream(trf, -EIO);
}
break;
case TRF7970A_ST_LISTENING_MD:
if (status & TRF7970A_IRQ_STATUS_SRX) {
trf->ignore_timeout =
!cancel_delayed_work(&trf->timeout_work);
ret = trf7970a_mode_detect(trf, &trf->md_rf_tech);
if (ret) {
trf7970a_send_err_upstream(trf, ret);
} else {
trf->state = TRF7970A_ST_LISTENING;
trf7970a_drain_fifo(trf, status);
}
} else if (!(status & TRF7970A_IRQ_STATUS_NFC_RF)) {
trf7970a_send_err_upstream(trf, -EIO);
}
break;
default:
dev_err(trf->dev, "%s - Driver in invalid state: %d\n",
__func__, trf->state);
}
err_unlock_exit:
mutex_unlock(&trf->lock);
return IRQ_HANDLED;
}
static void trf7970a_issue_eof(struct trf7970a *trf)
{
int ret;
dev_dbg(trf->dev, "Issuing EOF\n");
ret = trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
if (ret)
trf7970a_send_err_upstream(trf, ret);
ret = trf7970a_cmd(trf, TRF7970A_CMD_EOF);
if (ret)
trf7970a_send_err_upstream(trf, ret);
trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA;
dev_dbg(trf->dev, "Setting timeout for %d ms, state: %d\n",
trf->timeout, trf->state);
schedule_delayed_work(&trf->timeout_work,
msecs_to_jiffies(trf->timeout));
}
static void trf7970a_timeout_work_handler(struct work_struct *work)
{
struct trf7970a *trf = container_of(work, struct trf7970a,
timeout_work.work);
dev_dbg(trf->dev, "Timeout - state: %d, ignore_timeout: %d\n",
trf->state, trf->ignore_timeout);
mutex_lock(&trf->lock);
if (trf->ignore_timeout)
trf->ignore_timeout = false;
else if (trf->state == TRF7970A_ST_WAIT_FOR_RX_DATA_CONT)
trf7970a_drain_fifo(trf, TRF7970A_IRQ_STATUS_SRX);
else if (trf->state == TRF7970A_ST_WAIT_TO_ISSUE_EOF)
trf7970a_issue_eof(trf);
else
trf7970a_send_err_upstream(trf, -ETIMEDOUT);
mutex_unlock(&trf->lock);
}
static int trf7970a_init(struct trf7970a *trf)
{
int ret;
dev_dbg(trf->dev, "Initializing device - state: %d\n", trf->state);
ret = trf7970a_cmd(trf, TRF7970A_CMD_SOFT_INIT);
if (ret)
goto err_out;
ret = trf7970a_cmd(trf, TRF7970A_CMD_IDLE);
if (ret)
goto err_out;
ret = trf7970a_write(trf, TRF7970A_REG_IO_CTRL,
trf->io_ctrl | TRF7970A_REG_IO_CTRL_VRS(0x1));
if (ret)
goto err_out;
ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL, 0);
if (ret)
goto err_out;
usleep_range(1000, 2000);
trf->chip_status_ctrl &= ~TRF7970A_CHIP_STATUS_RF_ON;
ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL,
trf->modulator_sys_clk_ctrl);
if (ret)
goto err_out;
ret = trf7970a_write(trf, TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS,
TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_96 |
TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_32);
if (ret)
goto err_out;
ret = trf7970a_write(trf, TRF7970A_SPECIAL_FCN_REG1, 0);
if (ret)
goto err_out;
trf->special_fcn_reg1 = 0;
trf->iso_ctrl = 0xff;
return 0;
err_out:
dev_dbg(trf->dev, "Couldn't init device: %d\n", ret);
return ret;
}
static void trf7970a_switch_rf_off(struct trf7970a *trf)
{
if ((trf->state == TRF7970A_ST_PWR_OFF) ||
(trf->state == TRF7970A_ST_RF_OFF))
return;
dev_dbg(trf->dev, "Switching rf off\n");
trf->chip_status_ctrl &= ~TRF7970A_CHIP_STATUS_RF_ON;
trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, trf->chip_status_ctrl);
trf->aborting = false;
trf->state = TRF7970A_ST_RF_OFF;
pm_runtime_mark_last_busy(trf->dev);
pm_runtime_put_autosuspend(trf->dev);
}
static int trf7970a_switch_rf_on(struct trf7970a *trf)
{
int ret;
dev_dbg(trf->dev, "Switching rf on\n");
pm_runtime_get_sync(trf->dev);
if (trf->state != TRF7970A_ST_RF_OFF) { /* Power on, RF off */
dev_err(trf->dev, "%s - Incorrect state: %d\n", __func__,
trf->state);
return -EINVAL;
}
ret = trf7970a_init(trf);
if (ret) {
dev_err(trf->dev, "%s - Can't initialize: %d\n", __func__, ret);
return ret;
}
trf->state = TRF7970A_ST_IDLE;
return 0;
}
static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
{
struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
int ret = 0;
dev_dbg(trf->dev, "Switching RF - state: %d, on: %d\n", trf->state, on);
mutex_lock(&trf->lock);
if (on) {
switch (trf->state) {
case TRF7970A_ST_PWR_OFF:
case TRF7970A_ST_RF_OFF:
ret = trf7970a_switch_rf_on(trf);
break;
case TRF7970A_ST_IDLE:
case TRF7970A_ST_IDLE_RX_BLOCKED:
break;
default:
dev_err(trf->dev, "%s - Invalid request: %d %d\n",
__func__, trf->state, on);
trf7970a_switch_rf_off(trf);
ret = -EINVAL;
}
} else {
switch (trf->state) {
case TRF7970A_ST_PWR_OFF:
case TRF7970A_ST_RF_OFF:
break;
default:
dev_err(trf->dev, "%s - Invalid request: %d %d\n",
__func__, trf->state, on);
ret = -EINVAL;
fallthrough;
case TRF7970A_ST_IDLE:
case TRF7970A_ST_IDLE_RX_BLOCKED:
case TRF7970A_ST_WAIT_FOR_RX_DATA:
case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
trf7970a_switch_rf_off(trf);
}
}
mutex_unlock(&trf->lock);
return ret;
}
static int trf7970a_in_config_rf_tech(struct trf7970a *trf, int tech)
{
int ret = 0;
dev_dbg(trf->dev, "rf technology: %d\n", tech);
switch (tech) {
case NFC_DIGITAL_RF_TECH_106A:
trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443A_106;
trf->modulator_sys_clk_ctrl =
(trf->modulator_sys_clk_ctrl & 0xf8) |
TRF7970A_MODULATOR_DEPTH_OOK;
trf->guard_time = TRF7970A_GUARD_TIME_NFCA;
break;
case NFC_DIGITAL_RF_TECH_106B:
trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443B_106;
trf->modulator_sys_clk_ctrl =
(trf->modulator_sys_clk_ctrl & 0xf8) |
TRF7970A_MODULATOR_DEPTH_ASK10;
trf->guard_time = TRF7970A_GUARD_TIME_NFCB;
break;
case NFC_DIGITAL_RF_TECH_212F:
trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_FELICA_212;
trf->modulator_sys_clk_ctrl =
(trf->modulator_sys_clk_ctrl & 0xf8) |
TRF7970A_MODULATOR_DEPTH_ASK10;
trf->guard_time = TRF7970A_GUARD_TIME_NFCF;
break;
case NFC_DIGITAL_RF_TECH_424F:
trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_FELICA_424;
trf->modulator_sys_clk_ctrl =
(trf->modulator_sys_clk_ctrl & 0xf8) |
TRF7970A_MODULATOR_DEPTH_ASK10;
trf->guard_time = TRF7970A_GUARD_TIME_NFCF;
break;
case NFC_DIGITAL_RF_TECH_ISO15693:
trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
trf->modulator_sys_clk_ctrl =
(trf->modulator_sys_clk_ctrl & 0xf8) |
TRF7970A_MODULATOR_DEPTH_OOK;
trf->guard_time = TRF7970A_GUARD_TIME_15693;
break;
default:
dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech);
return -EINVAL;
}
trf->technology = tech;
/* If in initiator mode and not changing the RF tech due to a
* PSL sequence (indicated by 'trf->iso_ctrl == 0xff' from
* trf7970a_init()), clear the NFC Target Detection Level register
* due to erratum.
*/
if (trf->iso_ctrl == 0xff)
ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL, 0);
return ret;
}
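/* Check for an externally generated RF field: briefly enable the
 * receiver, latch the external field level with the TEST_EXT_RF direct
 * command, and report whether the measured RSSI is nonzero.
 */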
static int trf7970a_is_rf_field(struct trf7970a *trf, bool *is_rf_field)
{
int ret;
u8 rssi;
ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL,
trf->chip_status_ctrl |
TRF7970A_CHIP_STATUS_REC_ON);
if (ret)
return ret;
ret = trf7970a_cmd(trf, TRF7970A_CMD_TEST_EXT_RF);
if (ret)
return ret;
usleep_range(50, 60);
ret = trf7970a_read(trf, TRF7970A_RSSI_OSC_STATUS, &rssi);
if (ret)
return ret;
ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL,
trf->chip_status_ctrl);
if (ret)
return ret;
if (rssi & TRF7970A_RSSI_OSC_STATUS_RSSI_MASK)
*is_rf_field = true;
else
*is_rf_field = false;
return 0;
}
static int trf7970a_in_config_framing(struct trf7970a *trf, int framing)
{
u8 iso_ctrl = trf->iso_ctrl_tech;
bool is_rf_field = false;
int ret;
dev_dbg(trf->dev, "framing: %d\n", framing);
switch (framing) {
case NFC_DIGITAL_FRAMING_NFCA_SHORT:
case NFC_DIGITAL_FRAMING_NFCA_STANDARD:
trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC;
iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
break;
case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A:
case NFC_DIGITAL_FRAMING_NFCA_T4T:
case NFC_DIGITAL_FRAMING_NFCB:
case NFC_DIGITAL_FRAMING_NFCB_T4T:
case NFC_DIGITAL_FRAMING_NFCF:
case NFC_DIGITAL_FRAMING_NFCF_T3T:
case NFC_DIGITAL_FRAMING_ISO15693_INVENTORY:
case NFC_DIGITAL_FRAMING_ISO15693_T5T:
case NFC_DIGITAL_FRAMING_NFCA_NFC_DEP:
case NFC_DIGITAL_FRAMING_NFCF_NFC_DEP:
trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N;
break;
case NFC_DIGITAL_FRAMING_NFCA_T2T:
trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
break;
default:
dev_dbg(trf->dev, "Unsupported Framing: %d\n", framing);
return -EINVAL;
}
trf->framing = framing;
if (!(trf->chip_status_ctrl & TRF7970A_CHIP_STATUS_RF_ON)) {
ret = trf7970a_is_rf_field(trf, &is_rf_field);
if (ret)
return ret;
if (is_rf_field)
return -EBUSY;
}
if (iso_ctrl != trf->iso_ctrl) {
ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl);
if (ret)
return ret;
trf->iso_ctrl = iso_ctrl;
ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL,
trf->modulator_sys_clk_ctrl);
if (ret)
return ret;
}
if (!(trf->chip_status_ctrl & TRF7970A_CHIP_STATUS_RF_ON)) {
ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL,
trf->chip_status_ctrl |
TRF7970A_CHIP_STATUS_RF_ON);
if (ret)
return ret;
trf->chip_status_ctrl |= TRF7970A_CHIP_STATUS_RF_ON;
usleep_range(trf->guard_time, trf->guard_time + 1000);
}
return 0;
}
static int trf7970a_in_configure_hw(struct nfc_digital_dev *ddev, int type,
int param)
{
struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
int ret;
dev_dbg(trf->dev, "Configure hw - type: %d, param: %d\n", type, param);
mutex_lock(&trf->lock);
trf->is_initiator = true;
if ((trf->state == TRF7970A_ST_PWR_OFF) ||
(trf->state == TRF7970A_ST_RF_OFF)) {
ret = trf7970a_switch_rf_on(trf);
if (ret)
goto err_unlock;
}
switch (type) {
case NFC_DIGITAL_CONFIG_RF_TECH:
ret = trf7970a_in_config_rf_tech(trf, param);
break;
case NFC_DIGITAL_CONFIG_FRAMING:
ret = trf7970a_in_config_framing(trf, param);
break;
default:
dev_dbg(trf->dev, "Unknown type: %d\n", type);
ret = -EINVAL;
}
err_unlock:
mutex_unlock(&trf->lock);
return ret;
}
static int trf7970a_is_iso15693_write_or_lock(u8 cmd)
{
switch (cmd) {
case ISO15693_CMD_WRITE_SINGLE_BLOCK:
case ISO15693_CMD_LOCK_BLOCK:
case ISO15693_CMD_WRITE_MULTIPLE_BLOCK:
case ISO15693_CMD_WRITE_AFI:
case ISO15693_CMD_LOCK_AFI:
case ISO15693_CMD_WRITE_DSFID:
case ISO15693_CMD_LOCK_DSFID:
return 1;
default:
return 0;
}
}
static int trf7970a_per_cmd_config(struct trf7970a *trf,
const struct sk_buff *skb)
{
const u8 *req = skb->data;
u8 special_fcn_reg1, iso_ctrl;
int ret;
trf->issue_eof = false;
/* When issuing Type 2 read command, make sure the '4_bit_RX' bit in
	 * special functions register 1 is cleared; otherwise, it's a write or
* sector select command and '4_bit_RX' must be set.
*
* When issuing an ISO 15693 command, inspect the flags byte to see
* what speed to use. Also, remember if the OPTION flag is set on
* a Type 5 write or lock command so the driver will know that it
* has to send an EOF in order to get a response.
*/
if ((trf->technology == NFC_DIGITAL_RF_TECH_106A) &&
(trf->framing == NFC_DIGITAL_FRAMING_NFCA_T2T)) {
if (req[0] == NFC_T2T_CMD_READ)
special_fcn_reg1 = 0;
else
special_fcn_reg1 = TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX;
if (special_fcn_reg1 != trf->special_fcn_reg1) {
ret = trf7970a_write(trf, TRF7970A_SPECIAL_FCN_REG1,
special_fcn_reg1);
if (ret)
return ret;
trf->special_fcn_reg1 = special_fcn_reg1;
}
} else if (trf->technology == NFC_DIGITAL_RF_TECH_ISO15693) {
iso_ctrl = trf->iso_ctrl & ~TRF7970A_ISO_CTRL_RFID_SPEED_MASK;
switch (req[0] & ISO15693_REQ_FLAG_SPEED_MASK) {
case 0x00:
iso_ctrl |= TRF7970A_ISO_CTRL_15693_SGL_1OF4_662;
break;
case ISO15693_REQ_FLAG_SUB_CARRIER:
iso_ctrl |= TRF7970A_ISO_CTRL_15693_DBL_1OF4_667a;
break;
case ISO15693_REQ_FLAG_DATA_RATE:
iso_ctrl |= TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
break;
case (ISO15693_REQ_FLAG_SUB_CARRIER |
ISO15693_REQ_FLAG_DATA_RATE):
iso_ctrl |= TRF7970A_ISO_CTRL_15693_DBL_1OF4_2669;
break;
}
if (iso_ctrl != trf->iso_ctrl) {
ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl);
if (ret)
return ret;
trf->iso_ctrl = iso_ctrl;
}
if ((trf->framing == NFC_DIGITAL_FRAMING_ISO15693_T5T) &&
trf7970a_is_iso15693_write_or_lock(req[1]) &&
(req[0] & ISO15693_REQ_FLAG_OPTION))
trf->issue_eof = true;
}
return 0;
}
static int trf7970a_send_cmd(struct nfc_digital_dev *ddev,
struct sk_buff *skb, u16 timeout,
nfc_digital_cmd_complete_t cb, void *arg)
{
struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
u8 prefix[5];
unsigned int len;
int ret;
u8 status;
dev_dbg(trf->dev, "New request - state: %d, timeout: %d ms, len: %d\n",
trf->state, timeout, skb->len);
if (skb->len > TRF7970A_TX_MAX)
return -EINVAL;
mutex_lock(&trf->lock);
if ((trf->state != TRF7970A_ST_IDLE) &&
(trf->state != TRF7970A_ST_IDLE_RX_BLOCKED)) {
dev_err(trf->dev, "%s - Bogus state: %d\n", __func__,
trf->state);
ret = -EIO;
goto out_err;
}
if (trf->aborting) {
dev_dbg(trf->dev, "Abort process complete\n");
trf->aborting = false;
ret = -ECANCELED;
goto out_err;
}
if (timeout) {
trf->rx_skb = nfc_alloc_recv_skb(TRF7970A_RX_SKB_ALLOC_SIZE,
GFP_KERNEL);
if (!trf->rx_skb) {
dev_dbg(trf->dev, "Can't alloc rx_skb\n");
ret = -ENOMEM;
goto out_err;
}
}
if (trf->state == TRF7970A_ST_IDLE_RX_BLOCKED) {
ret = trf7970a_cmd(trf, TRF7970A_CMD_ENABLE_RX);
if (ret)
goto out_err;
trf->state = TRF7970A_ST_IDLE;
}
if (trf->is_initiator) {
ret = trf7970a_per_cmd_config(trf, skb);
if (ret)
goto out_err;
}
trf->ddev = ddev;
trf->tx_skb = skb;
trf->cb = cb;
trf->cb_arg = arg;
trf->timeout = timeout;
trf->ignore_timeout = false;
len = skb->len;
/* TX data must be prefixed with a FIFO reset cmd, a cmd that depends
* on what the current framing is, the address of the TX length byte 1
* register (0x1d), and the 2 byte length of the data to be transmitted.
* That totals 5 bytes.
*/
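	/* For example, a 0x25 byte frame yields prefix[3] = 0x02 and
	 * prefix[4] = 0x50: the 12-bit length occupies the upper nibbles,
	 * and the low nibble of the second length byte carries the
	 * broken-byte bit count (only used for short frames, below).
	 */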
prefix[0] = TRF7970A_CMD_BIT_CTRL |
TRF7970A_CMD_BIT_OPCODE(TRF7970A_CMD_FIFO_RESET);
prefix[1] = TRF7970A_CMD_BIT_CTRL |
TRF7970A_CMD_BIT_OPCODE(trf->tx_cmd);
prefix[2] = TRF7970A_CMD_BIT_CONTINUOUS | TRF7970A_TX_LENGTH_BYTE1;
if (trf->framing == NFC_DIGITAL_FRAMING_NFCA_SHORT) {
prefix[3] = 0x00;
prefix[4] = 0x0f; /* 7 bits */
} else {
prefix[3] = (len & 0xf00) >> 4;
prefix[3] |= ((len & 0xf0) >> 4);
prefix[4] = ((len & 0x0f) << 4);
}
len = min_t(int, skb->len, TRF7970A_FIFO_SIZE);
/* Clear possible spurious interrupt */
ret = trf7970a_read_irqstatus(trf, &status);
if (ret)
goto out_err;
ret = trf7970a_transmit(trf, skb, len, prefix, sizeof(prefix));
if (ret) {
kfree_skb(trf->rx_skb);
trf->rx_skb = NULL;
}
out_err:
mutex_unlock(&trf->lock);
return ret;
}
static int trf7970a_tg_config_rf_tech(struct trf7970a *trf, int tech)
{
int ret = 0;
dev_dbg(trf->dev, "rf technology: %d\n", tech);
switch (tech) {
case NFC_DIGITAL_RF_TECH_106A:
trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE |
TRF7970A_ISO_CTRL_NFC_CE | TRF7970A_ISO_CTRL_NFC_CE_14443A;
trf->modulator_sys_clk_ctrl =
(trf->modulator_sys_clk_ctrl & 0xf8) |
TRF7970A_MODULATOR_DEPTH_OOK;
break;
case NFC_DIGITAL_RF_TECH_212F:
trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE |
TRF7970A_ISO_CTRL_NFC_NFCF_212;
trf->modulator_sys_clk_ctrl =
(trf->modulator_sys_clk_ctrl & 0xf8) |
TRF7970A_MODULATOR_DEPTH_ASK10;
break;
case NFC_DIGITAL_RF_TECH_424F:
trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE |
TRF7970A_ISO_CTRL_NFC_NFCF_424;
trf->modulator_sys_clk_ctrl =
(trf->modulator_sys_clk_ctrl & 0xf8) |
TRF7970A_MODULATOR_DEPTH_ASK10;
break;
default:
dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech);
return -EINVAL;
}
trf->technology = tech;
/* Normally we write the ISO_CTRL register in
* trf7970a_tg_config_framing() because the framing can change
* the value written. However, when sending a PSL RES,
* digital_tg_send_psl_res_complete() doesn't call
* trf7970a_tg_config_framing() so we must write the register
* here.
*/
if ((trf->framing == NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED) &&
(trf->iso_ctrl_tech != trf->iso_ctrl)) {
ret = trf7970a_write(trf, TRF7970A_ISO_CTRL,
trf->iso_ctrl_tech);
trf->iso_ctrl = trf->iso_ctrl_tech;
}
return ret;
}
/* Since this is a target routine, several of the framing calls are
 * made between receiving the request and sending the response, so they
 * shouldn't take effect until after the response is sent.  This is
 * accomplished by skipping the ISO_CTRL register write here and doing
 * it in the interrupt handler.
 */
static int trf7970a_tg_config_framing(struct trf7970a *trf, int framing)
{
u8 iso_ctrl = trf->iso_ctrl_tech;
int ret;
dev_dbg(trf->dev, "framing: %d\n", framing);
switch (framing) {
case NFC_DIGITAL_FRAMING_NFCA_NFC_DEP:
trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC;
iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
break;
case NFC_DIGITAL_FRAMING_NFCA_STANDARD:
case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A:
case NFC_DIGITAL_FRAMING_NFCA_ANTICOL_COMPLETE:
/* These ones are applied in the interrupt handler */
iso_ctrl = trf->iso_ctrl; /* Don't write to ISO_CTRL yet */
break;
case NFC_DIGITAL_FRAMING_NFCF_NFC_DEP:
trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N;
break;
case NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED:
trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N;
break;
default:
dev_dbg(trf->dev, "Unsupported Framing: %d\n", framing);
return -EINVAL;
}
trf->framing = framing;
if (iso_ctrl != trf->iso_ctrl) {
ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl);
if (ret)
return ret;
trf->iso_ctrl = iso_ctrl;
ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL,
trf->modulator_sys_clk_ctrl);
if (ret)
return ret;
}
if (!(trf->chip_status_ctrl & TRF7970A_CHIP_STATUS_RF_ON)) {
ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL,
trf->chip_status_ctrl |
TRF7970A_CHIP_STATUS_RF_ON);
if (ret)
return ret;
trf->chip_status_ctrl |= TRF7970A_CHIP_STATUS_RF_ON;
}
return 0;
}
static int trf7970a_tg_configure_hw(struct nfc_digital_dev *ddev, int type,
int param)
{
struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
int ret;
dev_dbg(trf->dev, "Configure hw - type: %d, param: %d\n", type, param);
mutex_lock(&trf->lock);
trf->is_initiator = false;
if ((trf->state == TRF7970A_ST_PWR_OFF) ||
(trf->state == TRF7970A_ST_RF_OFF)) {
ret = trf7970a_switch_rf_on(trf);
if (ret)
goto err_unlock;
}
switch (type) {
case NFC_DIGITAL_CONFIG_RF_TECH:
ret = trf7970a_tg_config_rf_tech(trf, param);
break;
case NFC_DIGITAL_CONFIG_FRAMING:
ret = trf7970a_tg_config_framing(trf, param);
break;
default:
dev_dbg(trf->dev, "Unknown type: %d\n", type);
ret = -EINVAL;
}
err_unlock:
mutex_unlock(&trf->lock);
return ret;
}
static int _trf7970a_tg_listen(struct nfc_digital_dev *ddev, u16 timeout,
nfc_digital_cmd_complete_t cb, void *arg,
bool mode_detect)
{
struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
int ret;
mutex_lock(&trf->lock);
if ((trf->state != TRF7970A_ST_IDLE) &&
(trf->state != TRF7970A_ST_IDLE_RX_BLOCKED)) {
dev_err(trf->dev, "%s - Bogus state: %d\n", __func__,
trf->state);
ret = -EIO;
goto out_err;
}
if (trf->aborting) {
dev_dbg(trf->dev, "Abort process complete\n");
trf->aborting = false;
ret = -ECANCELED;
goto out_err;
}
trf->rx_skb = nfc_alloc_recv_skb(TRF7970A_RX_SKB_ALLOC_SIZE,
GFP_KERNEL);
if (!trf->rx_skb) {
dev_dbg(trf->dev, "Can't alloc rx_skb\n");
ret = -ENOMEM;
goto out_err;
}
ret = trf7970a_write(trf, TRF7970A_RX_SPECIAL_SETTINGS,
TRF7970A_RX_SPECIAL_SETTINGS_HBT |
TRF7970A_RX_SPECIAL_SETTINGS_M848 |
TRF7970A_RX_SPECIAL_SETTINGS_C424 |
TRF7970A_RX_SPECIAL_SETTINGS_C212);
if (ret)
goto out_err;
ret = trf7970a_write(trf, TRF7970A_REG_IO_CTRL,
trf->io_ctrl | TRF7970A_REG_IO_CTRL_VRS(0x1));
if (ret)
goto out_err;
ret = trf7970a_write(trf, TRF7970A_NFC_LOW_FIELD_LEVEL,
TRF7970A_NFC_LOW_FIELD_LEVEL_RFDET(0x3));
if (ret)
goto out_err;
ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL,
TRF7970A_NFC_TARGET_LEVEL_RFDET(0x7));
if (ret)
goto out_err;
trf->ddev = ddev;
trf->cb = cb;
trf->cb_arg = arg;
trf->timeout = timeout;
trf->ignore_timeout = false;
ret = trf7970a_cmd(trf, TRF7970A_CMD_ENABLE_RX);
if (ret)
goto out_err;
trf->state = mode_detect ? TRF7970A_ST_LISTENING_MD :
TRF7970A_ST_LISTENING;
schedule_delayed_work(&trf->timeout_work, msecs_to_jiffies(timeout));
out_err:
mutex_unlock(&trf->lock);
return ret;
}
static int trf7970a_tg_listen(struct nfc_digital_dev *ddev, u16 timeout,
nfc_digital_cmd_complete_t cb, void *arg)
{
const struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
dev_dbg(trf->dev, "Listen - state: %d, timeout: %d ms\n",
trf->state, timeout);
return _trf7970a_tg_listen(ddev, timeout, cb, arg, false);
}
static int trf7970a_tg_listen_md(struct nfc_digital_dev *ddev,
u16 timeout, nfc_digital_cmd_complete_t cb,
void *arg)
{
const struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
int ret;
dev_dbg(trf->dev, "Listen MD - state: %d, timeout: %d ms\n",
trf->state, timeout);
ret = trf7970a_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
NFC_DIGITAL_RF_TECH_106A);
if (ret)
return ret;
ret = trf7970a_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
NFC_DIGITAL_FRAMING_NFCA_NFC_DEP);
if (ret)
return ret;
return _trf7970a_tg_listen(ddev, timeout, cb, arg, true);
}
static int trf7970a_tg_get_rf_tech(struct nfc_digital_dev *ddev, u8 *rf_tech)
{
const struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
dev_dbg(trf->dev, "Get RF Tech - state: %d, rf_tech: %d\n",
trf->state, trf->md_rf_tech);
*rf_tech = trf->md_rf_tech;
return 0;
}
static void trf7970a_abort_cmd(struct nfc_digital_dev *ddev)
{
struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
dev_dbg(trf->dev, "Abort process initiated\n");
mutex_lock(&trf->lock);
switch (trf->state) {
case TRF7970A_ST_WAIT_FOR_TX_FIFO:
case TRF7970A_ST_WAIT_FOR_RX_DATA:
case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
trf->aborting = true;
break;
case TRF7970A_ST_LISTENING:
trf->ignore_timeout = !cancel_delayed_work(&trf->timeout_work);
trf7970a_send_err_upstream(trf, -ECANCELED);
dev_dbg(trf->dev, "Abort process complete\n");
break;
default:
break;
}
mutex_unlock(&trf->lock);
}
static const struct nfc_digital_ops trf7970a_nfc_ops = {
.in_configure_hw = trf7970a_in_configure_hw,
.in_send_cmd = trf7970a_send_cmd,
.tg_configure_hw = trf7970a_tg_configure_hw,
.tg_send_cmd = trf7970a_send_cmd,
.tg_listen = trf7970a_tg_listen,
.tg_listen_md = trf7970a_tg_listen_md,
.tg_get_rf_tech = trf7970a_tg_get_rf_tech,
.switch_rf = trf7970a_switch_rf,
.abort_cmd = trf7970a_abort_cmd,
};
static int trf7970a_power_up(struct trf7970a *trf)
{
int ret;
dev_dbg(trf->dev, "Powering up - state: %d\n", trf->state);
if (trf->state != TRF7970A_ST_PWR_OFF)
return 0;
ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
return ret;
}
usleep_range(5000, 6000);
if (trf->en2_gpiod &&
!(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW)) {
gpiod_set_value_cansleep(trf->en2_gpiod, 1);
usleep_range(1000, 2000);
}
gpiod_set_value_cansleep(trf->en_gpiod, 1);
usleep_range(20000, 21000);
trf->state = TRF7970A_ST_RF_OFF;
return 0;
}
static int trf7970a_power_down(struct trf7970a *trf)
{
int ret;
dev_dbg(trf->dev, "Powering down - state: %d\n", trf->state);
if (trf->state == TRF7970A_ST_PWR_OFF)
return 0;
if (trf->state != TRF7970A_ST_RF_OFF) {
dev_dbg(trf->dev, "Can't power down - not RF_OFF state (%d)\n",
trf->state);
return -EBUSY;
}
gpiod_set_value_cansleep(trf->en_gpiod, 0);
if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
gpiod_set_value_cansleep(trf->en2_gpiod, 0);
ret = regulator_disable(trf->vin_regulator);
if (ret)
dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
ret);
trf->state = TRF7970A_ST_PWR_OFF;
return ret;
}
static int trf7970a_startup(struct trf7970a *trf)
{
int ret;
ret = trf7970a_power_up(trf);
if (ret)
return ret;
pm_runtime_set_active(trf->dev);
pm_runtime_enable(trf->dev);
pm_runtime_mark_last_busy(trf->dev);
return 0;
}
static void trf7970a_shutdown(struct trf7970a *trf)
{
switch (trf->state) {
case TRF7970A_ST_WAIT_FOR_TX_FIFO:
case TRF7970A_ST_WAIT_FOR_RX_DATA:
case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
case TRF7970A_ST_LISTENING:
trf7970a_send_err_upstream(trf, -ECANCELED);
fallthrough;
case TRF7970A_ST_IDLE:
case TRF7970A_ST_IDLE_RX_BLOCKED:
trf7970a_switch_rf_off(trf);
break;
default:
break;
}
pm_runtime_disable(trf->dev);
pm_runtime_set_suspended(trf->dev);
trf7970a_power_down(trf);
}
static int trf7970a_get_autosuspend_delay(const struct device_node *np)
{
int autosuspend_delay, ret;
ret = of_property_read_u32(np, "autosuspend-delay", &autosuspend_delay);
if (ret)
autosuspend_delay = TRF7970A_AUTOSUSPEND_DELAY;
return autosuspend_delay;
}
static int trf7970a_probe(struct spi_device *spi)
{
const struct device_node *np = spi->dev.of_node;
struct trf7970a *trf;
int uvolts, autosuspend_delay, ret;
u32 clk_freq = TRF7970A_13MHZ_CLOCK_FREQUENCY;
if (!np) {
dev_err(&spi->dev, "No Device Tree entry\n");
return -EINVAL;
}
trf = devm_kzalloc(&spi->dev, sizeof(*trf), GFP_KERNEL);
if (!trf)
return -ENOMEM;
trf->state = TRF7970A_ST_PWR_OFF;
trf->dev = &spi->dev;
trf->spi = spi;
spi->mode = SPI_MODE_1;
spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret < 0) {
dev_err(trf->dev, "Can't set up SPI Communication\n");
return ret;
}
if (of_property_read_bool(np, "irq-status-read-quirk"))
trf->quirks |= TRF7970A_QUIRK_IRQ_STATUS_READ;
/* There are two enable pins - only EN must be present in the DT */
trf->en_gpiod = devm_gpiod_get_index(trf->dev, "ti,enable", 0,
GPIOD_OUT_LOW);
if (IS_ERR(trf->en_gpiod)) {
dev_err(trf->dev, "No EN GPIO property\n");
return PTR_ERR(trf->en_gpiod);
}
trf->en2_gpiod = devm_gpiod_get_index_optional(trf->dev, "ti,enable", 1,
GPIOD_OUT_LOW);
if (!trf->en2_gpiod) {
dev_info(trf->dev, "No EN2 GPIO property\n");
} else if (IS_ERR(trf->en2_gpiod)) {
dev_err(trf->dev, "Error getting EN2 GPIO property: %ld\n",
PTR_ERR(trf->en2_gpiod));
return PTR_ERR(trf->en2_gpiod);
} else if (of_property_read_bool(np, "en2-rf-quirk")) {
trf->quirks |= TRF7970A_QUIRK_EN2_MUST_STAY_LOW;
}
of_property_read_u32(np, "clock-frequency", &clk_freq);
if ((clk_freq != TRF7970A_27MHZ_CLOCK_FREQUENCY) &&
(clk_freq != TRF7970A_13MHZ_CLOCK_FREQUENCY)) {
dev_err(trf->dev,
"clock-frequency (%u Hz) unsupported\n", clk_freq);
return -EINVAL;
}
if (clk_freq == TRF7970A_27MHZ_CLOCK_FREQUENCY) {
trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_27MHZ;
dev_dbg(trf->dev, "trf7970a configured for 27MHz crystal\n");
} else {
trf->modulator_sys_clk_ctrl = 0;
}
ret = devm_request_threaded_irq(trf->dev, spi->irq, NULL,
trf7970a_irq,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"trf7970a", trf);
if (ret) {
dev_err(trf->dev, "Can't request IRQ#%d: %d\n", spi->irq, ret);
return ret;
}
mutex_init(&trf->lock);
INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
if (IS_ERR(trf->vin_regulator)) {
ret = PTR_ERR(trf->vin_regulator);
dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
goto err_destroy_lock;
}
ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
goto err_destroy_lock;
}
uvolts = regulator_get_voltage(trf->vin_regulator);
if (uvolts > 4000000)
trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
if (IS_ERR(trf->vddio_regulator)) {
ret = PTR_ERR(trf->vddio_regulator);
dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
goto err_disable_vin_regulator;
}
ret = regulator_enable(trf->vddio_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
goto err_disable_vin_regulator;
}
if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
}
trf->ddev = nfc_digital_allocate_device(&trf7970a_nfc_ops,
TRF7970A_SUPPORTED_PROTOCOLS,
NFC_DIGITAL_DRV_CAPS_IN_CRC |
NFC_DIGITAL_DRV_CAPS_TG_CRC, 0,
0);
if (!trf->ddev) {
dev_err(trf->dev, "Can't allocate NFC digital device\n");
ret = -ENOMEM;
goto err_disable_vddio_regulator;
}
nfc_digital_set_parent_dev(trf->ddev, trf->dev);
nfc_digital_set_drvdata(trf->ddev, trf);
spi_set_drvdata(spi, trf);
autosuspend_delay = trf7970a_get_autosuspend_delay(np);
pm_runtime_set_autosuspend_delay(trf->dev, autosuspend_delay);
pm_runtime_use_autosuspend(trf->dev);
ret = trf7970a_startup(trf);
if (ret)
goto err_free_ddev;
ret = nfc_digital_register_device(trf->ddev);
if (ret) {
dev_err(trf->dev, "Can't register NFC digital device: %d\n",
ret);
goto err_shutdown;
}
return 0;
err_shutdown:
trf7970a_shutdown(trf);
err_free_ddev:
nfc_digital_free_device(trf->ddev);
err_disable_vddio_regulator:
regulator_disable(trf->vddio_regulator);
err_disable_vin_regulator:
regulator_disable(trf->vin_regulator);
err_destroy_lock:
mutex_destroy(&trf->lock);
return ret;
}
static void trf7970a_remove(struct spi_device *spi)
{
struct trf7970a *trf = spi_get_drvdata(spi);
mutex_lock(&trf->lock);
trf7970a_shutdown(trf);
mutex_unlock(&trf->lock);
nfc_digital_unregister_device(trf->ddev);
nfc_digital_free_device(trf->ddev);
regulator_disable(trf->vddio_regulator);
regulator_disable(trf->vin_regulator);
mutex_destroy(&trf->lock);
}
#ifdef CONFIG_PM_SLEEP
static int trf7970a_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct trf7970a *trf = spi_get_drvdata(spi);
mutex_lock(&trf->lock);
trf7970a_shutdown(trf);
mutex_unlock(&trf->lock);
return 0;
}
static int trf7970a_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct trf7970a *trf = spi_get_drvdata(spi);
int ret;
mutex_lock(&trf->lock);
ret = trf7970a_startup(trf);
mutex_unlock(&trf->lock);
return ret;
}
#endif
#ifdef CONFIG_PM
static int trf7970a_pm_runtime_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct trf7970a *trf = spi_get_drvdata(spi);
int ret;
mutex_lock(&trf->lock);
ret = trf7970a_power_down(trf);
mutex_unlock(&trf->lock);
return ret;
}
static int trf7970a_pm_runtime_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct trf7970a *trf = spi_get_drvdata(spi);
int ret;
ret = trf7970a_power_up(trf);
if (!ret)
pm_runtime_mark_last_busy(dev);
return ret;
}
#endif
static const struct dev_pm_ops trf7970a_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(trf7970a_suspend, trf7970a_resume)
SET_RUNTIME_PM_OPS(trf7970a_pm_runtime_suspend,
trf7970a_pm_runtime_resume, NULL)
};
static const struct of_device_id trf7970a_of_match[] __maybe_unused = {
{.compatible = "ti,trf7970a",},
{},
};
MODULE_DEVICE_TABLE(of, trf7970a_of_match);
static const struct spi_device_id trf7970a_id_table[] = {
{"trf7970a", 0},
{}
};
MODULE_DEVICE_TABLE(spi, trf7970a_id_table);
static struct spi_driver trf7970a_spi_driver = {
.probe = trf7970a_probe,
.remove = trf7970a_remove,
.id_table = trf7970a_id_table,
.driver = {
.name = "trf7970a",
.of_match_table = of_match_ptr(trf7970a_of_match),
.pm = &trf7970a_pm_ops,
},
};
module_spi_driver(trf7970a_spi_driver);
MODULE_AUTHOR("Mark A. Greer <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI trf7970a RFID/NFC Transceiver Driver");
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
#ifndef __QPLIB_TLV_H__
#define __QPLIB_TLV_H__
struct roce_tlv {
struct tlv tlv;
u8 total_size; // in units of 16 byte chunks
u8 unused[7]; // for 16 byte alignment
};
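/* Commands and responses are sized in 16-byte chunks; CHUNKS() rounds
 * a byte count up to a whole number of chunks.
 */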
#define CHUNK_SIZE 16
#define CHUNKS(x) (((x) + CHUNK_SIZE - 1) / CHUNK_SIZE)
static inline void __roce_1st_tlv_prep(struct roce_tlv *rtlv, u8 tot_chunks,
u16 content_bytes, u8 flags)
{
rtlv->tlv.cmd_discr = cpu_to_le16(CMD_DISCR_TLV_ENCAP);
rtlv->tlv.tlv_type = cpu_to_le16(TLV_TYPE_ROCE_SP_COMMAND);
rtlv->tlv.length = cpu_to_le16(content_bytes);
rtlv->tlv.flags = TLV_FLAGS_REQUIRED;
rtlv->tlv.flags |= flags ? TLV_FLAGS_MORE : 0;
	rtlv->total_size = tot_chunks;
}
static inline void __roce_ext_tlv_prep(struct roce_tlv *rtlv, u16 tlv_type,
u16 content_bytes, u8 more, u8 flags)
{
rtlv->tlv.cmd_discr = cpu_to_le16(CMD_DISCR_TLV_ENCAP);
rtlv->tlv.tlv_type = cpu_to_le16(tlv_type);
rtlv->tlv.length = cpu_to_le16(content_bytes);
rtlv->tlv.flags |= more ? TLV_FLAGS_MORE : 0;
rtlv->tlv.flags |= flags ? TLV_FLAGS_REQUIRED : 0;
}
/*
* TLV size in units of 16 byte chunks
*/
#define TLV_SIZE ((sizeof(struct roce_tlv) + 15) / 16)
/*
* TLV length in bytes
*/
#define TLV_BYTES (TLV_SIZE * 16)
#define HAS_TLV_HEADER(msg) (le16_to_cpu(((struct tlv *)(msg))->cmd_discr) == CMD_DISCR_TLV_ENCAP)
#define GET_TLV_DATA(tlv) ((void *)&((uint8_t *)(tlv))[TLV_BYTES])
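/* The accessors below operate on the real cmdq_base header, which sits
 * either at the start of the message or, for TLV-encapsulated commands
 * larger than the leading roce_tlv header, immediately after it.
 */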
static inline u8 __get_cmdq_base_opcode(struct cmdq_base *req, u32 size)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
return ((struct cmdq_base *)GET_TLV_DATA(req))->opcode;
else
return req->opcode;
}
static inline void __set_cmdq_base_opcode(struct cmdq_base *req,
u32 size, u8 val)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
((struct cmdq_base *)GET_TLV_DATA(req))->opcode = val;
else
req->opcode = val;
}
static inline __le16 __get_cmdq_base_cookie(struct cmdq_base *req, u32 size)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
return ((struct cmdq_base *)GET_TLV_DATA(req))->cookie;
else
return req->cookie;
}
static inline void __set_cmdq_base_cookie(struct cmdq_base *req,
u32 size, __le16 val)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
((struct cmdq_base *)GET_TLV_DATA(req))->cookie = val;
else
req->cookie = val;
}
static inline __le64 __get_cmdq_base_resp_addr(struct cmdq_base *req, u32 size)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr;
else
return req->resp_addr;
}
static inline void __set_cmdq_base_resp_addr(struct cmdq_base *req,
u32 size, __le64 val)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr = val;
else
req->resp_addr = val;
}
static inline u8 __get_cmdq_base_resp_size(struct cmdq_base *req, u32 size)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_size;
else
return req->resp_size;
}
static inline void __set_cmdq_base_resp_size(struct cmdq_base *req,
u32 size, u8 val)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
((struct cmdq_base *)GET_TLV_DATA(req))->resp_size = val;
else
req->resp_size = val;
}
static inline u8 __get_cmdq_base_cmd_size(struct cmdq_base *req, u32 size)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
return ((struct roce_tlv *)(req))->total_size;
else
return req->cmd_size;
}
static inline void __set_cmdq_base_cmd_size(struct cmdq_base *req,
u32 size, u8 val)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
((struct cmdq_base *)GET_TLV_DATA(req))->cmd_size = val;
else
req->cmd_size = val;
}
static inline __le16 __get_cmdq_base_flags(struct cmdq_base *req, u32 size)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
return ((struct cmdq_base *)GET_TLV_DATA(req))->flags;
else
return req->flags;
}
static inline void __set_cmdq_base_flags(struct cmdq_base *req,
u32 size, __le16 val)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
((struct cmdq_base *)GET_TLV_DATA(req))->flags = val;
else
req->flags = val;
}
struct bnxt_qplib_tlv_modify_cc_req {
struct roce_tlv tlv_hdr;
struct cmdq_modify_roce_cc base_req;
__le64 tlvpad;
struct cmdq_modify_roce_cc_gen1_tlv ext_req;
};
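/* A minimal usage sketch (assumed caller, not part of this header):
 *
 *	struct bnxt_qplib_tlv_modify_cc_req req = {};
 *
 *	__roce_1st_tlv_prep(&req.tlv_hdr, CHUNKS(sizeof(req)),
 *			    sizeof(req.base_req), 1);
 *
 * followed by __roce_ext_tlv_prep() on the extended TLV's header to
 * describe the gen1 extension.
 */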
struct bnxt_qplib_tlv_query_rcc_sb {
struct roce_tlv tlv_hdr;
struct creq_query_roce_cc_resp_sb base_sb;
struct creq_query_roce_cc_gen1_resp_sb_tlv gen1_sb;
};
#endif /* __QPLIB_TLV_H__ */
/*
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef UVERBS_H
#define UVERBS_H
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/cdev.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_std_types.h>
#define UVERBS_MODULE_NAME ib_uverbs
#include <rdma/uverbs_named_ioctl.h>
static inline void
ib_uverbs_init_udata(struct ib_udata *udata,
const void __user *ibuf,
void __user *obuf,
size_t ilen, size_t olen)
{
udata->inbuf = ibuf;
udata->outbuf = obuf;
udata->inlen = ilen;
udata->outlen = olen;
}
static inline void
ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
const void __user *ibuf,
void __user *obuf,
size_t ilen, size_t olen)
{
ib_uverbs_init_udata(udata,
ilen ? ibuf : NULL, olen ? obuf : NULL,
ilen, olen);
}
/*
* Our lifetime rules for these structs are the following:
*
* struct ib_uverbs_device: One reference is held by the module and
* released in ib_uverbs_remove_one(). Another reference is taken by
* ib_uverbs_open() each time the character special file is opened,
* and released in ib_uverbs_release_file() when the file is released.
*
* struct ib_uverbs_file: One reference is held by the VFS and
* released when the file is closed. Another reference is taken when
* an asynchronous event queue file is created and released when the
* event file is closed.
*
* struct ib_uverbs_event_queue: Base structure for
* struct ib_uverbs_async_event_file and struct ib_uverbs_completion_event_file.
* One reference is held by the VFS and released when the file is closed.
* For asynchronous event files, another reference is held by the corresponding
* main context file and released when that file is closed. For completion
* event files, a reference is taken when a CQ is created that uses the file,
* and released when the CQ is destroyed.
*/
struct ib_uverbs_device {
refcount_t refcount;
u32 num_comp_vectors;
struct completion comp;
struct device dev;
/* First group for device attributes, NULL terminated array */
const struct attribute_group *groups[2];
struct ib_device __rcu *ib_dev;
int devnum;
struct cdev cdev;
struct rb_root xrcd_tree;
struct mutex xrcd_tree_mutex;
struct srcu_struct disassociate_srcu;
struct mutex lists_mutex; /* protect lists */
struct list_head uverbs_file_list;
struct uverbs_api *uapi;
};
struct ib_uverbs_event_queue {
spinlock_t lock;
int is_closed;
wait_queue_head_t poll_wait;
struct fasync_struct *async_queue;
struct list_head event_list;
};
struct ib_uverbs_async_event_file {
struct ib_uobject uobj;
struct ib_uverbs_event_queue ev_queue;
struct ib_event_handler event_handler;
};
struct ib_uverbs_completion_event_file {
struct ib_uobject uobj;
struct ib_uverbs_event_queue ev_queue;
};
struct ib_uverbs_event {
union {
struct ib_uverbs_async_event_desc async;
struct ib_uverbs_comp_event_desc comp;
} desc;
struct list_head list;
struct list_head obj_list;
u32 *counter;
};
struct ib_uverbs_mcast_entry {
struct list_head list;
union ib_gid gid;
u16 lid;
};
struct ib_uevent_object {
struct ib_uobject uobject;
struct ib_uverbs_async_event_file *event_file;
/* List member for ib_uverbs_async_event_file list */
struct list_head event_list;
u32 events_reported;
};
struct ib_uxrcd_object {
struct ib_uobject uobject;
atomic_t refcnt;
};
struct ib_usrq_object {
struct ib_uevent_object uevent;
struct ib_uxrcd_object *uxrcd;
};
struct ib_uqp_object {
struct ib_uevent_object uevent;
/* lock for mcast list */
struct mutex mcast_lock;
struct list_head mcast_list;
struct ib_uxrcd_object *uxrcd;
};
struct ib_uwq_object {
struct ib_uevent_object uevent;
};
struct ib_ucq_object {
struct ib_uevent_object uevent;
struct list_head comp_list;
u32 comp_events_reported;
};
extern const struct file_operations uverbs_event_fops;
extern const struct file_operations uverbs_async_event_fops;
void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue);
void ib_uverbs_init_async_event_file(struct ib_uverbs_async_event_file *ev_file);
void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue);
void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
int uverbs_async_event_release(struct inode *inode, struct file *filp);
int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs);
int ib_init_ucontext(struct uverbs_attr_bundle *attrs);
void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
struct ib_ucq_object *uobj);
void ib_uverbs_release_uevent(struct ib_uevent_object *uobj);
void ib_uverbs_release_file(struct kref *ref);
void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
__u64 element, __u64 event,
struct list_head *obj_list, u32 *counter);
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs);
int uverbs_dealloc_mw(struct ib_mw *mw);
void ib_uverbs_detach_umcast(struct ib_qp *qp,
struct ib_uqp_object *uobj);
long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
struct ib_uverbs_flow_spec {
union {
union {
struct ib_uverbs_flow_spec_hdr hdr;
struct {
__u32 type;
__u16 size;
__u16 reserved;
};
};
struct ib_uverbs_flow_spec_eth eth;
struct ib_uverbs_flow_spec_ipv4 ipv4;
struct ib_uverbs_flow_spec_esp esp;
struct ib_uverbs_flow_spec_tcp_udp tcp_udp;
struct ib_uverbs_flow_spec_ipv6 ipv6;
struct ib_uverbs_flow_spec_action_tag flow_tag;
struct ib_uverbs_flow_spec_action_drop drop;
struct ib_uverbs_flow_spec_action_handle action;
struct ib_uverbs_flow_spec_action_count flow_count;
};
};
int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
const void *kern_spec_mask,
const void *kern_spec_val,
size_t kern_filter_sz,
union ib_flow_spec *ib_spec);
/*
* ib_uverbs_query_port_resp.port_cap_flags started out as just a copy of the
* PortInfo CapabilityMask, but was extended with unique bits.
*/
static inline u32 make_port_cap_flags(const struct ib_port_attr *attr)
{
u32 res;
/* All IBA CapabilityMask bits are passed through here, except bit 26,
* which is overridden with IP_BASED_GIDS. This is due to a historical
* mistake in the implementation of IP_BASED_GIDS. Otherwise all other
* bits match the IBA definition across all kernel versions.
*/
res = attr->port_cap_flags & ~(u32)IB_UVERBS_PCF_IP_BASED_GIDS;
if (attr->ip_gids)
res |= IB_UVERBS_PCF_IP_BASED_GIDS;
return res;
}
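/*
 * Look up the async event file referenced by the given attribute; if
 * the attribute is absent, fall back to the ufile's default async event
 * file.  A reference is taken on the object that is returned.
 */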
static inline struct ib_uverbs_async_event_file *
ib_uverbs_get_async_event(struct uverbs_attr_bundle *attrs,
u16 id)
{
struct ib_uobject *async_ev_file_uobj;
struct ib_uverbs_async_event_file *async_ev_file;
async_ev_file_uobj = uverbs_attr_get_uobject(attrs, id);
if (IS_ERR(async_ev_file_uobj))
async_ev_file = READ_ONCE(attrs->ufile->default_async_file);
else
async_ev_file = container_of(async_ev_file_uobj,
struct ib_uverbs_async_event_file,
uobj);
if (async_ev_file)
uverbs_uobject_get(&async_ev_file->uobj);
return async_ev_file;
}
void copy_port_attr_to_resp(struct ib_port_attr *attr,
struct ib_uverbs_query_port_resp *resp,
struct ib_device *ib_dev, u8 port_num);
#endif /* UVERBS_H */
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* AMD FCH gpio driver platform-data
*
* Copyright (C) 2018 metux IT consult
* Author: Enrico Weigelt <[email protected]>
*
*/
#ifndef __LINUX_PLATFORM_DATA_GPIO_AMD_FCH_H
#define __LINUX_PLATFORM_DATA_GPIO_AMD_FCH_H
#define AMD_FCH_GPIO_DRIVER_NAME "gpio_amd_fch"
/*
* gpio register index definitions
*/
#define AMD_FCH_GPIO_REG_GPIO49 0x40
#define AMD_FCH_GPIO_REG_GPIO50 0x41
#define AMD_FCH_GPIO_REG_GPIO51 0x42
#define AMD_FCH_GPIO_REG_GPIO55_DEVSLP0 0x43
#define AMD_FCH_GPIO_REG_GPIO57 0x44
#define AMD_FCH_GPIO_REG_GPIO58 0x45
#define AMD_FCH_GPIO_REG_GPIO59_DEVSLP1 0x46
#define AMD_FCH_GPIO_REG_GPIO64 0x47
#define AMD_FCH_GPIO_REG_GPIO68 0x48
#define AMD_FCH_GPIO_REG_GPIO66_SPKR 0x5B
#define AMD_FCH_GPIO_REG_GPIO71 0x4D
#define AMD_FCH_GPIO_REG_GPIO32_GE1 0x59
#define AMD_FCH_GPIO_REG_GPIO33_GE2 0x5A
#define AMT_FCH_GPIO_REG_GEVT22 0x09
/*
* struct amd_fch_gpio_pdata - GPIO chip platform data
* @gpio_num: number of entries
* @gpio_reg: array of gpio registers
* @gpio_names: array of gpio names
*/
struct amd_fch_gpio_pdata {
int gpio_num;
int *gpio_reg;
const char * const *gpio_names;
};
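/*
 * Example (hypothetical board code, not part of this header) describing
 * two pins to the driver:
 *
 *	static int board_gpio_regs[] = {
 *		AMD_FCH_GPIO_REG_GPIO49,
 *		AMD_FCH_GPIO_REG_GPIO55_DEVSLP0,
 *	};
 *	static const char * const board_gpio_names[] = {
 *		"board:led", "board:devslp0",
 *	};
 *	static struct amd_fch_gpio_pdata board_gpio_pdata = {
 *		.gpio_num	= ARRAY_SIZE(board_gpio_regs),
 *		.gpio_reg	= board_gpio_regs,
 *		.gpio_names	= board_gpio_names,
 *	};
 */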
#endif /* __LINUX_PLATFORM_DATA_GPIO_AMD_FCH_H */
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "mxms.h"
#define ROM16(x) get_unaligned_le16(&(x))
#define ROM32(x) get_unaligned_le32(&(x))
static u8 *
mxms_data(struct nvkm_mxm *mxm)
{
return mxm->mxms;
}
u16
mxms_version(struct nvkm_mxm *mxm)
{
u8 *mxms = mxms_data(mxm);
u16 version = (mxms[4] << 8) | mxms[5];
	switch (version) {
case 0x0200:
case 0x0201:
case 0x0300:
return version;
default:
break;
}
nvkm_debug(&mxm->subdev, "unknown version %d.%d\n", mxms[4], mxms[5]);
return 0x0000;
}
u16
mxms_headerlen(struct nvkm_mxm *mxm)
{
return 8;
}
u16
mxms_structlen(struct nvkm_mxm *mxm)
{
return *(u16 *)&mxms_data(mxm)[6];
}
bool
mxms_checksum(struct nvkm_mxm *mxm)
{
u16 size = mxms_headerlen(mxm) + mxms_structlen(mxm);
u8 *mxms = mxms_data(mxm), sum = 0;
while (size--)
sum += *mxms++;
if (sum) {
nvkm_debug(&mxm->subdev, "checksum invalid\n");
return false;
}
return true;
}
bool
mxms_valid(struct nvkm_mxm *mxm)
{
u8 *mxms = mxms_data(mxm);
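	/* header must start with the little-endian "MXM_" signature */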
if (*(u32 *)mxms != 0x5f4d584d) {
nvkm_debug(&mxm->subdev, "signature invalid\n");
return false;
}
if (!mxms_version(mxm) || !mxms_checksum(mxm))
return false;
return true;
}
bool
mxms_foreach(struct nvkm_mxm *mxm, u8 types,
bool (*exec)(struct nvkm_mxm *, u8 *, void *), void *info)
{
struct nvkm_subdev *subdev = &mxm->subdev;
u8 *mxms = mxms_data(mxm);
u8 *desc = mxms + mxms_headerlen(mxm);
u8 *fini = desc + mxms_structlen(mxm) - 1;
while (desc < fini) {
u8 type = desc[0] & 0x0f;
u8 headerlen = 0;
u8 recordlen = 0;
u8 entries = 0;
switch (type) {
case 0: /* Output Device Structure */
if (mxms_version(mxm) >= 0x0300)
headerlen = 8;
else
headerlen = 6;
break;
case 1: /* System Cooling Capability Structure */
case 2: /* Thermal Structure */
case 3: /* Input Power Structure */
headerlen = 4;
break;
case 4: /* GPIO Device Structure */
headerlen = 4;
recordlen = 2;
entries = (ROM32(desc[0]) & 0x01f00000) >> 20;
break;
case 5: /* Vendor Specific Structure */
headerlen = 8;
break;
case 6: /* Backlight Control Structure */
if (mxms_version(mxm) >= 0x0300) {
headerlen = 4;
recordlen = 8;
entries = (desc[1] & 0xf0) >> 4;
} else {
headerlen = 8;
}
break;
case 7: /* Fan Control Structure */
headerlen = 8;
recordlen = 4;
entries = desc[1] & 0x07;
break;
default:
nvkm_debug(subdev, "unknown descriptor type %d\n", type);
return false;
}
if (mxm->subdev.debug >= NV_DBG_DEBUG && (exec == NULL)) {
		static const char *mxms_desc[] = {
"ODS", "SCCS", "TS", "IPS",
"GSD", "VSS", "BCS", "FCS",
};
u8 *dump = desc;
char data[32], *ptr;
int i, j;
for (j = headerlen - 1, ptr = data; j >= 0; j--)
ptr += sprintf(ptr, "%02x", dump[j]);
dump += headerlen;
nvkm_debug(subdev, "%4s: %s\n", mxms_desc[type], data);
for (i = 0; i < entries; i++, dump += recordlen) {
for (j = recordlen - 1, ptr = data; j >= 0; j--)
ptr += sprintf(ptr, "%02x", dump[j]);
nvkm_debug(subdev, " %s\n", data);
}
}
if (types & (1 << type)) {
if (!exec(mxm, desc, info))
return false;
}
desc += headerlen + (entries * recordlen);
}
return true;
}
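/*
 * Hedged usage sketch (illustrative, not from the original file): a minimal
 * exec callback counting GPIO Device Structure descriptors.  The bitmask
 * passed as 'types' selects which descriptor types reach the callback;
 * 1 << 4 matches type 4 (GSD) in the switch above.
 */
static bool
example_count_gsd(struct nvkm_mxm *mxm, u8 *desc, void *info)
{
	int *count = info;

	(*count)++;
	return true;	/* keep walking the remaining descriptors */
}

static int
example_gsd_count(struct nvkm_mxm *mxm)
{
	int count = 0;

	if (!mxms_foreach(mxm, 1 << 4, example_count_gsd, &count))
		return -1;	/* malformed descriptor table */
	return count;
}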
void
mxms_output_device(struct nvkm_mxm *mxm, u8 *pdata, struct mxms_odev *desc)
{
u64 data = ROM32(pdata[0]);
if (mxms_version(mxm) >= 0x0300)
data |= (u64)ROM16(pdata[4]) << 32;
desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8;
desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
desc->dig_conn = (data & 0x0000000000780000ULL) >> 19;
}
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"
/**
* ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
* @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
int ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
u16 *max, u8 *prio_type)
{
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
u8 i = 0;
reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
/* Enable Arbiter */
reg &= ~IXGBE_RMCS_ARBDIS;
/* Enable Receive Recycle within the BWG */
reg |= IXGBE_RMCS_RRM;
	/* Enable Deficit Fixed Priority arbitration */
reg |= IXGBE_RMCS_DFP;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
credit_refill = refill[i];
credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
if (prio_type[i] == prio_link)
reg |= IXGBE_RT2CR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
}
reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
reg |= IXGBE_RDRXCTL_RDMTS_1_2;
reg |= IXGBE_RDRXCTL_MPBEN;
reg |= IXGBE_RDRXCTL_MCEN;
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	/* Make sure there are enough descriptors before arbitration */
reg &= ~IXGBE_RXCTRL_DMBYPS;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
return 0;
}
/**
* ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
int ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
u32 reg, max_credits;
u8 i;
reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
/* Enable arbiter */
reg &= ~IXGBE_DPMCS_ARBDIS;
reg |= IXGBE_DPMCS_TSOEF;
/* Configure Max TSO packet size 34KB including payload and headers */
reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
max_credits = max[i];
reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
reg |= refill[i];
reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
if (prio_type[i] == prio_group)
reg |= IXGBE_TDTQ2TCCR_GSP;
if (prio_type[i] == prio_link)
reg |= IXGBE_TDTQ2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
}
return 0;
}
/**
* ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
* @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
int ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
u32 reg;
u8 i;
reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
/* Enable Data Plane Arbiter */
reg &= ~IXGBE_PDPMCS_ARBDIS;
/* Enable DFP and Transmit Recycle Mode */
reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
reg = refill[i];
reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
if (prio_type[i] == prio_group)
reg |= IXGBE_TDPT2TCCR_GSP;
if (prio_type[i] == prio_link)
reg |= IXGBE_TDPT2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
}
/* Enable Tx packet buffer division */
reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
reg |= IXGBE_DTXCTL_ENDBUBD;
IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
return 0;
}
/**
* ixgbe_dcb_config_pfc_82598 - Config priority flow control
* @hw: pointer to hardware structure
* @pfc_en: enabled pfc bitmask
*
* Configure Priority Flow Control for each traffic class.
*/
int ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 fcrtl, reg;
u8 i;
/* Enable Transmit Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
reg &= ~IXGBE_RMCS_TFCE_802_3X;
reg |= IXGBE_RMCS_TFCE_PRIORITY;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Enable Receive Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
if (pfc_en)
reg |= IXGBE_FCTRL_RPFCE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if (!(pfc_en & BIT(i))) {
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
continue;
}
fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
}
/* Configure pause time */
reg = hw->fc.pause_time * 0x00010001;
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
return 0;
}
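/*
 * Hedged note (illustrative): pfc_en is a per-TC bitmask, so for example
 * pfc_en = BIT(0) | BIT(3) enables priority flow control on traffic
 * classes 0 and 3 while the loop above zeroes the thresholds of the rest.
 */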
/**
* ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
* @hw: pointer to hardware structure
*
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
*/
static int ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
u8 j = 0;
/* Receive Queues stats setting - 8 queues per statistics reg */
for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
reg |= ((0x1010101) * j);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
reg |= ((0x1010101) * j);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
}
/* Transmit Queues stats setting - 4 queues per statistics reg */
for (i = 0; i < 8; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
reg |= ((0x1010101) * i);
IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
}
return 0;
}
/**
* ixgbe_dcb_hw_config_82598 - Config and enable DCB
* @hw: pointer to hardware structure
* @pfc_en: enabled pfc bitmask
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
 * Configure DCB settings and enable DCB mode.
*/
int ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_pfc_82598(hw, pfc_en);
ixgbe_dcb_config_tc_stats_82598(hw);
return 0;
}
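/*
 * Hedged usage sketch (not from the original file): callers normally derive
 * the per-TC credit arrays from the driver's DCB credit calculation before
 * invoking the hw config; the constants below are placeholders only, not
 * validated settings.
 */
static inline void example_dcb_hw_config_82598(struct ixgbe_hw *hw)
{
	u16 refill[MAX_TRAFFIC_CLASS] = {
		64, 64, 64, 64, 64, 64, 64, 64 };
	u16 max[MAX_TRAFFIC_CLASS] = {
		511, 511, 511, 511, 511, 511, 511, 511 };
	u8 bwg_id[MAX_TRAFFIC_CLASS] = { 0 };
	u8 prio_type[MAX_TRAFFIC_CLASS] = {
		prio_link, prio_link, prio_link, prio_link,
		prio_link, prio_link, prio_link, prio_link };

	/* enable PFC on traffic classes 0 and 1 only */
	ixgbe_dcb_hw_config_82598(hw, BIT(0) | BIT(1), refill, max,
				  bwg_id, prio_type);
}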
|
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __NBIO_V4_3_H__
#define __NBIO_V4_3_H__
#include "soc15_common.h"
extern const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v4_3_funcs;
extern const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs;
extern struct amdgpu_nbio_ras nbio_v4_3_ras;
#endif
|
/*
* DaVinci Power & Sleep Controller (PSC) defines
*
* Copyright (C) 2006 Texas Instruments.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#ifndef __ASM_ARCH_PSC_H
#define __ASM_ARCH_PSC_H
/* Power and Sleep Controller (PSC) Domains */
#define DAVINCI_GPSC_ARMDOMAIN 0
#define DAVINCI_GPSC_DSPDOMAIN 1
#define DAVINCI_LPSC_VPSSMSTR 0
#define DAVINCI_LPSC_VPSSSLV 1
#define DAVINCI_LPSC_TPCC 2
#define DAVINCI_LPSC_TPTC0 3
#define DAVINCI_LPSC_TPTC1 4
#define DAVINCI_LPSC_EMAC 5
#define DAVINCI_LPSC_EMAC_WRAPPER 6
#define DAVINCI_LPSC_USB 9
#define DAVINCI_LPSC_ATA 10
#define DAVINCI_LPSC_VLYNQ 11
#define DAVINCI_LPSC_UHPI 12
#define DAVINCI_LPSC_DDR_EMIF 13
#define DAVINCI_LPSC_AEMIF 14
#define DAVINCI_LPSC_MMC_SD 15
#define DAVINCI_LPSC_McBSP 17
#define DAVINCI_LPSC_I2C 18
#define DAVINCI_LPSC_UART0 19
#define DAVINCI_LPSC_UART1 20
#define DAVINCI_LPSC_UART2 21
#define DAVINCI_LPSC_SPI 22
#define DAVINCI_LPSC_PWM0 23
#define DAVINCI_LPSC_PWM1 24
#define DAVINCI_LPSC_PWM2 25
#define DAVINCI_LPSC_GPIO 26
#define DAVINCI_LPSC_TIMER0 27
#define DAVINCI_LPSC_TIMER1 28
#define DAVINCI_LPSC_TIMER2 29
#define DAVINCI_LPSC_SYSTEM_SUBSYS 30
#define DAVINCI_LPSC_ARM 31
#define DAVINCI_LPSC_SCR2 32
#define DAVINCI_LPSC_SCR3 33
#define DAVINCI_LPSC_SCR4 34
#define DAVINCI_LPSC_CROSSBAR 35
#define DAVINCI_LPSC_CFG27 36
#define DAVINCI_LPSC_CFG3 37
#define DAVINCI_LPSC_CFG5 38
#define DAVINCI_LPSC_GEM 39
#define DAVINCI_LPSC_IMCOP 40
/* PSC0 defines */
#define DA8XX_LPSC0_TPCC 0
#define DA8XX_LPSC0_TPTC0 1
#define DA8XX_LPSC0_TPTC1 2
#define DA8XX_LPSC0_EMIF25 3
#define DA8XX_LPSC0_SPI0 4
#define DA8XX_LPSC0_MMC_SD 5
#define DA8XX_LPSC0_AINTC 6
#define DA8XX_LPSC0_ARM_RAM_ROM 7
#define DA8XX_LPSC0_SECU_MGR 8
#define DA8XX_LPSC0_UART0 9
#define DA8XX_LPSC0_SCR0_SS 10
#define DA8XX_LPSC0_SCR1_SS 11
#define DA8XX_LPSC0_SCR2_SS 12
#define DA8XX_LPSC0_PRUSS 13
#define DA8XX_LPSC0_ARM 14
#define DA8XX_LPSC0_GEM 15
/* PSC1 defines */
#define DA850_LPSC1_TPCC1 0
#define DA8XX_LPSC1_USB20 1
#define DA8XX_LPSC1_USB11 2
#define DA8XX_LPSC1_GPIO 3
#define DA8XX_LPSC1_UHPI 4
#define DA8XX_LPSC1_CPGMAC 5
#define DA8XX_LPSC1_EMIF3C 6
#define DA8XX_LPSC1_McASP0 7
#define DA830_LPSC1_McASP1 8
#define DA850_LPSC1_SATA 8
#define DA830_LPSC1_McASP2 9
#define DA850_LPSC1_VPIF 9
#define DA8XX_LPSC1_SPI1 10
#define DA8XX_LPSC1_I2C 11
#define DA8XX_LPSC1_UART1 12
#define DA8XX_LPSC1_UART2 13
#define DA850_LPSC1_McBSP0 14
#define DA850_LPSC1_McBSP1 15
#define DA8XX_LPSC1_LCDC 16
#define DA8XX_LPSC1_PWM 17
#define DA850_LPSC1_MMC_SD1 18
#define DA8XX_LPSC1_ECAP 20
#define DA830_LPSC1_EQEP 21
#define DA850_LPSC1_TPTC2 21
#define DA8XX_LPSC1_SCR_P0_SS 24
#define DA8XX_LPSC1_SCR_P1_SS 25
#define DA8XX_LPSC1_CR_P3_SS 26
#define DA8XX_LPSC1_L3_CBA_RAM 31
/* PSC register offsets */
#define EPCPR 0x070
#define PTCMD 0x120
#define PTSTAT 0x128
#define PDSTAT 0x200
#define PDCTL 0x300
#define MDSTAT 0x800
#define MDCTL 0xA00
/* PSC module states */
#define PSC_STATE_SWRSTDISABLE 0
#define PSC_STATE_SYNCRST 1
#define PSC_STATE_DISABLE 2
#define PSC_STATE_ENABLE 3
#define MDSTAT_STATE_MASK 0x3f
#define PDSTAT_STATE_MASK 0x1f
#define MDCTL_LRST BIT(8)
#define MDCTL_FORCE BIT(31)
#define PDCTL_NEXT BIT(0)
#define PDCTL_EPCGOOD BIT(8)
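/*
 * Hedged sketch (not part of the original header): a minimal module state
 * transition using the offsets above, assuming <linux/io.h> and that the
 * caller has ioremap()ed the PSC instance to 'psc_base' with the module in
 * power domain 0.  The real sequence lives in the DaVinci PSC code; this
 * only shows how MDCTL, PTCMD, PTSTAT and MDSTAT_STATE_MASK fit together.
 */
static inline void example_psc_set_state(void __iomem *psc_base,
					 unsigned int id, u32 next_state)
{
	u32 mdctl;

	/* request the new state for module 'id' */
	mdctl = __raw_readl(psc_base + MDCTL + 4 * id);
	mdctl &= ~MDSTAT_STATE_MASK;
	mdctl |= next_state;
	__raw_writel(mdctl, psc_base + MDCTL + 4 * id);

	/* start the transition for power domain 0 ... */
	__raw_writel(BIT(0), psc_base + PTCMD);

	/* ... and wait for it to complete */
	while (__raw_readl(psc_base + PTSTAT) & BIT(0))
		cpu_relax();
}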
#endif /* __ASM_ARCH_PSC_H */
|