// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Performance counter support for POWER5 (not POWER5++) processors.
*
* Copyright 2009 Paul Mackerras, IBM Corporation.
*/
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
#include "internal.h"
/*
* Bits in event code for POWER5 (not POWER5++)
*/
#define PM_PMC_SH 20 /* PMC number (1-based) for direct events */
#define PM_PMC_MSK 0xf
#define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH)
#define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */
#define PM_UNIT_MSK 0xf
#define PM_BYTE_SH 12 /* Byte number of event bus to use */
#define PM_BYTE_MSK 7
#define PM_GRS_SH 8 /* Storage subsystem mux select */
#define PM_GRS_MSK 7
#define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */
#define PM_PMCSEL_MSK 0x7f
/* Values in PM_UNIT field */
#define PM_FPU 0
#define PM_ISU0 1
#define PM_IFU 2
#define PM_ISU1 3
#define PM_IDU 4
#define PM_ISU0_ALT 6
#define PM_GRS 7
#define PM_LSU0 8
#define PM_LSU1 0xc
#define PM_LASTUNIT 0xc
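/*
 * Worked example (illustrative, not part of the original source): the
 * LD_REF_L1 event code 0x4c1090 used further below decodes to PMC 4,
 * unit 0xc (PM_LSU1), event-bus byte 1 and PMCSEL 0x10, with
 * PM_BUSEVENT_MSK set, i.e. it is counted via the event bus.
 */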
/*
* Bits in MMCR1 for POWER5
*/
#define MMCR1_TTM0SEL_SH 62
#define MMCR1_TTM1SEL_SH 60
#define MMCR1_TTM2SEL_SH 58
#define MMCR1_TTM3SEL_SH 56
#define MMCR1_TTMSEL_MSK 3
#define MMCR1_TD_CP_DBG0SEL_SH 54
#define MMCR1_TD_CP_DBG1SEL_SH 52
#define MMCR1_TD_CP_DBG2SEL_SH 50
#define MMCR1_TD_CP_DBG3SEL_SH 48
#define MMCR1_GRS_L2SEL_SH 46
#define MMCR1_GRS_L2SEL_MSK 3
#define MMCR1_GRS_L3SEL_SH 44
#define MMCR1_GRS_L3SEL_MSK 3
#define MMCR1_GRS_MCSEL_SH 41
#define MMCR1_GRS_MCSEL_MSK 7
#define MMCR1_GRS_FABSEL_SH 39
#define MMCR1_GRS_FABSEL_MSK 3
#define MMCR1_PMC1_ADDER_SEL_SH 35
#define MMCR1_PMC2_ADDER_SEL_SH 34
#define MMCR1_PMC3_ADDER_SEL_SH 33
#define MMCR1_PMC4_ADDER_SEL_SH 32
#define MMCR1_PMC1SEL_SH 25
#define MMCR1_PMC2SEL_SH 17
#define MMCR1_PMC3SEL_SH 9
#define MMCR1_PMC4SEL_SH 1
#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
#define MMCR1_PMCSEL_MSK 0x7f
/*
* Layout of constraint bits:
* 6666555555555544444444443333333333222222222211111111110000000000
* 3210987654321098765432109876543210987654321098765432109876543210
* <><>[ ><><>< ><> [ >[ >[ >< >< >< >< ><><><><><><>
* T0T1 NC G0G1G2 G3 UC PS1PS2 B0 B1 B2 B3 P6P5P4P3P2P1
*
* T0 - TTM0 constraint
* 54-55: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0xc0_0000_0000_0000
*
* T1 - TTM1 constraint
* 52-53: TTM1SEL value (0=IDU, 3=GRS) 0x30_0000_0000_0000
*
* NC - number of counters
* 51: NC error 0x0008_0000_0000_0000
* 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000
*
* G0..G3 - GRS mux constraints
* 46-47: GRS_L2SEL value
* 44-45: GRS_L3SEL value
* 41-43: GRS_MCSEL value
* 39-40: GRS_FABSEL value
* Note that these match up with their bit positions in MMCR1
*
* UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS
* 37: UC3 error 0x20_0000_0000
* 36: FPU|IFU|ISU1 events needed 0x10_0000_0000
* 35: ISU0 events needed 0x08_0000_0000
* 34: IDU|GRS events needed 0x04_0000_0000
*
* PS1
* 33: PS1 error 0x2_0000_0000
* 31-32: count of events needing PMC1/2 0x1_8000_0000
*
* PS2
* 30: PS2 error 0x4000_0000
* 28-29: count of events needing PMC3/4 0x3000_0000
*
* B0
* 24-27: Byte 0 event source 0x0f00_0000
* Encoding as for the event code
*
* B1, B2, B3
* 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources
*
* P1..P6
* 0-11: Count of events needing PMC1..PMC6
*/
static const int grsel_shift[8] = {
MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH,
MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH,
MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH
};
/* Masks and values for using events from the various units */
static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
[PM_FPU] = { 0xc0002000000000ul, 0x00001000000000ul },
[PM_ISU0] = { 0x00002000000000ul, 0x00000800000000ul },
[PM_ISU1] = { 0xc0002000000000ul, 0xc0001000000000ul },
[PM_IFU] = { 0xc0002000000000ul, 0x80001000000000ul },
[PM_IDU] = { 0x30002000000000ul, 0x00000400000000ul },
[PM_GRS] = { 0x30002000000000ul, 0x30000400000000ul },
};
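/*
 * Illustrative reading of one entry (not from the original source): for
 * PM_IFU, the mask 0xc0002000000000 covers the TTM0 constraint (bits 54-55)
 * plus the UC3 error bit (37), while the value 0x80001000000000 encodes
 * TTM0SEL = 2 (IFU) together with the "FPU|IFU|ISU1 events needed" bit (36)
 * from the constraint layout above.
 */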
static int power5_get_constraint(u64 event, unsigned long *maskp,
unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, byte, unit, sh;
int bit, fmask;
unsigned long mask = 0, value = 0;
int grp = -1;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
if (pmc > 6)
return -1;
sh = (pmc - 1) * 2;
mask |= 2 << sh;
value |= 1 << sh;
if (pmc <= 4)
grp = (pmc - 1) >> 1;
else if (event != 0x500009 && event != 0x600005)
return -1;
}
if (event & PM_BUSEVENT_MSK) {
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
if (unit > PM_LASTUNIT)
return -1;
if (unit == PM_ISU0_ALT)
unit = PM_ISU0;
mask |= unit_cons[unit][0];
value |= unit_cons[unit][1];
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
if (byte >= 4) {
if (unit != PM_LSU1)
return -1;
/* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */
++unit;
byte &= 3;
}
if (unit == PM_GRS) {
bit = event & 7;
fmask = (bit == 6)? 7: 3;
sh = grsel_shift[bit];
mask |= (unsigned long)fmask << sh;
value |= (unsigned long)((event >> PM_GRS_SH) & fmask)
<< sh;
}
/*
* Bus events on bytes 0 and 2 can be counted
* on PMC1/2; bytes 1 and 3 on PMC3/4.
*/
if (!pmc)
grp = byte & 1;
/* Set byte lane select field */
mask |= 0xfUL << (24 - 4 * byte);
value |= (unsigned long)unit << (24 - 4 * byte);
}
if (grp == 0) {
/* increment PMC1/2 field */
mask |= 0x200000000ul;
value |= 0x080000000ul;
} else if (grp == 1) {
/* increment PMC3/4 field */
mask |= 0x40000000ul;
value |= 0x10000000ul;
}
if (pmc < 5) {
/* need a counter from PMC1-4 set */
mask |= 0x8000000000000ul;
value |= 0x1000000000000ul;
}
*maskp = mask;
*valp = value;
return 0;
}
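/*
 * Illustrative walk-through (not from the original source): for the
 * LD_REF_L1 event 0x4c1090 (PMC 4, LSU1, byte 1, bus event), the constraint
 * claims the PMC4 slot (P4, bits 6-7), sets the B1 byte-lane field
 * (bits 20-23) to unit 0xc, and bumps both the PMC3/4 count (PS2) and the
 * PMC1-4 count (NC) in the layout documented above.
 */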
#define MAX_ALT 3 /* at most 3 alternatives for any event */
static const unsigned int event_alternatives[][MAX_ALT] = {
{ 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */
{ 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */
{ 0x100005, 0x600005 }, /* PM_RUN_CYC */
{ 0x100009, 0x200009, 0x500009 }, /* PM_INST_CMPL */
{ 0x300009, 0x400009 }, /* PM_INST_DISP */
};
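/*
 * Note (editorial): find_alternative() below stops scanning once the event
 * code is below a row's first entry, so this table must stay sorted by its
 * first column.
 */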
/*
* Scan the alternatives table for a match and return the
* index into the alternatives table if found, else -1.
*/
static int find_alternative(u64 event)
{
int i, j;
for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
if (event < event_alternatives[i][0])
break;
for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
if (event == event_alternatives[i][j])
return i;
}
return -1;
}
static const unsigned char bytedecode_alternatives[4][4] = {
/* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 },
/* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e },
/* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 },
/* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e }
};
/*
* Some direct events for decodes of event bus byte 3 have alternative
* PMCSEL values on other counters. This returns the alternative
* event code for those that do, or -1 otherwise.
*/
static s64 find_alternative_bdecode(u64 event)
{
int pmc, altpmc, pp, j;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc == 0 || pmc > 4)
return -1;
altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */
pp = event & PM_PMCSEL_MSK;
for (j = 0; j < 4; ++j) {
if (bytedecode_alternatives[pmc - 1][j] == pp) {
return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) |
(altpmc << PM_PMC_SH) |
bytedecode_alternatives[altpmc - 1][j];
}
}
return -1;
}
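/*
 * Illustrative example (not from the original source): for event 0x100023
 * (PMC 1, PMCSEL 0x23), the table above yields the alternative 0x400017,
 * i.e. the same byte decode counted on PMC 4 with PMCSEL 0x17.
 */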
static int power5_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
int i, j, nalt = 1;
s64 ae;
alt[0] = event;
nalt = 1;
i = find_alternative(event);
if (i >= 0) {
for (j = 0; j < MAX_ALT; ++j) {
ae = event_alternatives[i][j];
if (ae && ae != event)
alt[nalt++] = ae;
}
} else {
ae = find_alternative_bdecode(event);
if (ae > 0)
alt[nalt++] = ae;
}
return nalt;
}
/*
* Map of which direct events on which PMCs are marked instruction events.
* Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event.
* Bit 0 is set if it is marked for all PMCs.
* The 0x80 bit indicates a byte decode PMCSEL value.
*/
static unsigned char direct_event_is_marked[0x28] = {
0, /* 00 */
0x1f, /* 01 PM_IOPS_CMPL */
0x2, /* 02 PM_MRK_GRP_DISP */
0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
0, /* 04 */
0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */
0x80, /* 06 */
0x80, /* 07 */
0, 0, 0,/* 08 - 0a */
0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */
0, /* 0c */
0x80, /* 0d */
0x80, /* 0e */
0, /* 0f */
0, /* 10 */
0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */
0, /* 12 */
0x10, /* 13 PM_MRK_GRP_CMPL */
0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */
0x2, /* 15 PM_MRK_GRP_ISSUED */
0x80, /* 16 */
0x80, /* 17 */
0, 0, 0, 0, 0,
0x80, /* 1d */
0x80, /* 1e */
0, /* 1f */
0x80, /* 20 */
0x80, /* 21 */
0x80, /* 22 */
0x80, /* 23 */
0x80, /* 24 */
0x80, /* 25 */
0x80, /* 26 */
0x80, /* 27 */
};
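/*
 * Illustrative reading (not from the original source): the 0x02 entry at
 * PMCSEL 0x02 means PM_MRK_GRP_DISP is a marked event only when counted on
 * PMC 1 (bit 1), while the 0x1f entry at PMCSEL 0x01 (PM_IOPS_CMPL) covers
 * bits 0-4, i.e. any PMC, matching the lookup in
 * power5_marked_instr_event() below.
 */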
/*
* Returns 1 if event counts things relating to marked instructions
* and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
*/
static int power5_marked_instr_event(u64 event)
{
int pmc, psel;
int bit, byte, unit;
u32 mask;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
psel = event & PM_PMCSEL_MSK;
if (pmc >= 5)
return 0;
bit = -1;
if (psel < sizeof(direct_event_is_marked)) {
if (direct_event_is_marked[psel] & (1 << pmc))
return 1;
if (direct_event_is_marked[psel] & 0x80)
bit = 4;
else if (psel == 0x08)
bit = pmc - 1;
else if (psel == 0x10)
bit = 4 - pmc;
else if (psel == 0x1b && (pmc == 1 || pmc == 3))
bit = 4;
} else if ((psel & 0x58) == 0x40)
bit = psel & 7;
if (!(event & PM_BUSEVENT_MSK))
return 0;
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
if (unit == PM_LSU0) {
/* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */
mask = 0x5dff00;
} else if (unit == PM_LSU1 && byte >= 4) {
byte -= 4;
/* byte 4 bits 1,3,5,7, byte 5 bits 6-7, byte 7 bits 0-4,6 */
mask = 0x5f00c0aa;
} else
return 0;
return (mask >> (byte * 8 + bit)) & 1;
}
static int power5_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], struct mmcr_regs *mmcr,
struct perf_event *pevents[],
u32 flags __maybe_unused)
{
unsigned long mmcr1 = 0;
unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
unsigned int pmc, unit, byte, psel;
unsigned int ttm, grp;
int i, isbus, bit, grsel;
unsigned int pmc_inuse = 0;
unsigned int pmc_grp_use[2];
unsigned char busbyte[4];
unsigned char unituse[16];
int ttmuse;
if (n_ev > 6)
return -1;
/* First pass to count resource use */
pmc_grp_use[0] = pmc_grp_use[1] = 0;
memset(busbyte, 0, sizeof(busbyte));
memset(unituse, 0, sizeof(unituse));
for (i = 0; i < n_ev; ++i) {
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
if (pmc > 6)
return -1;
if (pmc_inuse & (1 << (pmc - 1)))
return -1;
pmc_inuse |= 1 << (pmc - 1);
/* count 1/2 vs 3/4 use */
if (pmc <= 4)
++pmc_grp_use[(pmc - 1) >> 1];
}
if (event[i] & PM_BUSEVENT_MSK) {
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
if (unit > PM_LASTUNIT)
return -1;
if (unit == PM_ISU0_ALT)
unit = PM_ISU0;
if (byte >= 4) {
if (unit != PM_LSU1)
return -1;
++unit;
byte &= 3;
}
if (!pmc)
++pmc_grp_use[byte & 1];
if (busbyte[byte] && busbyte[byte] != unit)
return -1;
busbyte[byte] = unit;
unituse[unit] = 1;
}
}
if (pmc_grp_use[0] > 2 || pmc_grp_use[1] > 2)
return -1;
/*
* Assign resources and set multiplexer selects.
*
* PM_ISU0 can go either on TTM0 or TTM1, but that's the only
* choice we have to deal with.
*/
if (unituse[PM_ISU0] &
(unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) {
unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */
unituse[PM_ISU0] = 0;
}
/* Set TTM[01]SEL fields. */
ttmuse = 0;
for (i = PM_FPU; i <= PM_ISU1; ++i) {
if (!unituse[i])
continue;
if (ttmuse++)
return -1;
mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH;
}
ttmuse = 0;
for (; i <= PM_GRS; ++i) {
if (!unituse[i])
continue;
if (ttmuse++)
return -1;
mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH;
}
if (ttmuse > 1)
return -1;
/* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */
for (byte = 0; byte < 4; ++byte) {
unit = busbyte[byte];
if (!unit)
continue;
if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) {
/* get ISU0 through TTM1 rather than TTM0 */
unit = PM_ISU0_ALT;
} else if (unit == PM_LSU1 + 1) {
/* select lower word of LSU1 for this byte */
mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte);
}
ttm = unit >> 2;
mmcr1 |= (unsigned long)ttm
<< (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
for (i = 0; i < n_ev; ++i) {
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
psel = event[i] & PM_PMCSEL_MSK;
isbus = event[i] & PM_BUSEVENT_MSK;
if (!pmc) {
/* Bus event or any-PMC direct event */
for (pmc = 0; pmc < 4; ++pmc) {
if (pmc_inuse & (1 << pmc))
continue;
grp = (pmc >> 1) & 1;
if (isbus) {
if (grp == (byte & 1))
break;
} else if (pmc_grp_use[grp] < 2) {
++pmc_grp_use[grp];
break;
}
}
pmc_inuse |= 1 << pmc;
} else if (pmc <= 4) {
/* Direct event */
--pmc;
if ((psel == 8 || psel == 0x10) && isbus && (byte & 2))
/* add events on higher-numbered bus */
mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
} else {
/* Instructions or run cycles on PMC5/6 */
--pmc;
}
if (isbus && unit == PM_GRS) {
bit = psel & 7;
grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
mmcr1 |= (unsigned long)grsel << grsel_shift[bit];
}
if (power5_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
if (pmc <= 3)
mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
hwc[i] = pmc;
}
/* Return MMCRx values */
mmcr->mmcr0 = 0;
if (pmc_inuse & 1)
mmcr->mmcr0 = MMCR0_PMC1CE;
if (pmc_inuse & 0x3e)
mmcr->mmcr0 |= MMCR0_PMCjCE;
mmcr->mmcr1 = mmcr1;
mmcr->mmcra = mmcra;
return 0;
}
static void power5_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
if (pmc <= 3)
mmcr->mmcr1 &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
}
static int power5_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = 0xf,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x100009,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x4c1090, /* LD_REF_L1 */
[PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */
[PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */
};
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
* Table of generalized cache-related events.
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
static u64 power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x4c1090, 0x3c1088 },
[C(OP_WRITE)] = { 0x3c1090, 0xc10c3 },
[C(OP_PREFETCH)] = { 0xc70e7, 0 },
},
[C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { 0, 0 },
},
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x3c309b },
[C(OP_WRITE)] = { 0, 0 },
[C(OP_PREFETCH)] = { 0xc50c3, 0 },
},
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x2c4090, 0x800c4 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x800c0 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x230e4, 0x230e5 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { -1, -1 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
};
static struct power_pmu power5_pmu = {
.name = "POWER5",
.n_counter = 6,
.max_alternatives = MAX_ALT,
.add_fields = 0x7000090000555ul,
.test_adder = 0x3000490000000ul,
.compute_mmcr = power5_compute_mmcr,
.get_constraint = power5_get_constraint,
.get_alternatives = power5_get_alternatives,
.disable_pmc = power5_disable_pmc,
.n_generic = ARRAY_SIZE(power5_generic_events),
.generic_events = power5_generic_events,
.cache_events = &power5_cache_events,
.flags = PPMU_HAS_SSLOT,
};
int __init init_power5_pmu(void)
{
unsigned int pvr = mfspr(SPRN_PVR);
if (PVR_VER(pvr) != PVR_POWER5)
return -ENODEV;
return register_power_pmu(&power5_pmu);
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Virtio-mem device driver.
*
* Copyright Red Hat, Inc. 2020
*
* Author(s): David Hildenbrand <[email protected]>
*/
#include <linux/virtio.h>
#include <linux/virtio_mem.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/hrtimer.h>
#include <linux/crash_dump.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/vmalloc.h>
#include <linux/suspend.h>
#include <acpi/acpi_numa.h>
static bool unplug_online = true;
module_param(unplug_online, bool, 0644);
MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");
static bool force_bbm;
module_param(force_bbm, bool, 0444);
MODULE_PARM_DESC(force_bbm,
"Force Big Block Mode. Default is 0 (auto-selection)");
static unsigned long bbm_block_size;
module_param(bbm_block_size, ulong, 0444);
MODULE_PARM_DESC(bbm_block_size,
"Big Block size in bytes. Default is 0 (auto-detection).");
/*
* virtio-mem currently supports the following modes of operation:
*
* * Sub Block Mode (SBM): A Linux memory block spans 2..X subblocks (SB). The
* size of a Sub Block (SB) is determined based on the device block size, the
* pageblock size, and the maximum allocation granularity of the buddy.
* Subblocks within a Linux memory block might either be plugged or unplugged.
* Memory is added to / removed from Linux MM in Linux memory block granularity.
*
* * Big Block Mode (BBM): A Big Block (BB) spans 1..X Linux memory blocks.
* Memory is added to / removed from Linux MM in Big Block granularity.
*
* The mode is determined automatically based on the Linux memory block size
* and the device block size.
*
* User space / core MM (auto onlining) is responsible for onlining added
* Linux memory blocks - and for selecting a zone. Linux memory blocks are
* always onlined separately, and all memory within a Linux memory block is
* onlined to the same zone - virtio-mem relies on this behavior.
*/
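/*
 * For illustration (not part of the original comment): with 128 MiB Linux
 * memory blocks and a 2 MiB device block size the driver operates in SBM;
 * if the device block size exceeds the Linux memory block size, or BBM is
 * forced via the force_bbm module parameter, it operates in BBM instead.
 */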
/*
* State of a Linux memory block in SBM.
*/
enum virtio_mem_sbm_mb_state {
/* Unplugged, not added to Linux. Can be reused later. */
VIRTIO_MEM_SBM_MB_UNUSED = 0,
/* (Partially) plugged, not added to Linux. Error on add_memory(). */
VIRTIO_MEM_SBM_MB_PLUGGED,
/* Fully plugged, fully added to Linux, offline. */
VIRTIO_MEM_SBM_MB_OFFLINE,
/* Partially plugged, fully added to Linux, offline. */
VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
/* Fully plugged, fully added to Linux, onlined to a kernel zone. */
VIRTIO_MEM_SBM_MB_KERNEL,
/* Partially plugged, fully added to Linux, online to a kernel zone */
VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
/* Fully plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
VIRTIO_MEM_SBM_MB_MOVABLE,
/* Partially plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
VIRTIO_MEM_SBM_MB_COUNT
};
/*
* State of a Big Block (BB) in BBM, covering 1..X Linux memory blocks.
*/
enum virtio_mem_bbm_bb_state {
/* Unplugged, not added to Linux. Can be reused later. */
VIRTIO_MEM_BBM_BB_UNUSED = 0,
/* Plugged, not added to Linux. Error on add_memory(). */
VIRTIO_MEM_BBM_BB_PLUGGED,
/* Plugged and added to Linux. */
VIRTIO_MEM_BBM_BB_ADDED,
/* All online parts are fake-offline, ready to remove. */
VIRTIO_MEM_BBM_BB_FAKE_OFFLINE,
VIRTIO_MEM_BBM_BB_COUNT
};
struct virtio_mem {
struct virtio_device *vdev;
/* We might first have to unplug all memory when starting up. */
bool unplug_all_required;
/* Workqueue that processes the plug/unplug requests. */
struct work_struct wq;
atomic_t wq_active;
atomic_t config_changed;
/* Virtqueue for guest->host requests. */
struct virtqueue *vq;
/* Wait for a host response to a guest request. */
wait_queue_head_t host_resp;
/* Space for one guest request and the host response. */
struct virtio_mem_req req;
struct virtio_mem_resp resp;
/* The current size of the device. */
uint64_t plugged_size;
/* The requested size of the device. */
uint64_t requested_size;
/* The device block size (for communicating with the device). */
uint64_t device_block_size;
/* The determined node id for all memory of the device. */
int nid;
/* Physical start address of the memory region. */
uint64_t addr;
/* Maximum region size in bytes. */
uint64_t region_size;
/* The parent resource for all memory added via this device. */
struct resource *parent_resource;
/*
* Copy of "System RAM (virtio_mem)" to be used for
* add_memory_driver_managed().
*/
const char *resource_name;
/* Memory group identification. */
int mgid;
/*
* We don't want to add too much memory if it's not getting onlined,
* to avoid running OOM. Besides this threshold, we always allow at
* least two offline blocks at a time (whichever is bigger).
*/
#define VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD (1024 * 1024 * 1024)
atomic64_t offline_size;
uint64_t offline_threshold;
/* If set, the driver is in SBM, otherwise in BBM. */
bool in_sbm;
union {
struct {
/* Id of the first memory block of this device. */
unsigned long first_mb_id;
/* Id of the last usable memory block of this device. */
unsigned long last_usable_mb_id;
/* Id of the next memory block to prepare when needed. */
unsigned long next_mb_id;
/* The subblock size. */
uint64_t sb_size;
/* The number of subblocks per Linux memory block. */
uint32_t sbs_per_mb;
/*
* Some of the Linux memory blocks tracked as "partially
* plugged" are completely unplugged and can be offlined
* and removed -- which previously failed.
*/
bool have_unplugged_mb;
/* Summary of all memory block states. */
unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT];
/*
* One byte state per memory block. Allocated via
* vmalloc(). Resized (alloc+copy+free) on demand.
*
* With 128 MiB memory blocks, we have states for 512
* GiB of memory in one 4 KiB page.
*/
uint8_t *mb_states;
/*
* Bitmap: one bit per subblock. Allocated similar to
* sbm.mb_states.
*
* A set bit means the corresponding subblock is
* plugged, otherwise it's unplugged.
*
* With 4 MiB subblocks, we manage 128 GiB of memory
* in one 4 KiB page.
*/
unsigned long *sb_states;
} sbm;
struct {
/* Id of the first big block of this device. */
unsigned long first_bb_id;
/* Id of the last usable big block of this device. */
unsigned long last_usable_bb_id;
/* Id of the next big block to prepare when needed. */
unsigned long next_bb_id;
/* Summary of all big block states. */
unsigned long bb_count[VIRTIO_MEM_BBM_BB_COUNT];
/* One byte state per big block. See sbm.mb_states. */
uint8_t *bb_states;
/* The block size used for plugging/adding/removing. */
uint64_t bb_size;
} bbm;
};
/*
* Mutex that protects the sbm.mb_count, sbm.mb_states,
* sbm.sb_states, bbm.bb_count, and bbm.bb_states
*
* When this lock is held the pointers can't change, ONLINE and
* OFFLINE blocks can't change the state and no subblocks will get
* plugged/unplugged.
*
* In kdump mode, used to serialize requests, last_block_addr and
* last_block_plugged.
*/
struct mutex hotplug_mutex;
bool hotplug_active;
/* An error occurred we cannot handle - stop processing requests. */
bool broken;
/* Cached value of is_kdump_kernel() when the device was probed. */
bool in_kdump;
/* The driver is being removed. */
spinlock_t removal_lock;
bool removing;
/* Timer for retrying to plug/unplug memory. */
struct hrtimer retry_timer;
unsigned int retry_timer_ms;
#define VIRTIO_MEM_RETRY_TIMER_MIN_MS 50000
#define VIRTIO_MEM_RETRY_TIMER_MAX_MS 300000
/* Memory notifier (online/offline events). */
struct notifier_block memory_notifier;
/* Notifier to block hibernation image storing/reloading. */
struct notifier_block pm_notifier;
#ifdef CONFIG_PROC_VMCORE
/* vmcore callback for /proc/vmcore handling in kdump mode */
struct vmcore_cb vmcore_cb;
uint64_t last_block_addr;
bool last_block_plugged;
#endif /* CONFIG_PROC_VMCORE */
/* Next device in the list of virtio-mem devices. */
struct list_head next;
};
/*
* We have to share a single online_page callback among all virtio-mem
* devices. We use RCU to iterate the list in the callback.
*/
static DEFINE_MUTEX(virtio_mem_mutex);
static LIST_HEAD(virtio_mem_devices);
static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
unsigned long nr_pages);
static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
unsigned long nr_pages);
static void virtio_mem_retry(struct virtio_mem *vm);
static int virtio_mem_create_resource(struct virtio_mem *vm);
static void virtio_mem_delete_resource(struct virtio_mem *vm);
/*
* Register a virtio-mem device so it will be considered for the online_page
* callback.
*/
static int register_virtio_mem_device(struct virtio_mem *vm)
{
int rc = 0;
/* First device registers the callback. */
mutex_lock(&virtio_mem_mutex);
if (list_empty(&virtio_mem_devices))
rc = set_online_page_callback(&virtio_mem_online_page_cb);
if (!rc)
list_add_rcu(&vm->next, &virtio_mem_devices);
mutex_unlock(&virtio_mem_mutex);
return rc;
}
/*
* Unregister a virtio-mem device so it will no longer be considered for the
* online_page callback.
*/
static void unregister_virtio_mem_device(struct virtio_mem *vm)
{
/* Last device unregisters the callback. */
mutex_lock(&virtio_mem_mutex);
list_del_rcu(&vm->next);
if (list_empty(&virtio_mem_devices))
restore_online_page_callback(&virtio_mem_online_page_cb);
mutex_unlock(&virtio_mem_mutex);
synchronize_rcu();
}
/*
* Calculate the memory block id of a given address.
*/
static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
{
return addr / memory_block_size_bytes();
}
/*
* Calculate the physical start address of a given memory block id.
*/
static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
{
return mb_id * memory_block_size_bytes();
}
/*
* Calculate the big block id of a given address.
*/
static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
uint64_t addr)
{
return addr / vm->bbm.bb_size;
}
/*
* Calculate the physical start address of a given big block id.
*/
static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
unsigned long bb_id)
{
return bb_id * vm->bbm.bb_size;
}
/*
* Calculate the subblock id of a given address.
*/
static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
unsigned long addr)
{
const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);
return (addr - mb_addr) / vm->sbm.sb_size;
}
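/*
 * Example (illustrative, assuming 128 MiB memory blocks and 4 MiB
 * subblocks): an address at 140 MiB falls into mb_id 1, at an offset of
 * 12 MiB into that block, and therefore into sb_id 3.
 */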
/*
* Set the state of a big block, taking care of the state counter.
*/
static void virtio_mem_bbm_set_bb_state(struct virtio_mem *vm,
unsigned long bb_id,
enum virtio_mem_bbm_bb_state state)
{
const unsigned long idx = bb_id - vm->bbm.first_bb_id;
enum virtio_mem_bbm_bb_state old_state;
old_state = vm->bbm.bb_states[idx];
vm->bbm.bb_states[idx] = state;
BUG_ON(vm->bbm.bb_count[old_state] == 0);
vm->bbm.bb_count[old_state]--;
vm->bbm.bb_count[state]++;
}
/*
* Get the state of a big block.
*/
static enum virtio_mem_bbm_bb_state virtio_mem_bbm_get_bb_state(struct virtio_mem *vm,
unsigned long bb_id)
{
return vm->bbm.bb_states[bb_id - vm->bbm.first_bb_id];
}
/*
* Prepare the big block state array for the next big block.
*/
static int virtio_mem_bbm_bb_states_prepare_next_bb(struct virtio_mem *vm)
{
unsigned long old_bytes = vm->bbm.next_bb_id - vm->bbm.first_bb_id;
unsigned long new_bytes = old_bytes + 1;
int old_pages = PFN_UP(old_bytes);
int new_pages = PFN_UP(new_bytes);
uint8_t *new_array;
if (vm->bbm.bb_states && old_pages == new_pages)
return 0;
new_array = vzalloc(new_pages * PAGE_SIZE);
if (!new_array)
return -ENOMEM;
mutex_lock(&vm->hotplug_mutex);
if (vm->bbm.bb_states)
memcpy(new_array, vm->bbm.bb_states, old_pages * PAGE_SIZE);
vfree(vm->bbm.bb_states);
vm->bbm.bb_states = new_array;
mutex_unlock(&vm->hotplug_mutex);
return 0;
}
#define virtio_mem_bbm_for_each_bb(_vm, _bb_id, _state) \
for (_bb_id = _vm->bbm.first_bb_id; \
_bb_id < _vm->bbm.next_bb_id && _vm->bbm.bb_count[_state]; \
_bb_id++) \
if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
#define virtio_mem_bbm_for_each_bb_rev(_vm, _bb_id, _state) \
for (_bb_id = _vm->bbm.next_bb_id - 1; \
_bb_id >= _vm->bbm.first_bb_id && _vm->bbm.bb_count[_state]; \
_bb_id--) \
if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
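/*
 * Usage sketch (illustrative, handle_bb() is hypothetical): iterate all big
 * blocks currently added to Linux:
 *
 *	virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED)
 *		handle_bb(vm, bb_id);
 */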
/*
* Set the state of a memory block, taking care of the state counter.
*/
static void virtio_mem_sbm_set_mb_state(struct virtio_mem *vm,
unsigned long mb_id, uint8_t state)
{
const unsigned long idx = mb_id - vm->sbm.first_mb_id;
uint8_t old_state;
old_state = vm->sbm.mb_states[idx];
vm->sbm.mb_states[idx] = state;
BUG_ON(vm->sbm.mb_count[old_state] == 0);
vm->sbm.mb_count[old_state]--;
vm->sbm.mb_count[state]++;
}
/*
* Get the state of a memory block.
*/
static uint8_t virtio_mem_sbm_get_mb_state(struct virtio_mem *vm,
unsigned long mb_id)
{
const unsigned long idx = mb_id - vm->sbm.first_mb_id;
return vm->sbm.mb_states[idx];
}
/*
* Prepare the state array for the next memory block.
*/
static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
{
int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id);
int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);
uint8_t *new_array;
if (vm->sbm.mb_states && old_pages == new_pages)
return 0;
new_array = vzalloc(new_pages * PAGE_SIZE);
if (!new_array)
return -ENOMEM;
mutex_lock(&vm->hotplug_mutex);
if (vm->sbm.mb_states)
memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE);
vfree(vm->sbm.mb_states);
vm->sbm.mb_states = new_array;
mutex_unlock(&vm->hotplug_mutex);
return 0;
}
#define virtio_mem_sbm_for_each_mb(_vm, _mb_id, _state) \
for (_mb_id = _vm->sbm.first_mb_id; \
_mb_id < _vm->sbm.next_mb_id && _vm->sbm.mb_count[_state]; \
_mb_id++) \
if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
#define virtio_mem_sbm_for_each_mb_rev(_vm, _mb_id, _state) \
for (_mb_id = _vm->sbm.next_mb_id - 1; \
_mb_id >= _vm->sbm.first_mb_id && _vm->sbm.mb_count[_state]; \
_mb_id--) \
if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
/*
* Calculate the bit number in the subblock bitmap for the given subblock
* inside the given memory block.
*/
static int virtio_mem_sbm_sb_state_bit_nr(struct virtio_mem *vm,
unsigned long mb_id, int sb_id)
{
return (mb_id - vm->sbm.first_mb_id) * vm->sbm.sbs_per_mb + sb_id;
}
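/*
 * Example (illustrative): with vm->sbm.sbs_per_mb == 32, subblock 5 of the
 * third tracked memory block (mb_id == first_mb_id + 2) maps to bit
 * 2 * 32 + 5 == 69 in the bitmap.
 */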
/*
* Mark all selected subblocks plugged.
*
* Will not modify the state of the memory block.
*/
static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm,
unsigned long mb_id, int sb_id,
int count)
{
const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
__bitmap_set(vm->sbm.sb_states, bit, count);
}
/*
* Mark all selected subblocks unplugged.
*
* Will not modify the state of the memory block.
*/
static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm,
unsigned long mb_id, int sb_id,
int count)
{
const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
__bitmap_clear(vm->sbm.sb_states, bit, count);
}
/*
* Test if all selected subblocks are plugged.
*/
static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm,
unsigned long mb_id, int sb_id,
int count)
{
const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
if (count == 1)
return test_bit(bit, vm->sbm.sb_states);
/* TODO: Helper similar to bitmap_set() */
return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >=
bit + count;
}
/*
* Test if all selected subblocks are unplugged.
*/
static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm,
unsigned long mb_id, int sb_id,
int count)
{
const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
/* TODO: Helper similar to bitmap_set() */
return find_next_bit(vm->sbm.sb_states, bit + count, bit) >=
bit + count;
}
/*
* Find the first unplugged subblock. Returns vm->sbm.sbs_per_mb in case there is
* none.
*/
static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm,
unsigned long mb_id)
{
const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, 0);
return find_next_zero_bit(vm->sbm.sb_states,
bit + vm->sbm.sbs_per_mb, bit) - bit;
}
/*
* Prepare the subblock bitmap for the next memory block.
*/
static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
{
const unsigned long old_nb_mb = vm->sbm.next_mb_id - vm->sbm.first_mb_id;
const unsigned long old_nb_bits = old_nb_mb * vm->sbm.sbs_per_mb;
const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->sbm.sbs_per_mb;
int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
unsigned long *new_bitmap, *old_bitmap;
if (vm->sbm.sb_states && old_pages == new_pages)
return 0;
new_bitmap = vzalloc(new_pages * PAGE_SIZE);
if (!new_bitmap)
return -ENOMEM;
mutex_lock(&vm->hotplug_mutex);
if (vm->sbm.sb_states)
memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);
old_bitmap = vm->sbm.sb_states;
vm->sbm.sb_states = new_bitmap;
mutex_unlock(&vm->hotplug_mutex);
vfree(old_bitmap);
return 0;
}
/*
* Test if we could add memory without creating too much offline memory -
* to avoid running OOM if memory onlining is deferred.
*/
static bool virtio_mem_could_add_memory(struct virtio_mem *vm, uint64_t size)
{
if (WARN_ON_ONCE(size > vm->offline_threshold))
return false;
return atomic64_read(&vm->offline_size) + size <= vm->offline_threshold;
}
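/*
 * Example (illustrative): with the default 1 GiB offline threshold and
 * 128 MiB memory blocks, adding another block is allowed as long as at
 * most 896 MiB of memory added by this device is still offline.
 */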
/*
* Try adding memory to Linux. Will usually only fail if out of memory.
*
* Must not be called with the vm->hotplug_mutex held (possible deadlock with
* onlining code).
*
* Will not modify the state of memory blocks in virtio-mem.
*/
static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
uint64_t size)
{
int rc;
/*
* When force-unloading the driver and we still have memory added to
* Linux, the resource name has to stay.
*/
if (!vm->resource_name) {
vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
GFP_KERNEL);
if (!vm->resource_name)
return -ENOMEM;
}
dev_dbg(&vm->vdev->dev, "adding memory: 0x%llx - 0x%llx\n", addr,
addr + size - 1);
/* Memory might get onlined immediately. */
atomic64_add(size, &vm->offline_size);
rc = add_memory_driver_managed(vm->mgid, addr, size, vm->resource_name,
MHP_MERGE_RESOURCE | MHP_NID_IS_MGID);
if (rc) {
atomic64_sub(size, &vm->offline_size);
dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
/*
* TODO: Linux MM does not properly clean up yet in all cases
* where adding of memory failed - especially on -ENOMEM.
*/
}
return rc;
}
/*
* See virtio_mem_add_memory(): Try adding a single Linux memory block.
*/
static int virtio_mem_sbm_add_mb(struct virtio_mem *vm, unsigned long mb_id)
{
const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
const uint64_t size = memory_block_size_bytes();
return virtio_mem_add_memory(vm, addr, size);
}
/*
* See virtio_mem_add_memory(): Try adding a big block.
*/
static int virtio_mem_bbm_add_bb(struct virtio_mem *vm, unsigned long bb_id)
{
const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
const uint64_t size = vm->bbm.bb_size;
return virtio_mem_add_memory(vm, addr, size);
}
/*
* Try removing memory from Linux. Will only fail if memory blocks aren't
* offline.
*
* Must not be called with the vm->hotplug_mutex held (possible deadlock with
* onlining code).
*
* Will not modify the state of memory blocks in virtio-mem.
*/
static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
uint64_t size)
{
int rc;
dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
addr + size - 1);
rc = remove_memory(addr, size);
if (!rc) {
atomic64_sub(size, &vm->offline_size);
/*
* We might have freed up memory we can now unplug, retry
* immediately instead of waiting.
*/
virtio_mem_retry(vm);
} else {
dev_dbg(&vm->vdev->dev, "removing memory failed: %d\n", rc);
}
return rc;
}
/*
* See virtio_mem_remove_memory(): Try removing a single Linux memory block.
*/
static int virtio_mem_sbm_remove_mb(struct virtio_mem *vm, unsigned long mb_id)
{
const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
const uint64_t size = memory_block_size_bytes();
return virtio_mem_remove_memory(vm, addr, size);
}
/*
* Try offlining and removing memory from Linux.
*
* Must not be called with the vm->hotplug_mutex held (possible deadlock with
* onlining code).
*
* Will not modify the state of memory blocks in virtio-mem.
*/
static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
uint64_t addr,
uint64_t size)
{
int rc;
dev_dbg(&vm->vdev->dev,
"offlining and removing memory: 0x%llx - 0x%llx\n", addr,
addr + size - 1);
rc = offline_and_remove_memory(addr, size);
if (!rc) {
atomic64_sub(size, &vm->offline_size);
/*
* We might have freed up memory we can now unplug, retry
* immediately instead of waiting.
*/
virtio_mem_retry(vm);
return 0;
}
dev_dbg(&vm->vdev->dev, "offlining and removing memory failed: %d\n", rc);
/*
* We don't really expect this to fail, because we fake-offlined all
* memory already. But it could fail in corner cases.
*/
WARN_ON_ONCE(rc != -ENOMEM && rc != -EBUSY);
return rc == -ENOMEM ? -ENOMEM : -EBUSY;
}
/*
* See virtio_mem_offline_and_remove_memory(): Try offlining and removing
* a single Linux memory block.
*/
static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm,
unsigned long mb_id)
{
const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
const uint64_t size = memory_block_size_bytes();
return virtio_mem_offline_and_remove_memory(vm, addr, size);
}
/*
* Try (offlining and) removing memory from Linux in case all subblocks are
* unplugged. Can be called on online and offline memory blocks.
*
* May modify the state of memory blocks in virtio-mem.
*/
static int virtio_mem_sbm_try_remove_unplugged_mb(struct virtio_mem *vm,
unsigned long mb_id)
{
int rc;
/*
* Once all subblocks of a memory block were unplugged, offline and
* remove it.
*/
if (!virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
return 0;
/* offline_and_remove_memory() works for online and offline memory. */
mutex_unlock(&vm->hotplug_mutex);
rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
mutex_lock(&vm->hotplug_mutex);
if (!rc)
virtio_mem_sbm_set_mb_state(vm, mb_id,
VIRTIO_MEM_SBM_MB_UNUSED);
return rc;
}
/*
* See virtio_mem_offline_and_remove_memory(): Try to offline and remove
* all Linux memory blocks covered by the big block.
*/
static int virtio_mem_bbm_offline_and_remove_bb(struct virtio_mem *vm,
unsigned long bb_id)
{
const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
const uint64_t size = vm->bbm.bb_size;
return virtio_mem_offline_and_remove_memory(vm, addr, size);
}
/*
* Trigger the workqueue so the device can perform its magic.
*/
static void virtio_mem_retry(struct virtio_mem *vm)
{
unsigned long flags;
spin_lock_irqsave(&vm->removal_lock, flags);
if (!vm->removing)
queue_work(system_freezable_wq, &vm->wq);
spin_unlock_irqrestore(&vm->removal_lock, flags);
}
static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
{
int node = NUMA_NO_NODE;
#if defined(CONFIG_ACPI_NUMA)
if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
node = pxm_to_node(node_id);
#endif
return node;
}
/*
* Test if a virtio-mem device overlaps with the given range. Can be called
* from (notifier) callbacks lockless.
*/
static bool virtio_mem_overlaps_range(struct virtio_mem *vm, uint64_t start,
uint64_t size)
{
return start < vm->addr + vm->region_size && vm->addr < start + size;
}
/*
* Test if a virtio-mem device contains a given range. Can be called from
* (notifier) callbacks lockless.
*/
static bool virtio_mem_contains_range(struct virtio_mem *vm, uint64_t start,
uint64_t size)
{
return start >= vm->addr && start + size <= vm->addr + vm->region_size;
}
static int virtio_mem_sbm_notify_going_online(struct virtio_mem *vm,
unsigned long mb_id)
{
switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
case VIRTIO_MEM_SBM_MB_OFFLINE:
return NOTIFY_OK;
default:
break;
}
dev_warn_ratelimited(&vm->vdev->dev,
"memory block onlining denied\n");
return NOTIFY_BAD;
}
static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm,
unsigned long mb_id)
{
switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
virtio_mem_sbm_set_mb_state(vm, mb_id,
VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
break;
case VIRTIO_MEM_SBM_MB_KERNEL:
case VIRTIO_MEM_SBM_MB_MOVABLE:
virtio_mem_sbm_set_mb_state(vm, mb_id,
VIRTIO_MEM_SBM_MB_OFFLINE);
break;
default:
BUG();
break;
}
}
static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
unsigned long mb_id,
unsigned long start_pfn)
{
const bool is_movable = is_zone_movable_page(pfn_to_page(start_pfn));
int new_state;
switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
new_state = VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL;
if (is_movable)
new_state = VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL;
break;
case VIRTIO_MEM_SBM_MB_OFFLINE:
new_state = VIRTIO_MEM_SBM_MB_KERNEL;
if (is_movable)
new_state = VIRTIO_MEM_SBM_MB_MOVABLE;
break;
default:
BUG();
break;
}
virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
}
static void virtio_mem_sbm_notify_going_offline(struct virtio_mem *vm,
unsigned long mb_id)
{
const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
unsigned long pfn;
int sb_id;
for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
continue;
pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
sb_id * vm->sbm.sb_size);
virtio_mem_fake_offline_going_offline(pfn, nr_pages);
}
}
static void virtio_mem_sbm_notify_cancel_offline(struct virtio_mem *vm,
unsigned long mb_id)
{
const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
unsigned long pfn;
int sb_id;
for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
continue;
pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
sb_id * vm->sbm.sb_size);
virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
}
}
static void virtio_mem_bbm_notify_going_offline(struct virtio_mem *vm,
unsigned long bb_id,
unsigned long pfn,
unsigned long nr_pages)
{
/*
* When marked as "fake-offline", all online memory of this device block
* is allocated by us. Otherwise, we don't have any memory allocated.
*/
if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
return;
virtio_mem_fake_offline_going_offline(pfn, nr_pages);
}
static void virtio_mem_bbm_notify_cancel_offline(struct virtio_mem *vm,
unsigned long bb_id,
unsigned long pfn,
unsigned long nr_pages)
{
if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
return;
virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
}
/*
* This callback will either be called synchronously from add_memory() or
* asynchronously (e.g., triggered via user space). We have to be careful
* with locking when calling add_memory().
*/
static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
unsigned long action, void *arg)
{
struct virtio_mem *vm = container_of(nb, struct virtio_mem,
memory_notifier);
struct memory_notify *mhp = arg;
const unsigned long start = PFN_PHYS(mhp->start_pfn);
const unsigned long size = PFN_PHYS(mhp->nr_pages);
int rc = NOTIFY_OK;
unsigned long id;
if (!virtio_mem_overlaps_range(vm, start, size))
return NOTIFY_DONE;
if (vm->in_sbm) {
id = virtio_mem_phys_to_mb_id(start);
/*
* In SBM, we add memory in separate memory blocks - we expect
* it to be onlined/offlined in the same granularity. Bail out
* if this ever changes.
*/
if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
!IS_ALIGNED(start, memory_block_size_bytes())))
return NOTIFY_BAD;
} else {
id = virtio_mem_phys_to_bb_id(vm, start);
/*
* In BBM, we only care about onlining/offlining happening
* within a single big block, we don't care about the
* actual granularity as we don't track individual Linux
* memory blocks.
*/
if (WARN_ON_ONCE(id != virtio_mem_phys_to_bb_id(vm, start + size - 1)))
return NOTIFY_BAD;
}
/*
* Avoid circular locking lockdep warnings. We lock the mutex
* e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The
* blocking_notifier_call_chain() has its own lock, which gets unlocked
* between both notifier calls and will bail out. False positive.
*/
lockdep_off();
switch (action) {
case MEM_GOING_OFFLINE:
mutex_lock(&vm->hotplug_mutex);
if (vm->removing) {
rc = notifier_from_errno(-EBUSY);
mutex_unlock(&vm->hotplug_mutex);
break;
}
vm->hotplug_active = true;
if (vm->in_sbm)
virtio_mem_sbm_notify_going_offline(vm, id);
else
virtio_mem_bbm_notify_going_offline(vm, id,
mhp->start_pfn,
mhp->nr_pages);
break;
case MEM_GOING_ONLINE:
mutex_lock(&vm->hotplug_mutex);
if (vm->removing) {
rc = notifier_from_errno(-EBUSY);
mutex_unlock(&vm->hotplug_mutex);
break;
}
vm->hotplug_active = true;
if (vm->in_sbm)
rc = virtio_mem_sbm_notify_going_online(vm, id);
break;
case MEM_OFFLINE:
if (vm->in_sbm)
virtio_mem_sbm_notify_offline(vm, id);
atomic64_add(size, &vm->offline_size);
/*
* Trigger the workqueue. Now that we have some offline memory,
* maybe we can handle pending unplug requests.
*/
if (!unplug_online)
virtio_mem_retry(vm);
vm->hotplug_active = false;
mutex_unlock(&vm->hotplug_mutex);
break;
case MEM_ONLINE:
if (vm->in_sbm)
virtio_mem_sbm_notify_online(vm, id, mhp->start_pfn);
atomic64_sub(size, &vm->offline_size);
/*
* Start adding more memory once we onlined half of our
* threshold. Don't trigger if it's possibly due to our action
* (e.g., us adding memory which gets onlined immediately from
* the core).
*/
if (!atomic_read(&vm->wq_active) &&
virtio_mem_could_add_memory(vm, vm->offline_threshold / 2))
virtio_mem_retry(vm);
vm->hotplug_active = false;
mutex_unlock(&vm->hotplug_mutex);
break;
case MEM_CANCEL_OFFLINE:
if (!vm->hotplug_active)
break;
if (vm->in_sbm)
virtio_mem_sbm_notify_cancel_offline(vm, id);
else
virtio_mem_bbm_notify_cancel_offline(vm, id,
mhp->start_pfn,
mhp->nr_pages);
vm->hotplug_active = false;
mutex_unlock(&vm->hotplug_mutex);
break;
case MEM_CANCEL_ONLINE:
if (!vm->hotplug_active)
break;
vm->hotplug_active = false;
mutex_unlock(&vm->hotplug_mutex);
break;
default:
break;
}
lockdep_on();
return rc;
}
static int virtio_mem_pm_notifier_cb(struct notifier_block *nb,
unsigned long action, void *arg)
{
struct virtio_mem *vm = container_of(nb, struct virtio_mem,
pm_notifier);
switch (action) {
case PM_HIBERNATION_PREPARE:
case PM_RESTORE_PREPARE:
/*
* When restarting the VM, all memory is unplugged. Don't
* allow hibernating and restoring from an image.
*/
dev_err(&vm->vdev->dev, "hibernation is not supported.\n");
return NOTIFY_BAD;
default:
return NOTIFY_OK;
}
}
/*
* Set a range of pages PG_offline. Remember pages that were never onlined
* (via generic_online_page()) using PageDirty().
*/
static void virtio_mem_set_fake_offline(unsigned long pfn,
unsigned long nr_pages, bool onlined)
{
page_offline_begin();
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
if (!onlined)
/*
* Pages that have not been onlined yet were initialized
* to PageOffline(). Remember that we have to route them
* through generic_online_page().
*/
SetPageDirty(page);
else
__SetPageOffline(page);
VM_WARN_ON_ONCE(!PageOffline(page));
}
page_offline_end();
}
/*
* Clear PG_offline from a range of pages. If the pages were never onlined,
* (via generic_online_page()), clear PageDirty().
*/
static void virtio_mem_clear_fake_offline(unsigned long pfn,
unsigned long nr_pages, bool onlined)
{
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
if (!onlined)
/* generic_online_page() will clear PageOffline(). */
ClearPageDirty(page);
else
__ClearPageOffline(page);
}
}
/*
* Release a range of fake-offline pages to the buddy, effectively
* fake-onlining them.
*/
static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{
unsigned long order = MAX_PAGE_ORDER;
unsigned long i;
/*
* We might get called for ranges that don't cover properly aligned
* MAX_PAGE_ORDER pages; however, we can only online properly aligned
* pages with an order of MAX_PAGE_ORDER at maximum.
*/
while (!IS_ALIGNED(pfn | nr_pages, 1 << order))
order--;
for (i = 0; i < nr_pages; i += 1 << order) {
struct page *page = pfn_to_page(pfn + i);
/*
* If the page is PageDirty(), it was kept fake-offline when
* onlining the memory block. Otherwise, it was allocated
* using alloc_contig_range(). All pages in a subblock are
* alike.
*/
if (PageDirty(page)) {
virtio_mem_clear_fake_offline(pfn + i, 1 << order, false);
generic_online_page(page, order);
} else {
virtio_mem_clear_fake_offline(pfn + i, 1 << order, true);
free_contig_range(pfn + i, 1 << order);
adjust_managed_page_count(page, 1 << order);
}
}
}
/*
* Try to allocate a range, marking pages fake-offline, effectively
* fake-offlining them.
*/
static int virtio_mem_fake_offline(struct virtio_mem *vm, unsigned long pfn,
unsigned long nr_pages)
{
const bool is_movable = is_zone_movable_page(pfn_to_page(pfn));
int rc, retry_count;
/*
* TODO: We want an alloc_contig_range() mode that tries to allocate
* harder (e.g., dealing with temporarily pinned pages, PCP), especially
* with ZONE_MOVABLE. So for now, retry a couple of times with
* ZONE_MOVABLE before giving up - because that zone is supposed to give
* some guarantees.
*/
for (retry_count = 0; retry_count < 5; retry_count++) {
/*
* If the config changed, stop immediately and go back to the
* main loop: avoid trying to keep unplugging if the device
* might have decided to not remove any more memory.
*/
if (atomic_read(&vm->config_changed))
return -EAGAIN;
rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
GFP_KERNEL);
if (rc == -ENOMEM)
/* whoops, out of memory */
return rc;
else if (rc && !is_movable)
break;
else if (rc)
continue;
virtio_mem_set_fake_offline(pfn, nr_pages, true);
adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
return 0;
}
return -EBUSY;
}
/*
* Handle fake-offline pages when memory is going offline - such that the
* pages can be skipped by mm-core when offlining.
*/
static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
unsigned long nr_pages)
{
struct page *page;
unsigned long i;
/* Drop our reference to the pages so the memory can get offlined. */
for (i = 0; i < nr_pages; i++) {
page = pfn_to_page(pfn + i);
if (WARN_ON(!page_ref_dec_and_test(page)))
dump_page(page, "fake-offline page referenced");
}
}
/*
* Handle fake-offline pages when memory offlining is canceled - to undo
* what we did in virtio_mem_fake_offline_going_offline().
*/
static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
unsigned long nr_pages)
{
unsigned long i;
/*
* Get the reference again that we dropped via page_ref_dec_and_test()
* when going offline.
*/
for (i = 0; i < nr_pages; i++)
page_ref_inc(pfn_to_page(pfn + i));
}
static void virtio_mem_online_page(struct virtio_mem *vm,
struct page *page, unsigned int order)
{
const unsigned long start = page_to_phys(page);
const unsigned long end = start + PFN_PHYS(1 << order);
unsigned long addr, next, id, sb_id, count;
bool do_online;
/*
* We can get called with any order up to MAX_PAGE_ORDER. If our subblock
* size is smaller than that and we have a mixture of plugged and
* unplugged subblocks within such a page, we have to process in
* smaller granularity. In that case we'll adjust the order exactly once
* within the loop.
*/
for (addr = start; addr < end; ) {
next = addr + PFN_PHYS(1 << order);
if (vm->in_sbm) {
id = virtio_mem_phys_to_mb_id(addr);
sb_id = virtio_mem_phys_to_sb_id(vm, addr);
count = virtio_mem_phys_to_sb_id(vm, next - 1) - sb_id + 1;
if (virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, count)) {
/* Fully plugged. */
do_online = true;
} else if (count == 1 ||
virtio_mem_sbm_test_sb_unplugged(vm, id, sb_id, count)) {
/* Fully unplugged. */
do_online = false;
} else {
/*
* Mixture, process sub-blocks instead. This
* will be at least the size of a pageblock.
* We'll run into this case exactly once.
*/
order = ilog2(vm->sbm.sb_size) - PAGE_SHIFT;
do_online = virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, 1);
continue;
}
} else {
/*
* If the whole block is marked fake offline, keep
* everything that way.
*/
id = virtio_mem_phys_to_bb_id(vm, addr);
do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
}
if (do_online)
generic_online_page(pfn_to_page(PFN_DOWN(addr)), order);
else
virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
false);
addr = next;
}
}
static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
{
const unsigned long addr = page_to_phys(page);
struct virtio_mem *vm;
rcu_read_lock();
list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
/*
* Pages we're onlining will never cross memory blocks and,
* therefore, not virtio-mem devices.
*/
if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
continue;
/*
* virtio_mem_set_fake_offline() might sleep. We can safely
* drop the RCU lock at this point because the device
* cannot go away. See virtio_mem_remove() how races
* between memory onlining and device removal are handled.
*/
rcu_read_unlock();
virtio_mem_online_page(vm, page, order);
return;
}
rcu_read_unlock();
/* not virtio-mem memory, but e.g., a DIMM. online it */
generic_online_page(page, order);
}
static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
const struct virtio_mem_req *req)
{
struct scatterlist *sgs[2], sg_req, sg_resp;
unsigned int len;
int rc;
/* don't use the request residing on the stack (vaddr) */
vm->req = *req;
/* out: buffer for request */
sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
sgs[0] = &sg_req;
/* in: buffer for response */
sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
sgs[1] = &sg_resp;
rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
if (rc < 0)
return rc;
virtqueue_kick(vm->vq);
/* wait for a response */
wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));
return virtio16_to_cpu(vm->vdev, vm->resp.type);
}
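/*
 * Note (editorial): sg_req is the single device-readable buffer and sg_resp
 * the single device-writable buffer, which is why virtqueue_add_sgs() above
 * is called with one "out" and one "in" scatterlist.
 */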
static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
uint64_t size)
{
const uint64_t nb_vm_blocks = size / vm->device_block_size;
const struct virtio_mem_req req = {
.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
.u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
.u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
};
int rc = -ENOMEM;
if (atomic_read(&vm->config_changed))
return -EAGAIN;
dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
addr + size - 1);
switch (virtio_mem_send_request(vm, &req)) {
case VIRTIO_MEM_RESP_ACK:
vm->plugged_size += size;
return 0;
case VIRTIO_MEM_RESP_NACK:
rc = -EAGAIN;
break;
case VIRTIO_MEM_RESP_BUSY:
rc = -ETXTBSY;
break;
case VIRTIO_MEM_RESP_ERROR:
rc = -EINVAL;
break;
default:
break;
}
dev_dbg(&vm->vdev->dev, "plugging memory failed: %d\n", rc);
return rc;
}
static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
uint64_t size)
{
const uint64_t nb_vm_blocks = size / vm->device_block_size;
const struct virtio_mem_req req = {
.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
.u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
.u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
};
int rc = -ENOMEM;
if (atomic_read(&vm->config_changed))
return -EAGAIN;
dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr,
addr + size - 1);
switch (virtio_mem_send_request(vm, &req)) {
case VIRTIO_MEM_RESP_ACK:
vm->plugged_size -= size;
return 0;
case VIRTIO_MEM_RESP_BUSY:
rc = -ETXTBSY;
break;
case VIRTIO_MEM_RESP_ERROR:
rc = -EINVAL;
break;
default:
break;
}
dev_dbg(&vm->vdev->dev, "unplugging memory failed: %d\n", rc);
return rc;
}
static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
{
const struct virtio_mem_req req = {
.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
};
int rc = -ENOMEM;
dev_dbg(&vm->vdev->dev, "unplugging all memory");
switch (virtio_mem_send_request(vm, &req)) {
case VIRTIO_MEM_RESP_ACK:
vm->unplug_all_required = false;
vm->plugged_size = 0;
/* usable region might have shrunk */
atomic_set(&vm->config_changed, 1);
return 0;
case VIRTIO_MEM_RESP_BUSY:
rc = -ETXTBSY;
break;
default:
break;
}
dev_dbg(&vm->vdev->dev, "unplugging all memory failed: %d\n", rc);
return rc;
}
/*
* Plug selected subblocks. Updates the plugged state, but not the state
* of the memory block.
*/
static int virtio_mem_sbm_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
int sb_id, int count)
{
const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
sb_id * vm->sbm.sb_size;
const uint64_t size = count * vm->sbm.sb_size;
int rc;
rc = virtio_mem_send_plug_request(vm, addr, size);
if (!rc)
virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count);
return rc;
}
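/*
 * Example (illustrative, assuming a 4 MiB subblock size): plugging
 * count == 2 subblocks starting at sb_id == 3 sends a single plug request
 * for 8 MiB at an offset of 12 MiB into the memory block.
 */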
/*
* Unplug selected subblocks. Updates the plugged state, but not the state
* of the memory block.
*/
static int virtio_mem_sbm_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
int sb_id, int count)
{
const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
sb_id * vm->sbm.sb_size;
const uint64_t size = count * vm->sbm.sb_size;
int rc;
rc = virtio_mem_send_unplug_request(vm, addr, size);
if (!rc)
virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count);
return rc;
}
/*
* Request to unplug a big block.
*
* Will not modify the state of the big block.
*/
static int virtio_mem_bbm_unplug_bb(struct virtio_mem *vm, unsigned long bb_id)
{
const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
const uint64_t size = vm->bbm.bb_size;
return virtio_mem_send_unplug_request(vm, addr, size);
}
/*
* Request to plug a big block.
*
* Will not modify the state of the big block.
*/
static int virtio_mem_bbm_plug_bb(struct virtio_mem *vm, unsigned long bb_id)
{
const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
const uint64_t size = vm->bbm.bb_size;
return virtio_mem_send_plug_request(vm, addr, size);
}
/*
 * Unplug the desired number of plugged subblocks of an offline or not-added
* memory block. Will fail if any subblock cannot get unplugged (instead of
* skipping it).
*
* Will not modify the state of the memory block.
*
* Note: can fail after some subblocks were unplugged.
*/
static int virtio_mem_sbm_unplug_any_sb_raw(struct virtio_mem *vm,
unsigned long mb_id, uint64_t *nb_sb)
{
int sb_id, count;
int rc;
sb_id = vm->sbm.sbs_per_mb - 1;
while (*nb_sb) {
/* Find the next candidate subblock */
while (sb_id >= 0 &&
virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1))
sb_id--;
if (sb_id < 0)
break;
/* Try to unplug multiple subblocks at a time */
count = 1;
while (count < *nb_sb && sb_id > 0 &&
virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
count++;
sb_id--;
}
rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
if (rc)
return rc;
*nb_sb -= count;
sb_id--;
}
return 0;
}
/*
* Unplug all plugged subblocks of an offline or not-added memory block.
*
* Will not modify the state of the memory block.
*
* Note: can fail after some subblocks were unplugged.
*/
static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id)
{
uint64_t nb_sb = vm->sbm.sbs_per_mb;
return virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, &nb_sb);
}
/*
* Prepare tracking data for the next memory block.
*/
static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm,
unsigned long *mb_id)
{
int rc;
if (vm->sbm.next_mb_id > vm->sbm.last_usable_mb_id)
return -ENOSPC;
/* Resize the state array if required. */
rc = virtio_mem_sbm_mb_states_prepare_next_mb(vm);
if (rc)
return rc;
/* Resize the subblock bitmap if required. */
rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm);
if (rc)
return rc;
vm->sbm.mb_count[VIRTIO_MEM_SBM_MB_UNUSED]++;
*mb_id = vm->sbm.next_mb_id++;
return 0;
}
/*
* Try to plug the desired number of subblocks and add the memory block
* to Linux.
*
* Will modify the state of the memory block.
*/
static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
unsigned long mb_id, uint64_t *nb_sb)
{
const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
int rc;
if (WARN_ON_ONCE(!count))
return -EINVAL;
/*
 * Plug the requested number of subblocks before adding it to Linux,
* so that onlining will directly online all plugged subblocks.
*/
rc = virtio_mem_sbm_plug_sb(vm, mb_id, 0, count);
if (rc)
return rc;
/*
* Mark the block properly offline before adding it to Linux,
* so the memory notifiers will find the block in the right state.
*/
if (count == vm->sbm.sbs_per_mb)
virtio_mem_sbm_set_mb_state(vm, mb_id,
VIRTIO_MEM_SBM_MB_OFFLINE);
else
virtio_mem_sbm_set_mb_state(vm, mb_id,
VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
/* Add the memory block to Linux - if that fails, try to unplug. */
rc = virtio_mem_sbm_add_mb(vm, mb_id);
if (rc) {
int new_state = VIRTIO_MEM_SBM_MB_UNUSED;
if (virtio_mem_sbm_unplug_sb(vm, mb_id, 0, count))
new_state = VIRTIO_MEM_SBM_MB_PLUGGED;
virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
return rc;
}
*nb_sb -= count;
return 0;
}
/*
* Try to plug the desired number of subblocks of a memory block that
* is already added to Linux.
*
* Will modify the state of the memory block.
*
* Note: Can fail after some subblocks were successfully plugged.
*/
static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
unsigned long mb_id, uint64_t *nb_sb)
{
const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
unsigned long pfn, nr_pages;
int sb_id, count;
int rc;
if (WARN_ON_ONCE(!*nb_sb))
return -EINVAL;
while (*nb_sb) {
sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id);
if (sb_id >= vm->sbm.sbs_per_mb)
break;
count = 1;
while (count < *nb_sb &&
sb_id + count < vm->sbm.sbs_per_mb &&
!virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1))
count++;
rc = virtio_mem_sbm_plug_sb(vm, mb_id, sb_id, count);
if (rc)
return rc;
*nb_sb -= count;
if (old_state == VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL)
continue;
/* fake-online the pages if the memory block is online */
pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
sb_id * vm->sbm.sb_size);
nr_pages = PFN_DOWN(count * vm->sbm.sb_size);
virtio_mem_fake_online(pfn, nr_pages);
}
if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
virtio_mem_sbm_set_mb_state(vm, mb_id, old_state - 1);
return 0;
}
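/*
 * Try to plug the requested amount of memory (in subblock granularity),
 * filling up partially plugged memory blocks first, then plugging and
 * adding unused or newly prepared memory blocks.
 */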
static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
{
const int mb_states[] = {
VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
};
uint64_t nb_sb = diff / vm->sbm.sb_size;
unsigned long mb_id;
int rc, i;
if (!nb_sb)
return 0;
/* Don't race with onlining/offlining */
mutex_lock(&vm->hotplug_mutex);
for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
virtio_mem_sbm_for_each_mb(vm, mb_id, mb_states[i]) {
rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
goto out_unlock;
cond_resched();
}
}
/*
* We won't be working on online/offline memory blocks from this point,
* so we can't race with memory onlining/offlining. Drop the mutex.
*/
mutex_unlock(&vm->hotplug_mutex);
/* Try to plug and add unused blocks */
virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) {
if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
return -ENOSPC;
rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
return rc;
cond_resched();
}
/* Try to prepare, plug and add new blocks */
while (nb_sb) {
if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
return -ENOSPC;
rc = virtio_mem_sbm_prepare_next_mb(vm, &mb_id);
if (rc)
return rc;
rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
if (rc)
return rc;
cond_resched();
}
return 0;
out_unlock:
mutex_unlock(&vm->hotplug_mutex);
return rc;
}
/*
* Plug a big block and add it to Linux.
*
* Will modify the state of the big block.
*/
static int virtio_mem_bbm_plug_and_add_bb(struct virtio_mem *vm,
unsigned long bb_id)
{
int rc;
if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
VIRTIO_MEM_BBM_BB_UNUSED))
return -EINVAL;
rc = virtio_mem_bbm_plug_bb(vm, bb_id);
if (rc)
return rc;
virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
rc = virtio_mem_bbm_add_bb(vm, bb_id);
if (rc) {
if (!virtio_mem_bbm_unplug_bb(vm, bb_id))
virtio_mem_bbm_set_bb_state(vm, bb_id,
VIRTIO_MEM_BBM_BB_UNUSED);
else
/* Retry from the main loop. */
virtio_mem_bbm_set_bb_state(vm, bb_id,
VIRTIO_MEM_BBM_BB_PLUGGED);
return rc;
}
return 0;
}
/*
* Prepare tracking data for the next big block.
*/
static int virtio_mem_bbm_prepare_next_bb(struct virtio_mem *vm,
unsigned long *bb_id)
{
int rc;
if (vm->bbm.next_bb_id > vm->bbm.last_usable_bb_id)
return -ENOSPC;
/* Resize the big block state array if required. */
rc = virtio_mem_bbm_bb_states_prepare_next_bb(vm);
if (rc)
return rc;
vm->bbm.bb_count[VIRTIO_MEM_BBM_BB_UNUSED]++;
*bb_id = vm->bbm.next_bb_id;
vm->bbm.next_bb_id++;
return 0;
}
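/*
 * Try to plug the requested amount of memory (in big block granularity),
 * using unused big blocks first and preparing new ones as needed.
 */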
static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff)
{
uint64_t nb_bb = diff / vm->bbm.bb_size;
unsigned long bb_id;
int rc;
if (!nb_bb)
return 0;
/* Try to plug and add unused big blocks */
virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_UNUSED) {
if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
return -ENOSPC;
rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
if (!rc)
nb_bb--;
if (rc || !nb_bb)
return rc;
cond_resched();
}
/* Try to prepare, plug and add new big blocks */
while (nb_bb) {
if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
return -ENOSPC;
rc = virtio_mem_bbm_prepare_next_bb(vm, &bb_id);
if (rc)
return rc;
rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
if (!rc)
nb_bb--;
if (rc)
return rc;
cond_resched();
}
return 0;
}
/*
* Try to plug the requested amount of memory.
*/
static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
{
if (vm->in_sbm)
return virtio_mem_sbm_plug_request(vm, diff);
return virtio_mem_bbm_plug_request(vm, diff);
}
/*
* Unplug the desired number of plugged subblocks of an offline memory block.
* Will fail if any subblock cannot get unplugged (instead of skipping it).
*
* Will modify the state of the memory block. Might temporarily drop the
* hotplug_mutex.
*
* Note: Can fail after some subblocks were successfully unplugged.
*/
static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
unsigned long mb_id,
uint64_t *nb_sb)
{
int rc;
rc = virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, nb_sb);
/* some subblocks might have been unplugged even on failure */
if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
virtio_mem_sbm_set_mb_state(vm, mb_id,
VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
if (rc)
return rc;
if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
/*
* Remove the block from Linux - this should never fail.
* Hinder the block from getting onlined by marking it
* unplugged. Temporarily drop the mutex, so
* any pending GOING_ONLINE requests can be serviced/rejected.
*/
virtio_mem_sbm_set_mb_state(vm, mb_id,
VIRTIO_MEM_SBM_MB_UNUSED);
mutex_unlock(&vm->hotplug_mutex);
rc = virtio_mem_sbm_remove_mb(vm, mb_id);
BUG_ON(rc);
mutex_lock(&vm->hotplug_mutex);
}
return 0;
}
/*
* Unplug the given plugged subblocks of an online memory block.
*
* Will modify the state of the memory block.
*/
static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
unsigned long mb_id, int sb_id,
int count)
{
const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count;
const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
unsigned long start_pfn;
int rc;
start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
sb_id * vm->sbm.sb_size);
rc = virtio_mem_fake_offline(vm, start_pfn, nr_pages);
if (rc)
return rc;
/* Try to unplug the allocated memory */
rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
if (rc) {
/* Return the memory to the buddy. */
virtio_mem_fake_online(start_pfn, nr_pages);
return rc;
}
switch (old_state) {
case VIRTIO_MEM_SBM_MB_KERNEL:
virtio_mem_sbm_set_mb_state(vm, mb_id,
VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL);
break;
case VIRTIO_MEM_SBM_MB_MOVABLE:
virtio_mem_sbm_set_mb_state(vm, mb_id,
VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL);
break;
}
return 0;
}
/*
* Unplug the desired number of plugged subblocks of an online memory block.
 * Will skip subblocks that are busy.
*
* Will modify the state of the memory block. Might temporarily drop the
* hotplug_mutex.
*
* Note: Can fail after some subblocks were successfully unplugged. Can
* return 0 even if subblocks were busy and could not get unplugged.
*/
static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
unsigned long mb_id,
uint64_t *nb_sb)
{
int rc, sb_id;
/* If possible, try to unplug the complete block in one shot. */
if (*nb_sb >= vm->sbm.sbs_per_mb &&
virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, 0,
vm->sbm.sbs_per_mb);
if (!rc) {
*nb_sb -= vm->sbm.sbs_per_mb;
goto unplugged;
} else if (rc != -EBUSY)
return rc;
}
/* Fallback to single subblocks. */
for (sb_id = vm->sbm.sbs_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
/* Find the next candidate subblock */
while (sb_id >= 0 &&
!virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
sb_id--;
if (sb_id < 0)
break;
rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, sb_id, 1);
if (rc == -EBUSY)
continue;
else if (rc)
return rc;
*nb_sb -= 1;
}
unplugged:
rc = virtio_mem_sbm_try_remove_unplugged_mb(vm, mb_id);
if (rc)
vm->sbm.have_unplugged_mb = 1;
/* Ignore errors, this is not critical. We'll retry later. */
return 0;
}
/*
* Unplug the desired number of plugged subblocks of a memory block that is
 * already added to Linux. Will skip subblocks of online memory blocks that are
* busy (by the OS). Will fail if any subblock that's not busy cannot get
* unplugged.
*
* Will modify the state of the memory block. Might temporarily drop the
* hotplug_mutex.
*
* Note: Can fail after some subblocks were successfully unplugged. Can
* return 0 even if subblocks were busy and could not get unplugged.
*/
static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
unsigned long mb_id,
uint64_t *nb_sb)
{
const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
switch (old_state) {
case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
case VIRTIO_MEM_SBM_MB_KERNEL:
case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
case VIRTIO_MEM_SBM_MB_MOVABLE:
return virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, nb_sb);
case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
case VIRTIO_MEM_SBM_MB_OFFLINE:
return virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, nb_sb);
}
return -EINVAL;
}
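/*
 * Try to unplug the requested amount of memory (in subblock granularity),
 * processing memory blocks in the priority order documented below.
 */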
static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
const int mb_states[] = {
VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
VIRTIO_MEM_SBM_MB_OFFLINE,
VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
VIRTIO_MEM_SBM_MB_MOVABLE,
VIRTIO_MEM_SBM_MB_KERNEL,
};
uint64_t nb_sb = diff / vm->sbm.sb_size;
unsigned long mb_id;
int rc, i;
if (!nb_sb)
return 0;
/*
* We'll drop the mutex a couple of times when it is safe to do so.
* This might result in some blocks switching the state (online/offline)
* and we could miss them in this run - we will retry again later.
*/
mutex_lock(&vm->hotplug_mutex);
/*
 * We try to unplug from partially plugged blocks first, to try removing
 * whole memory blocks along with metadata. We prioritize ZONE_MOVABLE
 * as it's more reliable to unplug memory and remove whole memory
 * blocks, and we don't want to trigger zone imbalances by
 * accidentally removing too much kernel memory.
*/
for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
virtio_mem_sbm_for_each_mb_rev(vm, mb_id, mb_states[i]) {
rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
goto out_unlock;
mutex_unlock(&vm->hotplug_mutex);
cond_resched();
mutex_lock(&vm->hotplug_mutex);
}
if (!unplug_online && i == 1) {
mutex_unlock(&vm->hotplug_mutex);
return 0;
}
}
mutex_unlock(&vm->hotplug_mutex);
return nb_sb ? -EBUSY : 0;
out_unlock:
mutex_unlock(&vm->hotplug_mutex);
return rc;
}
/*
* Try to offline and remove a big block from Linux and unplug it. Will fail
* with -EBUSY if some memory is busy and cannot get unplugged.
*
* Will modify the state of the memory block. Might temporarily drop the
* hotplug_mutex.
*/
static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
unsigned long bb_id)
{
const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
unsigned long end_pfn = start_pfn + nr_pages;
unsigned long pfn;
struct page *page;
int rc;
if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
VIRTIO_MEM_BBM_BB_ADDED))
return -EINVAL;
/*
 * Start by fake-offlining all memory. Once we've marked the device
 * block as fake-offline, all newly onlined memory will
* automatically be kept fake-offline. Protect from concurrent
* onlining/offlining until we have a consistent state.
*/
mutex_lock(&vm->hotplug_mutex);
virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
page = pfn_to_online_page(pfn);
if (!page)
continue;
rc = virtio_mem_fake_offline(vm, pfn, PAGES_PER_SECTION);
if (rc) {
end_pfn = pfn;
goto rollback;
}
}
mutex_unlock(&vm->hotplug_mutex);
rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
if (rc) {
mutex_lock(&vm->hotplug_mutex);
goto rollback;
}
rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
if (rc)
virtio_mem_bbm_set_bb_state(vm, bb_id,
VIRTIO_MEM_BBM_BB_PLUGGED);
else
virtio_mem_bbm_set_bb_state(vm, bb_id,
VIRTIO_MEM_BBM_BB_UNUSED);
return rc;
rollback:
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
page = pfn_to_online_page(pfn);
if (!page)
continue;
virtio_mem_fake_online(pfn, PAGES_PER_SECTION);
}
virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
mutex_unlock(&vm->hotplug_mutex);
return rc;
}
/*
* Test if a big block is completely offline.
*/
static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm,
unsigned long bb_id)
{
const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
unsigned long pfn;
for (pfn = start_pfn; pfn < start_pfn + nr_pages;
pfn += PAGES_PER_SECTION) {
if (pfn_to_online_page(pfn))
return false;
}
return true;
}
/*
* Test if a big block is completely onlined to ZONE_MOVABLE (or offline).
*/
static bool virtio_mem_bbm_bb_is_movable(struct virtio_mem *vm,
unsigned long bb_id)
{
const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
struct page *page;
unsigned long pfn;
for (pfn = start_pfn; pfn < start_pfn + nr_pages;
pfn += PAGES_PER_SECTION) {
page = pfn_to_online_page(pfn);
if (!page)
continue;
if (page_zonenum(page) != ZONE_MOVABLE)
return false;
}
return true;
}
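/*
 * Try to unplug the requested amount of memory (in big block granularity),
 * trying offline big blocks first, then ones fully onlined to ZONE_MOVABLE,
 * then any remaining ones.
 */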
static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
uint64_t nb_bb = diff / vm->bbm.bb_size;
uint64_t bb_id;
int rc, i;
if (!nb_bb)
return 0;
/*
* Try to unplug big blocks. Similar to SBM, start with offline
* big blocks.
*/
for (i = 0; i < 3; i++) {
virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
cond_resched();
/*
* As we're holding no locks, these checks are racy,
* but we don't care.
*/
if (i == 0 && !virtio_mem_bbm_bb_is_offline(vm, bb_id))
continue;
if (i == 1 && !virtio_mem_bbm_bb_is_movable(vm, bb_id))
continue;
rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
if (rc == -EBUSY)
continue;
if (!rc)
nb_bb--;
if (rc || !nb_bb)
return rc;
}
if (i == 0 && !unplug_online)
return 0;
}
return nb_bb ? -EBUSY : 0;
}
/*
* Try to unplug the requested amount of memory.
*/
static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
if (vm->in_sbm)
return virtio_mem_sbm_unplug_request(vm, diff);
return virtio_mem_bbm_unplug_request(vm, diff);
}
/*
* Try to unplug all blocks that couldn't be unplugged before, for example,
 * because the hypervisor was busy. Further, offline and remove any completely
 * unplugged memory blocks where doing so previously failed.
*/
static int virtio_mem_cleanup_pending_mb(struct virtio_mem *vm)
{
unsigned long id;
int rc = 0;
if (!vm->in_sbm) {
virtio_mem_bbm_for_each_bb(vm, id,
VIRTIO_MEM_BBM_BB_PLUGGED) {
rc = virtio_mem_bbm_unplug_bb(vm, id);
if (rc)
return rc;
virtio_mem_bbm_set_bb_state(vm, id,
VIRTIO_MEM_BBM_BB_UNUSED);
}
return 0;
}
virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_PLUGGED) {
rc = virtio_mem_sbm_unplug_mb(vm, id);
if (rc)
return rc;
virtio_mem_sbm_set_mb_state(vm, id,
VIRTIO_MEM_SBM_MB_UNUSED);
}
if (!vm->sbm.have_unplugged_mb)
return 0;
/*
* Let's retry (offlining and) removing completely unplugged Linux
* memory blocks.
*/
vm->sbm.have_unplugged_mb = false;
mutex_lock(&vm->hotplug_mutex);
virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL)
rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL)
rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL)
rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
mutex_unlock(&vm->hotplug_mutex);
if (rc)
vm->sbm.have_unplugged_mb = true;
/* Ignore errors, this is not critical. We'll retry later. */
return 0;
}
/*
* Update all parts of the config that could have changed.
*/
static void virtio_mem_refresh_config(struct virtio_mem *vm)
{
const struct range pluggable_range = mhp_get_pluggable_range(true);
uint64_t new_plugged_size, usable_region_size, end_addr;
/* the plugged_size is just a reflection of what _we_ did previously */
virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
&new_plugged_size);
if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
vm->plugged_size = new_plugged_size;
/* calculate the last usable memory block id */
virtio_cread_le(vm->vdev, struct virtio_mem_config,
usable_region_size, &usable_region_size);
end_addr = min(vm->addr + usable_region_size - 1,
pluggable_range.end);
if (vm->in_sbm) {
vm->sbm.last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr);
if (!IS_ALIGNED(end_addr + 1, memory_block_size_bytes()))
vm->sbm.last_usable_mb_id--;
} else {
vm->bbm.last_usable_bb_id = virtio_mem_phys_to_bb_id(vm,
end_addr);
if (!IS_ALIGNED(end_addr + 1, vm->bbm.bb_size))
vm->bbm.last_usable_bb_id--;
}
/*
* If we cannot plug any of our device memory (e.g., nothing in the
* usable region is addressable), the last usable memory block id will
* be smaller than the first usable memory block id. We'll stop
* attempting to add memory with -ENOSPC from our main loop.
*/
/* see if there is a request to change the size */
virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
&vm->requested_size);
dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
}
/*
* Workqueue function for handling plug/unplug requests and config updates.
*/
static void virtio_mem_run_wq(struct work_struct *work)
{
struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
uint64_t diff;
int rc;
if (unlikely(vm->in_kdump)) {
dev_warn_once(&vm->vdev->dev,
"unexpected workqueue run in kdump kernel\n");
return;
}
hrtimer_cancel(&vm->retry_timer);
if (vm->broken)
return;
atomic_set(&vm->wq_active, 1);
retry:
rc = 0;
/* Make sure we start with a clean state if there are leftovers. */
if (unlikely(vm->unplug_all_required))
rc = virtio_mem_send_unplug_all_request(vm);
if (atomic_read(&vm->config_changed)) {
atomic_set(&vm->config_changed, 0);
virtio_mem_refresh_config(vm);
}
/* Cleanup any leftovers from previous runs */
if (!rc)
rc = virtio_mem_cleanup_pending_mb(vm);
if (!rc && vm->requested_size != vm->plugged_size) {
if (vm->requested_size > vm->plugged_size) {
diff = vm->requested_size - vm->plugged_size;
rc = virtio_mem_plug_request(vm, diff);
} else {
diff = vm->plugged_size - vm->requested_size;
rc = virtio_mem_unplug_request(vm, diff);
}
}
/*
* Keep retrying to offline and remove completely unplugged Linux
* memory blocks.
*/
if (!rc && vm->in_sbm && vm->sbm.have_unplugged_mb)
rc = -EBUSY;
switch (rc) {
case 0:
vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
break;
case -ENOSPC:
/*
* We cannot add any more memory (alignment, physical limit)
* or we have too many offline memory blocks.
*/
break;
case -ETXTBSY:
/*
* The hypervisor cannot process our request right now
 * (e.g., out of memory, migrating).
*/
case -EBUSY:
/*
* We cannot free up any memory to unplug it (all plugged memory
* is busy).
*/
case -ENOMEM:
/* Out of memory, try again later. */
hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
HRTIMER_MODE_REL);
break;
case -EAGAIN:
/* Retry immediately (e.g., the config changed). */
goto retry;
default:
/* Unknown error, mark as broken */
dev_err(&vm->vdev->dev,
"unknown error, marking device broken: %d\n", rc);
vm->broken = true;
}
atomic_set(&vm->wq_active, 0);
}
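/*
 * The retry timer expired: trigger another workqueue run and increase the
 * retry interval (exponential backoff, capped at the maximum).
 */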
static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
{
struct virtio_mem *vm = container_of(timer, struct virtio_mem,
retry_timer);
virtio_mem_retry(vm);
vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
VIRTIO_MEM_RETRY_TIMER_MAX_MS);
return HRTIMER_NORESTART;
}
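/* The virtqueue callback: a response arrived, wake up the waiter. */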
static void virtio_mem_handle_response(struct virtqueue *vq)
{
struct virtio_mem *vm = vq->vdev->priv;
wake_up(&vm->host_resp);
}
static int virtio_mem_init_vq(struct virtio_mem *vm)
{
struct virtqueue *vq;
vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
"guest-request");
if (IS_ERR(vq))
return PTR_ERR(vq);
vm->vq = vq;
return 0;
}
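/*
 * Initialize hotplug mode: decide between SBM and BBM, create the parent
 * resource and the dynamic memory group, and register the required
 * notifiers and callbacks.
 */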
static int virtio_mem_init_hotplug(struct virtio_mem *vm)
{
const struct range pluggable_range = mhp_get_pluggable_range(true);
uint64_t unit_pages, sb_size, addr;
int rc;
/* bad device setup - warn only */
if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
dev_warn(&vm->vdev->dev,
"The alignment of the physical start address can make some memory unusable.\n");
if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
dev_warn(&vm->vdev->dev,
"The alignment of the physical end address can make some memory unusable.\n");
if (vm->addr < pluggable_range.start ||
vm->addr + vm->region_size - 1 > pluggable_range.end)
dev_warn(&vm->vdev->dev,
"Some device memory is not addressable/pluggable. This can make some memory unusable.\n");
/* Prepare the offline threshold - make sure we can add two blocks. */
vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
/*
* alloc_contig_range() works reliably with pageblock
* granularity on ZONE_NORMAL, use pageblock_nr_pages.
*/
sb_size = PAGE_SIZE * pageblock_nr_pages;
sb_size = max_t(uint64_t, vm->device_block_size, sb_size);
if (sb_size < memory_block_size_bytes() && !force_bbm) {
/* SBM: At least two subblocks per Linux memory block. */
vm->in_sbm = true;
vm->sbm.sb_size = sb_size;
vm->sbm.sbs_per_mb = memory_block_size_bytes() /
vm->sbm.sb_size;
/* Round up to the next full memory block */
addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
memory_block_size_bytes() - 1;
vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
vm->sbm.next_mb_id = vm->sbm.first_mb_id;
} else {
/* BBM: At least one Linux memory block. */
vm->bbm.bb_size = max_t(uint64_t, vm->device_block_size,
memory_block_size_bytes());
if (bbm_block_size) {
if (!is_power_of_2(bbm_block_size)) {
dev_warn(&vm->vdev->dev,
"bbm_block_size is not a power of 2");
} else if (bbm_block_size < vm->bbm.bb_size) {
dev_warn(&vm->vdev->dev,
"bbm_block_size is too small");
} else {
vm->bbm.bb_size = bbm_block_size;
}
}
/* Round up to the next aligned big block */
addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
vm->bbm.bb_size - 1;
vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
vm->bbm.next_bb_id = vm->bbm.first_bb_id;
/* Make sure we can add two big blocks. */
vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
vm->offline_threshold);
}
dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
memory_block_size_bytes());
if (vm->in_sbm)
dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
(unsigned long long)vm->sbm.sb_size);
else
dev_info(&vm->vdev->dev, "big block size: 0x%llx",
(unsigned long long)vm->bbm.bb_size);
/* create the parent resource for all memory */
rc = virtio_mem_create_resource(vm);
if (rc)
return rc;
/* use a single dynamic memory group to cover the whole memory device */
if (vm->in_sbm)
unit_pages = PHYS_PFN(memory_block_size_bytes());
else
unit_pages = PHYS_PFN(vm->bbm.bb_size);
rc = memory_group_register_dynamic(vm->nid, unit_pages);
if (rc < 0)
goto out_del_resource;
vm->mgid = rc;
/*
* If we still have memory plugged, we have to unplug all memory first.
* Registering our parent resource makes sure that this memory isn't
* actually in use (e.g., trying to reload the driver).
*/
if (vm->plugged_size) {
vm->unplug_all_required = true;
dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
}
/* register callbacks */
vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
rc = register_memory_notifier(&vm->memory_notifier);
if (rc)
goto out_unreg_group;
/* Block hibernation as early as possible. */
vm->pm_notifier.priority = INT_MAX;
vm->pm_notifier.notifier_call = virtio_mem_pm_notifier_cb;
rc = register_pm_notifier(&vm->pm_notifier);
if (rc)
goto out_unreg_mem;
rc = register_virtio_mem_device(vm);
if (rc)
goto out_unreg_pm;
return 0;
out_unreg_pm:
unregister_pm_notifier(&vm->pm_notifier);
out_unreg_mem:
unregister_memory_notifier(&vm->memory_notifier);
out_unreg_group:
memory_group_unregister(vm->mgid);
out_del_resource:
virtio_mem_delete_resource(vm);
return rc;
}
#ifdef CONFIG_PROC_VMCORE
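/*
 * Request the plug state of device memory in the given range. Returns the
 * reported state on success or a negative error code.
 */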
static int virtio_mem_send_state_request(struct virtio_mem *vm, uint64_t addr,
uint64_t size)
{
const uint64_t nb_vm_blocks = size / vm->device_block_size;
const struct virtio_mem_req req = {
.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_STATE),
.u.state.addr = cpu_to_virtio64(vm->vdev, addr),
.u.state.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
};
int rc = -ENOMEM;
dev_dbg(&vm->vdev->dev, "requesting state: 0x%llx - 0x%llx\n", addr,
addr + size - 1);
switch (virtio_mem_send_request(vm, &req)) {
case VIRTIO_MEM_RESP_ACK:
return virtio16_to_cpu(vm->vdev, vm->resp.u.state.state);
case VIRTIO_MEM_RESP_ERROR:
rc = -EINVAL;
break;
default:
break;
}
dev_dbg(&vm->vdev->dev, "requesting state failed: %d\n", rc);
return rc;
}
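/*
 * vmcore callback: report whether a PFN is backed by RAM. PFNs falling into
 * the device-managed region are only RAM if the corresponding device block
 * is currently plugged.
 */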
static bool virtio_mem_vmcore_pfn_is_ram(struct vmcore_cb *cb,
unsigned long pfn)
{
struct virtio_mem *vm = container_of(cb, struct virtio_mem,
vmcore_cb);
uint64_t addr = PFN_PHYS(pfn);
bool is_ram;
int rc;
if (!virtio_mem_contains_range(vm, addr, PAGE_SIZE))
return true;
if (!vm->plugged_size)
return false;
/*
* We have to serialize device requests and access to the information
* about the block queried last.
*/
mutex_lock(&vm->hotplug_mutex);
addr = ALIGN_DOWN(addr, vm->device_block_size);
if (addr != vm->last_block_addr) {
rc = virtio_mem_send_state_request(vm, addr,
vm->device_block_size);
/* On any kind of error, we're going to signal !ram. */
if (rc == VIRTIO_MEM_STATE_PLUGGED)
vm->last_block_plugged = true;
else
vm->last_block_plugged = false;
vm->last_block_addr = addr;
}
is_ram = vm->last_block_plugged;
mutex_unlock(&vm->hotplug_mutex);
return is_ram;
}
#endif /* CONFIG_PROC_VMCORE */
static int virtio_mem_init_kdump(struct virtio_mem *vm)
{
#ifdef CONFIG_PROC_VMCORE
dev_info(&vm->vdev->dev, "memory hot(un)plug disabled in kdump kernel\n");
vm->vmcore_cb.pfn_is_ram = virtio_mem_vmcore_pfn_is_ram;
register_vmcore_cb(&vm->vmcore_cb);
return 0;
#else /* CONFIG_PROC_VMCORE */
dev_warn(&vm->vdev->dev, "disabled in kdump kernel without vmcore\n");
return -EBUSY;
#endif /* CONFIG_PROC_VMCORE */
}
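/*
 * Read the static device configuration and initialize either kdump mode or
 * hotplug mode.
 */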
static int virtio_mem_init(struct virtio_mem *vm)
{
uint16_t node_id;
if (!vm->vdev->config->get) {
dev_err(&vm->vdev->dev, "config access disabled\n");
return -EINVAL;
}
/* Fetch all properties that can't change. */
virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
&vm->plugged_size);
virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
&vm->device_block_size);
virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
&node_id);
vm->nid = virtio_mem_translate_node_id(vm, node_id);
virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
&vm->region_size);
/* Determine the nid for the device based on the lowest address. */
if (vm->nid == NUMA_NO_NODE)
vm->nid = memory_add_physaddr_to_nid(vm->addr);
dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
dev_info(&vm->vdev->dev, "device block size: 0x%llx",
(unsigned long long)vm->device_block_size);
if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
/*
* We don't want to (un)plug or reuse any memory when in kdump. The
* memory is still accessible (but not exposed to Linux).
*/
if (vm->in_kdump)
return virtio_mem_init_kdump(vm);
return virtio_mem_init_hotplug(vm);
}
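/*
 * Reserve the whole device memory region as our parent resource, disallowing
 * mapping of device memory via /dev/mem.
 */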
static int virtio_mem_create_resource(struct virtio_mem *vm)
{
/*
* When force-unloading the driver and removing the device, we
* could have a garbage pointer. Duplicate the string.
*/
const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);
if (!name)
return -ENOMEM;
/* Disallow mapping device memory via /dev/mem completely. */
vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
name, IORESOURCE_SYSTEM_RAM |
IORESOURCE_EXCLUSIVE);
if (!vm->parent_resource) {
kfree(name);
dev_warn(&vm->vdev->dev, "could not reserve device region\n");
dev_info(&vm->vdev->dev,
"reloading the driver is not supported\n");
return -EBUSY;
}
/* The memory is not actually busy - make add_memory() work. */
vm->parent_resource->flags &= ~IORESOURCE_BUSY;
return 0;
}
static void virtio_mem_delete_resource(struct virtio_mem *vm)
{
const char *name;
if (!vm->parent_resource)
return;
name = vm->parent_resource->name;
release_resource(vm->parent_resource);
kfree(vm->parent_resource);
kfree(name);
vm->parent_resource = NULL;
}
static int virtio_mem_range_has_system_ram(struct resource *res, void *arg)
{
return 1;
}
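/*
 * Check whether any system RAM within the device memory region is still
 * added to Linux.
 */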
static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
{
const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
return walk_iomem_res_desc(IORES_DESC_NONE, flags, vm->addr,
vm->addr + vm->region_size, NULL,
virtio_mem_range_has_system_ram) == 1;
}
static int virtio_mem_probe(struct virtio_device *vdev)
{
struct virtio_mem *vm;
int rc;
BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);
vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
if (!vm)
return -ENOMEM;
init_waitqueue_head(&vm->host_resp);
vm->vdev = vdev;
INIT_WORK(&vm->wq, virtio_mem_run_wq);
mutex_init(&vm->hotplug_mutex);
INIT_LIST_HEAD(&vm->next);
spin_lock_init(&vm->removal_lock);
hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vm->retry_timer.function = virtio_mem_timer_expired;
vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
vm->in_kdump = is_kdump_kernel();
/* register the virtqueue */
rc = virtio_mem_init_vq(vm);
if (rc)
goto out_free_vm;
/* initialize the device by querying the config */
rc = virtio_mem_init(vm);
if (rc)
goto out_del_vq;
virtio_device_ready(vdev);
/* trigger a config update to start processing the requested_size */
if (!vm->in_kdump) {
atomic_set(&vm->config_changed, 1);
queue_work(system_freezable_wq, &vm->wq);
}
return 0;
out_del_vq:
vdev->config->del_vqs(vdev);
out_free_vm:
kfree(vm);
vdev->priv = NULL;
return rc;
}
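/*
 * Tear down hotplug mode: stop the workqueue and timer, remove leftover
 * partially plugged offline memory blocks, unregister notifiers and release
 * resources and tracking data.
 */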
static void virtio_mem_deinit_hotplug(struct virtio_mem *vm)
{
unsigned long mb_id;
int rc;
/*
* Make sure the workqueue won't be triggered anymore and no memory
* blocks can be onlined/offlined until we're finished here.
*/
mutex_lock(&vm->hotplug_mutex);
spin_lock_irq(&vm->removal_lock);
vm->removing = true;
spin_unlock_irq(&vm->removal_lock);
mutex_unlock(&vm->hotplug_mutex);
/* wait until the workqueue stopped */
cancel_work_sync(&vm->wq);
hrtimer_cancel(&vm->retry_timer);
if (vm->in_sbm) {
/*
* After we unregistered our callbacks, user space can online
* partially plugged offline blocks. Make sure to remove them.
*/
virtio_mem_sbm_for_each_mb(vm, mb_id,
VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
rc = virtio_mem_sbm_remove_mb(vm, mb_id);
BUG_ON(rc);
virtio_mem_sbm_set_mb_state(vm, mb_id,
VIRTIO_MEM_SBM_MB_UNUSED);
}
/*
* After we unregistered our callbacks, user space can no longer
* offline partially plugged online memory blocks. No need to
* worry about them.
*/
}
/* unregister callbacks */
unregister_virtio_mem_device(vm);
unregister_pm_notifier(&vm->pm_notifier);
unregister_memory_notifier(&vm->memory_notifier);
/*
* There is no way we could reliably remove all memory we have added to
* the system. And there is no way to stop the driver/device from going
* away. Warn at least.
*/
if (virtio_mem_has_memory_added(vm)) {
dev_warn(&vm->vdev->dev,
"device still has system memory added\n");
} else {
virtio_mem_delete_resource(vm);
kfree_const(vm->resource_name);
memory_group_unregister(vm->mgid);
}
/* remove all tracking data - no locking needed */
if (vm->in_sbm) {
vfree(vm->sbm.mb_states);
vfree(vm->sbm.sb_states);
} else {
vfree(vm->bbm.bb_states);
}
}
static void virtio_mem_deinit_kdump(struct virtio_mem *vm)
{
#ifdef CONFIG_PROC_VMCORE
unregister_vmcore_cb(&vm->vmcore_cb);
#endif /* CONFIG_PROC_VMCORE */
}
static void virtio_mem_remove(struct virtio_device *vdev)
{
struct virtio_mem *vm = vdev->priv;
if (vm->in_kdump)
virtio_mem_deinit_kdump(vm);
else
virtio_mem_deinit_hotplug(vm);
/* reset the device and clean up the queues */
virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
kfree(vm);
vdev->priv = NULL;
}
static void virtio_mem_config_changed(struct virtio_device *vdev)
{
struct virtio_mem *vm = vdev->priv;
if (unlikely(vm->in_kdump))
return;
atomic_set(&vm->config_changed, 1);
virtio_mem_retry(vm);
}
#ifdef CONFIG_PM_SLEEP
static int virtio_mem_freeze(struct virtio_device *vdev)
{
struct virtio_mem *vm = vdev->priv;
/*
* We block hibernation using the PM notifier completely. The workqueue
* is already frozen by the PM core at this point, so we simply
 * reset the device and clean up the queues.
*/
if (pm_suspend_target_state != PM_SUSPEND_TO_IDLE &&
vm->plugged_size &&
!virtio_has_feature(vm->vdev, VIRTIO_MEM_F_PERSISTENT_SUSPEND)) {
dev_err(&vm->vdev->dev,
"suspending with plugged memory is not supported\n");
return -EPERM;
}
virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
vm->vq = NULL;
return 0;
}
static int virtio_mem_restore(struct virtio_device *vdev)
{
struct virtio_mem *vm = vdev->priv;
int ret;
ret = virtio_mem_init_vq(vm);
if (ret)
return ret;
virtio_device_ready(vdev);
/* Let's check if anything changed. */
virtio_mem_config_changed(vdev);
return 0;
}
#endif
static unsigned int virtio_mem_features[] = {
#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
VIRTIO_MEM_F_ACPI_PXM,
#endif
VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE,
VIRTIO_MEM_F_PERSISTENT_SUSPEND,
};
static const struct virtio_device_id virtio_mem_id_table[] = {
{ VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
{ 0 },
};
static struct virtio_driver virtio_mem_driver = {
.feature_table = virtio_mem_features,
.feature_table_size = ARRAY_SIZE(virtio_mem_features),
.driver.name = KBUILD_MODNAME,
.id_table = virtio_mem_id_table,
.probe = virtio_mem_probe,
.remove = virtio_mem_remove,
.config_changed = virtio_mem_config_changed,
#ifdef CONFIG_PM_SLEEP
.freeze = virtio_mem_freeze,
.restore = virtio_mem_restore,
#endif
};
module_virtio_driver(virtio_mem_driver);
MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
MODULE_AUTHOR("David Hildenbrand <[email protected]>");
MODULE_DESCRIPTION("Virtio-mem driver");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_devcaps.h"
#include <drm/ttm/ttm_placement.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
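/**
 * vmw_supports_3d - Check whether 3D operation is supported.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns true if the device and current configuration support 3D operation.
 */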
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = dev_priv->fifo;
if (!(dev_priv->capabilities & SVGA_CAP_3D))
return false;
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
uint32_t result;
if (!dev_priv->has_mob)
return false;
result = vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_3D);
return (result != 0);
}
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
BUG_ON(vmw_is_svga_v3(dev_priv));
fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
hwversion = vmw_fifo_mem_read(dev_priv,
((fifo->capabilities &
SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
SVGA_FIFO_3D_HWVERSION_REVISED :
SVGA_FIFO_3D_HWVERSION));
if (hwversion == 0)
return false;
if (hwversion < SVGA3D_HWVERSION_WS8_B1)
return false;
/* Legacy Display Unit does not support surfaces */
if (dev_priv->active_display_unit == vmw_du_legacy)
return false;
return true;
}
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
uint32_t caps;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
if (caps & SVGA_FIFO_CAP_PITCHLOCK)
return true;
return false;
}
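/**
 * vmw_fifo_create - Allocate and initialize the legacy fifo command queue.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns the new fifo state, NULL if the device has no fifo memory, or an
 * ERR_PTR() on failure.
 */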
struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
{
struct vmw_fifo_state *fifo;
uint32_t max;
uint32_t min;
if (!dev_priv->fifo_mem)
return NULL;
fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
if (!fifo)
return ERR_PTR(-ENOMEM);
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL)) {
kfree(fifo);
return ERR_PTR(-ENOMEM);
}
fifo->dynamic_buffer = NULL;
fifo->reserved_size = 0;
fifo->using_bounce_buffer = false;
mutex_init(&fifo->fifo_mutex);
init_rwsem(&fifo->rwsem);
min = 4;
if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
min <<= 2;
if (min < PAGE_SIZE)
min = PAGE_SIZE;
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
wmb();
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
mb();
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
drm_info(&dev_priv->drm,
"Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
(unsigned int) max,
(unsigned int) min,
(unsigned int) fifo->capabilities);
if (unlikely(min >= max)) {
drm_warn(&dev_priv->drm,
"FIFO memory is not usable. Driver failed to initialize.");
return ERR_PTR(-ENXIO);
}
return fifo;
}
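/**
 * vmw_fifo_ping_host - Notify the device that there is fifo work to process.
 *
 * @dev_priv: Pointer to device private structure.
 * @reason: Reason code written to the sync register.
 *
 * Only pings the device if it wasn't already marked busy.
 */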
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
u32 *fifo_mem = dev_priv->fifo_mem;
if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}
void vmw_fifo_destroy(struct vmw_private *dev_priv)
{
struct vmw_fifo_state *fifo = dev_priv->fifo;
if (!fifo)
return;
if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer);
fifo->static_buffer = NULL;
}
if (likely(fifo->dynamic_buffer != NULL)) {
vfree(fifo->dynamic_buffer);
fifo->dynamic_buffer = NULL;
}
kfree(fifo);
dev_priv->fifo = NULL;
}
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
return ((max - next_cmd) + (stop - min) <= bytes);
}
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
uint32_t bytes, bool interruptible,
unsigned long timeout)
{
int ret = 0;
unsigned long end_jiffies = jiffies + timeout;
DEFINE_WAIT(__wait);
DRM_INFO("Fifo wait noirq.\n");
for (;;) {
prepare_to_wait(&dev_priv->fifo_queue, &__wait,
(interruptible) ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (!vmw_fifo_is_full(dev_priv, bytes))
break;
if (time_after_eq(jiffies, end_jiffies)) {
ret = -EBUSY;
DRM_ERROR("SVGA device lockup.\n");
break;
}
schedule_timeout(1);
if (interruptible && signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
finish_wait(&dev_priv->fifo_queue, &__wait);
wake_up_all(&dev_priv->fifo_queue);
DRM_INFO("Fifo noirq exit.\n");
return ret;
}
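/*
 * Wait for at least @bytes of fifo space to become available, using fifo
 * progress interrupts when available and polling otherwise.
 */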
static int vmw_fifo_wait(struct vmw_private *dev_priv,
uint32_t bytes, bool interruptible,
unsigned long timeout)
{
long ret = 1L;
if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
return 0;
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return vmw_fifo_wait_noirq(dev_priv, bytes,
interruptible, timeout);
vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
&dev_priv->fifo_queue_waiters);
if (interruptible)
ret = wait_event_interruptible_timeout
(dev_priv->fifo_queue,
!vmw_fifo_is_full(dev_priv, bytes), timeout);
else
ret = wait_event_timeout
(dev_priv->fifo_queue,
!vmw_fifo_is_full(dev_priv, bytes), timeout);
if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;
vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
&dev_priv->fifo_queue_waiters);
return ret;
}
/*
* Reserve @bytes number of bytes in the fifo.
*
 * This function will return NULL (error) on two conditions:
 * If it times out waiting for fifo space, or if @bytes is larger than the
 * available fifo space.
 *
 * Returns:
 * Pointer to the fifo, or NULL on error (possible hardware hang).
*/
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = dev_priv->fifo;
u32 *fifo_mem = dev_priv->fifo_mem;
uint32_t max;
uint32_t min;
uint32_t next_cmd;
uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
int ret;
mutex_lock(&fifo_state->fifo_mutex);
max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
if (unlikely(bytes >= (max - min)))
goto out_err;
BUG_ON(fifo_state->reserved_size != 0);
BUG_ON(fifo_state->dynamic_buffer != NULL);
fifo_state->reserved_size = bytes;
while (1) {
uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
bool need_bounce = false;
bool reserve_in_place = false;
if (next_cmd >= stop) {
if (likely((next_cmd + bytes < max ||
(next_cmd + bytes == max && stop > min))))
reserve_in_place = true;
else if (vmw_fifo_is_full(dev_priv, bytes)) {
ret = vmw_fifo_wait(dev_priv, bytes,
false, 3 * HZ);
if (unlikely(ret != 0))
goto out_err;
} else
need_bounce = true;
} else {
if (likely((next_cmd + bytes < stop)))
reserve_in_place = true;
else {
ret = vmw_fifo_wait(dev_priv, bytes,
false, 3 * HZ);
if (unlikely(ret != 0))
goto out_err;
}
}
if (reserve_in_place) {
if (reserveable || bytes <= sizeof(uint32_t)) {
fifo_state->using_bounce_buffer = false;
if (reserveable)
vmw_fifo_mem_write(dev_priv,
SVGA_FIFO_RESERVED,
bytes);
return (void __force *) (fifo_mem +
(next_cmd >> 2));
} else {
need_bounce = true;
}
}
if (need_bounce) {
fifo_state->using_bounce_buffer = true;
if (bytes < fifo_state->static_buffer_size)
return fifo_state->static_buffer;
else {
fifo_state->dynamic_buffer = vmalloc(bytes);
if (!fifo_state->dynamic_buffer)
goto out_err;
return fifo_state->dynamic_buffer;
}
}
}
out_err:
fifo_state->reserved_size = 0;
mutex_unlock(&fifo_state->fifo_mutex);
return NULL;
}
void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
int ctx_id)
{
void *ret;
if (dev_priv->cman)
ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
ctx_id, false, NULL);
else if (ctx_id == SVGA3D_INVALID_ID)
ret = vmw_local_fifo_reserve(dev_priv, bytes);
else {
WARN(1, "Command buffer has not been allocated.\n");
ret = NULL;
}
if (IS_ERR_OR_NULL(ret))
return NULL;
return ret;
}
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
struct vmw_private *vmw,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
u32 *fifo_mem = vmw->fifo_mem;
uint32_t chunk_size = max - next_cmd;
uint32_t rest;
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
if (bytes < chunk_size)
chunk_size = bytes;
vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
mb();
memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
rest = bytes - chunk_size;
if (rest)
memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
struct vmw_private *vmw,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
while (bytes > 0) {
vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
next_cmd += sizeof(uint32_t);
if (unlikely(next_cmd == max))
next_cmd = min;
mb();
vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
mb();
bytes -= sizeof(uint32_t);
}
}
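/*
 * Commit previously reserved fifo space: copy out of any bounce buffer,
 * advance SVGA_FIFO_NEXT_CMD and ping the host.
 */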
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = dev_priv->fifo;
uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);
fifo_state->reserved_size = 0;
if (fifo_state->using_bounce_buffer) {
if (reserveable)
vmw_fifo_res_copy(fifo_state, dev_priv,
next_cmd, max, min, bytes);
else
vmw_fifo_slow_copy(fifo_state, dev_priv,
next_cmd, max, min, bytes);
if (fifo_state->dynamic_buffer) {
vfree(fifo_state->dynamic_buffer);
fifo_state->dynamic_buffer = NULL;
}
}
down_write(&fifo_state->rwsem);
if (fifo_state->using_bounce_buffer || reserveable) {
next_cmd += bytes;
if (next_cmd >= max)
next_cmd -= max - min;
mb();
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
}
if (reserveable)
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
mb();
up_write(&fifo_state->rwsem);
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
mutex_unlock(&fifo_state->fifo_mutex);
}
void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
else
vmw_local_fifo_commit(dev_priv, bytes);
}
/**
* vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands.
*
* @dev_priv: Pointer to device private structure.
* @bytes: Number of bytes to commit.
*/
void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
else
vmw_local_fifo_commit(dev_priv, bytes);
}
/**
* vmw_cmd_flush - Flush any buffered commands and make sure command processing
* starts.
*
* @dev_priv: Pointer to device private structure.
* @interruptible: Whether to wait interruptible if function needs to sleep.
*/
int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
{
might_sleep();
if (dev_priv->cman)
return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
else
return 0;
}
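/**
 * vmw_cmd_send_fence - Emit a fence command and report its sequence number.
 *
 * @dev_priv: Pointer to device private structure.
 * @seqno: Out: Sequence number of the emitted fence.
 *
 * Returns 0 on success, or -ENOMEM if command space could not be reserved.
 */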
int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct svga_fifo_cmd_fence *cmd_fence;
u32 *fm;
int ret = 0;
uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
fm = VMW_CMD_RESERVE(dev_priv, bytes);
if (unlikely(fm == NULL)) {
*seqno = atomic_read(&dev_priv->marker_seq);
ret = -ENOMEM;
(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
false, 3*HZ);
goto out_err;
}
do {
*seqno = atomic_add_return(1, &dev_priv->marker_seq);
} while (*seqno == 0);
if (!vmw_has_fences(dev_priv)) {
/*
* Don't request hardware to send a fence. The
* waiting code in vmwgfx_irq.c will emulate this.
*/
vmw_cmd_commit(dev_priv, 0);
return 0;
}
*fm++ = SVGA_CMD_FENCE;
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
vmw_cmd_commit_flush(dev_priv, bytes);
vmw_update_seqno(dev_priv);
out_err:
return ret;
}
/**
* vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using
* legacy query commands.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
* See the vmw_cmd_emit_dummy_query documentation.
*/
static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
/*
* A query wait without a preceding query end will
* actually finish all queries for this cid
* without writing to the query result structure.
*/
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery body;
} *cmd;
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
if (bo->resource->mem_type == TTM_PL_VRAM) {
cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
} else {
cmd->body.guestResult.gmrId = bo->resource->start;
cmd->body.guestResult.offset = 0;
}
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
/**
* vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using
* guest-backed resource query commands.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
* See the vmw_cmd_emit_dummy_query documentation.
*/
static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
uint32_t cid)
{
/*
* A query wait without a preceding query end will
* actually finish all queries for this cid
* without writing to the query result structure.
*/
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery body;
} *cmd;
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd->body.mobid = bo->resource->start;
cmd->body.offset = 0;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
/**
* vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using
* appropriate resource query commands.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
* This function is used to emit a dummy occlusion query with
* no primitives rendered between query begin and query end.
* It's used to provide a query barrier, in order to know that when
* this query is finished, all preceding queries are also finished.
*
* A Query results structure should have been initialized at the start
* of the dev_priv->dummy_query_bo buffer object. And that buffer object
* must also be either reserved or pinned when this function is called.
*
* Returns -ENOMEM on failure to reserve fifo space.
*/
int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
if (dev_priv->has_mob)
return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);
return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
}
/**
* vmw_cmd_supported - returns true if the given device supports
* command queues.
*
* @vmw: The device private structure.
*
* Returns true if we can issue commands.
*/
bool vmw_cmd_supported(struct vmw_private *vmw)
{
bool has_cmdbufs =
(vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
SVGA_CAP_CMD_BUFFERS_2)) != 0;
if (vmw_is_svga_v3(vmw))
return (has_cmdbufs &&
(vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
/*
 * We have FIFO cmds.
*/
return has_cmdbufs || vmw->fifo_mem != NULL;
}
// SPDX-License-Identifier: GPL-2.0+
/*
* vio driver interface to hvc_console.c
*
 * This code was moved here to allow the remaining code to be reused as a
 * generic polling-mode driver core, with semi-reliable transport, for the
 * console and tty subsystems.
*
*
* Copyright (C) 2001 Anton Blanchard <[email protected]>, IBM
* Copyright (C) 2001 Paul Mackerras <[email protected]>, IBM
* Copyright (C) 2004 Benjamin Herrenschmidt <[email protected]>, IBM Corp.
* Copyright (C) 2004 IBM Corporation
*
* Additional Author(s):
* Ryan S. Arnold <[email protected]>
*
* TODO:
*
* - handle error in sending hvsi protocol packets
* - retry nego on subsequent sends ?
*/
#undef DEBUG
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/of.h>
#include <asm/hvconsole.h>
#include <asm/vio.h>
#include <asm/hvsi.h>
#include <asm/udbg.h>
#include <asm/machdep.h>
#include "hvc_console.h"
static const char hvc_driver_name[] = "hvc_console";
static const struct vio_device_id hvc_driver_table[] = {
{"serial", "hvterm1"},
#ifndef HVC_OLD_HVSI
{"serial", "hvterm-protocol"},
#endif
{ "", "" }
};
typedef enum hv_protocol {
HV_PROTOCOL_RAW,
HV_PROTOCOL_HVSI
} hv_protocol_t;
struct hvterm_priv {
u32 termno; /* HV term number */
hv_protocol_t proto; /* Raw data or HVSI packets */
struct hvsi_priv hvsi; /* HVSI specific data */
spinlock_t buf_lock;
u8 buf[SIZE_VIO_GET_CHARS];
size_t left;
size_t offset;
};
static struct hvterm_priv *hvterm_privs[MAX_NR_HVC_CONSOLES];
/* For early boot console */
static struct hvterm_priv hvterm_priv0;
static ssize_t hvterm_raw_get_chars(uint32_t vtermno, u8 *buf, size_t count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
unsigned long i;
unsigned long flags;
size_t got;
if (WARN_ON(!pv))
return 0;
spin_lock_irqsave(&pv->buf_lock, flags);
if (pv->left == 0) {
pv->offset = 0;
pv->left = hvc_get_chars(pv->termno, pv->buf, count);
/*
* Work around a HV bug where it gives us a null
* after every \r. -- paulus
*/
for (i = 1; i < pv->left; ++i) {
if (pv->buf[i] == 0 && pv->buf[i-1] == '\r') {
--pv->left;
if (i < pv->left) {
memmove(&pv->buf[i], &pv->buf[i+1],
pv->left - i);
}
}
}
}
got = min(count, pv->left);
memcpy(buf, &pv->buf[pv->offset], got);
pv->offset += got;
pv->left -= got;
spin_unlock_irqrestore(&pv->buf_lock, flags);
return got;
}
/**
 * hvterm_raw_put_chars() - send characters to firmware for a given vterm adapter
* @vtermno: The virtual terminal number.
* @buf: The characters to send. Because of the underlying hypercall in
* hvc_put_chars(), this buffer must be at least 16 bytes long, even if
* you are sending fewer chars.
* @count: number of chars to send.
*/
static ssize_t hvterm_raw_put_chars(uint32_t vtermno, const u8 *buf,
size_t count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
if (WARN_ON(!pv))
return 0;
return hvc_put_chars(pv->termno, buf, count);
}
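/*
 * Hedged example (illustration only, not part of the driver): because of
 * the 16-byte minimum documented above, even a short write must be staged
 * in a buffer of at least 16 bytes, exactly as udbg_hvc_putc() below does
 * for a single character.
 */
static ssize_t __maybe_unused hvterm_example_put_short(uint32_t vtermno)
{
	u8 bounce[16] = "ok\n";		/* payload is 3 bytes, buffer is 16 */

	return hvterm_raw_put_chars(vtermno, bounce, 3);
}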
static const struct hv_ops hvterm_raw_ops = {
.get_chars = hvterm_raw_get_chars,
.put_chars = hvterm_raw_put_chars,
.notifier_add = notifier_add_irq,
.notifier_del = notifier_del_irq,
.notifier_hangup = notifier_hangup_irq,
};
static ssize_t hvterm_hvsi_get_chars(uint32_t vtermno, u8 *buf, size_t count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
if (WARN_ON(!pv))
return 0;
return hvsilib_get_chars(&pv->hvsi, buf, count);
}
static ssize_t hvterm_hvsi_put_chars(uint32_t vtermno, const u8 *buf,
size_t count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
if (WARN_ON(!pv))
return 0;
return hvsilib_put_chars(&pv->hvsi, buf, count);
}
static int hvterm_hvsi_open(struct hvc_struct *hp, int data)
{
struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
int rc;
pr_devel("HVSI@%x: open !\n", pv->termno);
rc = notifier_add_irq(hp, data);
if (rc)
return rc;
return hvsilib_open(&pv->hvsi, hp);
}
static void hvterm_hvsi_close(struct hvc_struct *hp, int data)
{
struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
pr_devel("HVSI@%x: do close !\n", pv->termno);
hvsilib_close(&pv->hvsi, hp);
notifier_del_irq(hp, data);
}
static void hvterm_hvsi_hangup(struct hvc_struct *hp, int data)
{
struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
pr_devel("HVSI@%x: do hangup !\n", pv->termno);
hvsilib_close(&pv->hvsi, hp);
notifier_hangup_irq(hp, data);
}
static int hvterm_hvsi_tiocmget(struct hvc_struct *hp)
{
struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
if (!pv)
return -EINVAL;
return pv->hvsi.mctrl;
}
static int hvterm_hvsi_tiocmset(struct hvc_struct *hp, unsigned int set,
unsigned int clear)
{
struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
pr_devel("HVSI@%x: Set modem control, set=%x,clr=%x\n",
pv->termno, set, clear);
if (set & TIOCM_DTR)
hvsilib_write_mctrl(&pv->hvsi, 1);
else if (clear & TIOCM_DTR)
hvsilib_write_mctrl(&pv->hvsi, 0);
return 0;
}
static const struct hv_ops hvterm_hvsi_ops = {
.get_chars = hvterm_hvsi_get_chars,
.put_chars = hvterm_hvsi_put_chars,
.notifier_add = hvterm_hvsi_open,
.notifier_del = hvterm_hvsi_close,
.notifier_hangup = hvterm_hvsi_hangup,
.tiocmget = hvterm_hvsi_tiocmget,
.tiocmset = hvterm_hvsi_tiocmset,
};
static void udbg_hvc_putc(char c)
{
int count = -1;
unsigned char bounce_buffer[16];
if (!hvterm_privs[0])
return;
if (c == '\n')
udbg_hvc_putc('\r');
do {
switch(hvterm_privs[0]->proto) {
case HV_PROTOCOL_RAW:
/*
* hvterm_raw_put_chars requires at least a 16-byte
* buffer, so go via the bounce buffer
*/
bounce_buffer[0] = c;
count = hvterm_raw_put_chars(0, bounce_buffer, 1);
break;
case HV_PROTOCOL_HVSI:
count = hvterm_hvsi_put_chars(0, &c, 1);
break;
}
} while (count == 0 || count == -EAGAIN);
}
static int udbg_hvc_getc_poll(void)
{
int rc = 0;
char c;
if (!hvterm_privs[0])
return -1;
switch(hvterm_privs[0]->proto) {
case HV_PROTOCOL_RAW:
rc = hvterm_raw_get_chars(0, &c, 1);
break;
case HV_PROTOCOL_HVSI:
rc = hvterm_hvsi_get_chars(0, &c, 1);
break;
}
if (!rc)
return -1;
return c;
}
static int udbg_hvc_getc(void)
{
int ch;
if (!hvterm_privs[0])
return -1;
for (;;) {
ch = udbg_hvc_getc_poll();
if (ch == -1) {
/* This shouldn't be needed...but... */
volatile unsigned long delay;
for (delay=0; delay < 2000000; delay++)
;
} else {
return ch;
}
}
}
static int hvc_vio_probe(struct vio_dev *vdev,
const struct vio_device_id *id)
{
const struct hv_ops *ops;
struct hvc_struct *hp;
struct hvterm_priv *pv;
hv_protocol_t proto;
int i, termno = -1;
/* probed with invalid parameters. */
if (!vdev || !id)
return -EPERM;
if (of_device_is_compatible(vdev->dev.of_node, "hvterm1")) {
proto = HV_PROTOCOL_RAW;
ops = &hvterm_raw_ops;
} else if (of_device_is_compatible(vdev->dev.of_node, "hvterm-protocol")) {
proto = HV_PROTOCOL_HVSI;
ops = &hvterm_hvsi_ops;
} else {
pr_err("hvc_vio: Unknown protocol for %pOF\n", vdev->dev.of_node);
return -ENXIO;
}
pr_devel("hvc_vio_probe() device %pOF, using %s protocol\n",
vdev->dev.of_node,
proto == HV_PROTOCOL_RAW ? "raw" : "hvsi");
/* Is it our boot console? */
if (hvterm_privs[0] == &hvterm_priv0 &&
vdev->unit_address == hvterm_priv0.termno) {
pv = hvterm_privs[0];
termno = 0;
pr_devel("->boot console, using termno 0\n");
}
/* nope, allocate a new one */
else {
for (i = 0; i < MAX_NR_HVC_CONSOLES && termno < 0; i++)
if (!hvterm_privs[i])
termno = i;
pr_devel("->non-boot console, using termno %d\n", termno);
if (termno < 0)
return -ENODEV;
pv = kzalloc(sizeof(struct hvterm_priv), GFP_KERNEL);
if (!pv)
return -ENOMEM;
pv->termno = vdev->unit_address;
pv->proto = proto;
spin_lock_init(&pv->buf_lock);
hvterm_privs[termno] = pv;
hvsilib_init(&pv->hvsi, hvc_get_chars, hvc_put_chars,
pv->termno, 0);
}
hp = hvc_alloc(termno, vdev->irq, ops, MAX_VIO_PUT_CHARS);
if (IS_ERR(hp))
return PTR_ERR(hp);
dev_set_drvdata(&vdev->dev, hp);
/* register udbg if it's not there already for console 0 */
if (hp->index == 0 && !udbg_putc) {
udbg_putc = udbg_hvc_putc;
udbg_getc = udbg_hvc_getc;
udbg_getc_poll = udbg_hvc_getc_poll;
}
return 0;
}
static struct vio_driver hvc_vio_driver = {
.id_table = hvc_driver_table,
.probe = hvc_vio_probe,
.name = hvc_driver_name,
.driver = {
.suppress_bind_attrs = true,
},
};
static int __init hvc_vio_init(void)
{
int rc;
/* Register as a vio device to receive callbacks */
rc = vio_register_driver(&hvc_vio_driver);
return rc;
}
device_initcall(hvc_vio_init); /* after drivers/tty/hvc/hvc_console.c */
void __init hvc_vio_init_early(void)
{
const __be32 *termno;
const struct hv_ops *ops;
/* find the boot console from /chosen/stdout */
/* Check if it's a virtual terminal */
if (!of_node_name_prefix(of_stdout, "vty"))
return;
termno = of_get_property(of_stdout, "reg", NULL);
if (termno == NULL)
return;
hvterm_priv0.termno = of_read_number(termno, 1);
spin_lock_init(&hvterm_priv0.buf_lock);
hvterm_privs[0] = &hvterm_priv0;
/* Check the protocol */
if (of_device_is_compatible(of_stdout, "hvterm1")) {
hvterm_priv0.proto = HV_PROTOCOL_RAW;
ops = &hvterm_raw_ops;
}
else if (of_device_is_compatible(of_stdout, "hvterm-protocol")) {
hvterm_priv0.proto = HV_PROTOCOL_HVSI;
ops = &hvterm_hvsi_ops;
hvsilib_init(&hvterm_priv0.hvsi, hvc_get_chars, hvc_put_chars,
hvterm_priv0.termno, 1);
/* HVSI, perform the handshake now */
hvsilib_establish(&hvterm_priv0.hvsi);
} else
return;
udbg_putc = udbg_hvc_putc;
udbg_getc = udbg_hvc_getc;
udbg_getc_poll = udbg_hvc_getc_poll;
#ifdef HVC_OLD_HVSI
/* When using the old HVSI driver don't register the HVC
* backend for HVSI, only do udbg
*/
if (hvterm_priv0.proto == HV_PROTOCOL_HVSI)
return;
#endif
/* Check whether the user has requested a different console. */
if (!strstr(boot_command_line, "console="))
add_preferred_console("hvc", 0, NULL);
hvc_instantiate(0, 0, ops);
}
/* Call this from early_init() for a working debug console on
 * vterm-capable LPAR machines
 */
#ifdef CONFIG_PPC_EARLY_DEBUG_LPAR
void __init udbg_init_debug_lpar(void)
{
/*
* If we're running as a hypervisor then we definitely can't call the
* hypervisor to print debug output (we *are* the hypervisor), so don't
* register if we detect that MSR_HV=1.
*/
if (mfmsr() & MSR_HV)
return;
hvterm_privs[0] = &hvterm_priv0;
hvterm_priv0.termno = 0;
hvterm_priv0.proto = HV_PROTOCOL_RAW;
spin_lock_init(&hvterm_priv0.buf_lock);
udbg_putc = udbg_hvc_putc;
udbg_getc = udbg_hvc_getc;
udbg_getc_poll = udbg_hvc_getc_poll;
}
#endif /* CONFIG_PPC_EARLY_DEBUG_LPAR */
#ifdef CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI
void __init udbg_init_debug_lpar_hvsi(void)
{
/* See comment above in udbg_init_debug_lpar() */
if (mfmsr() & MSR_HV)
return;
hvterm_privs[0] = &hvterm_priv0;
hvterm_priv0.termno = CONFIG_PPC_EARLY_DEBUG_HVSI_VTERMNO;
hvterm_priv0.proto = HV_PROTOCOL_HVSI;
spin_lock_init(&hvterm_priv0.buf_lock);
udbg_putc = udbg_hvc_putc;
udbg_getc = udbg_hvc_getc;
udbg_getc_poll = udbg_hvc_getc_poll;
hvsilib_init(&hvterm_priv0.hvsi, hvc_get_chars, hvc_put_chars,
hvterm_priv0.termno, 1);
hvsilib_establish(&hvterm_priv0.hvsi);
}
#endif /* CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI */
|
/* file-mmu.c: ramfs MMU-based file operations
*
* Resizable simple ram filesystem for Linux.
*
* Copyright (C) 2000 Linus Torvalds.
* 2000 Transmeta Corp.
*
* Usage limits added by David Gibson, Linuxcare Australia.
* This file is released under the GPL.
*/
/*
* NOTE! This filesystem is probably most useful
* not as a real filesystem, but as an example of
* how virtual filesystems can be written.
*
* It doesn't get much simpler than this. Consider
* that this file implements the full semantics of
* a POSIX-compliant read-write filesystem.
*
* Note in particular how the filesystem does not
* need to implement any data structures of its own
* to keep track of the virtual data: using the VFS
* caches is sufficient.
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/ramfs.h>
#include <linux/sched.h>
#include "internal.h"
static unsigned long ramfs_mmu_get_unmapped_area(struct file *file,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
}
const struct file_operations ramfs_file_operations = {
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.fsync = noop_fsync,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.llseek = generic_file_llseek,
.get_unmapped_area = ramfs_mmu_get_unmapped_area,
};
const struct inode_operations ramfs_file_inode_operations = {
.setattr = simple_setattr,
.getattr = simple_getattr,
};
|
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __MMHUB_V2_3_H__
#define __MMHUB_V2_3_H__
extern const struct amdgpu_mmhub_funcs mmhub_v2_3_funcs;
#endif
|
#include <linux/lockdep.h>
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
*
* Copyright (C) 2005 Mike Isely <[email protected]>
* Copyright (C) 2004 Aurelien Alleaume <[email protected]>
*/
#ifndef __PVRUSB2_CX2584X_V4L_H
#define __PVRUSB2_CX2584X_V4L_H
/*
This module connects the pvrusb2 driver to the I2C chip level
driver which handles combined device audio & video processing.
This interface is used internally by the driver; higher level code
should only interact through the interface provided by
pvrusb2-hdw.h.
*/
#include "pvrusb2-hdw-internal.h"
void pvr2_cx25840_subdev_update(struct pvr2_hdw *, struct v4l2_subdev *sd);
#endif /* __PVRUSB2_CX2584X_V4L_H */
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018 MediaTek Inc.
*
* Author: ZH Chen <[email protected]>
*
*/
#ifndef __PINCTRL_MTK_MT6765_H
#define __PINCTRL_MTK_MT6765_H
#include "pinctrl-paris.h"
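/*
 * Each MTK_PIN() entry below describes one pad: the pin number and GPIO
 * name, its EINT mapping (or NO_EINT_SUPPORT), the DRV_GRPx drive group,
 * and up to eight selectable functions, where function 0 is always the
 * plain GPIO mode.
 */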
static struct mtk_pin_desc mtk_pins_mt6765[] = {
MTK_PIN(
0, "GPIO0",
MTK_EINT_FUNCTION(0, 0),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO0"),
MTK_FUNCTION(1, "UTXD1"),
MTK_FUNCTION(2, "CLKM0"),
MTK_FUNCTION(3, "MD_INT0"),
MTK_FUNCTION(4, "I2S0_MCK"),
MTK_FUNCTION(5, "MD_UTXD1"),
MTK_FUNCTION(6, "TP_GPIO0_AO"),
MTK_FUNCTION(7, "DBG_MON_B9")
),
MTK_PIN(
1, "GPIO1",
MTK_EINT_FUNCTION(0, 1),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO1"),
MTK_FUNCTION(1, "URXD1"),
MTK_FUNCTION(2, "CLKM1"),
MTK_FUNCTION(4, "I2S0_BCK"),
MTK_FUNCTION(5, "MD_URXD1"),
MTK_FUNCTION(6, "TP_GPIO1_AO"),
MTK_FUNCTION(7, "DBG_MON_B10")
),
MTK_PIN(
2, "GPIO2",
MTK_EINT_FUNCTION(0, 2),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO2"),
MTK_FUNCTION(1, "UCTS0"),
MTK_FUNCTION(2, "CLKM2"),
MTK_FUNCTION(3, "UTXD1"),
MTK_FUNCTION(4, "I2S0_LRCK"),
MTK_FUNCTION(5, "ANT_SEL6"),
MTK_FUNCTION(6, "TP_GPIO2_AO"),
MTK_FUNCTION(7, "DBG_MON_B11")
),
MTK_PIN(
3, "GPIO3",
MTK_EINT_FUNCTION(0, 3),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO3"),
MTK_FUNCTION(1, "URTS0"),
MTK_FUNCTION(2, "CLKM3"),
MTK_FUNCTION(3, "URXD1"),
MTK_FUNCTION(4, "I2S0_DI"),
MTK_FUNCTION(5, "ANT_SEL7"),
MTK_FUNCTION(6, "TP_GPIO3_AO"),
MTK_FUNCTION(7, "DBG_MON_B12")
),
MTK_PIN(
4, "GPIO4",
MTK_EINT_FUNCTION(0, 4),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO4"),
MTK_FUNCTION(1, "SPI1_B_MI"),
MTK_FUNCTION(2, "SCP_SPI1_MI"),
MTK_FUNCTION(3, "UCTS0"),
MTK_FUNCTION(4, "I2S3_MCK"),
MTK_FUNCTION(5, "SSPM_URXD_AO"),
MTK_FUNCTION(6, "TP_GPIO4_AO")
),
MTK_PIN(
5, "GPIO5",
MTK_EINT_FUNCTION(0, 5),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO5"),
MTK_FUNCTION(1, "SPI1_B_CSB"),
MTK_FUNCTION(2, "SCP_SPI1_CS"),
MTK_FUNCTION(3, "URTS0"),
MTK_FUNCTION(4, "I2S3_BCK"),
MTK_FUNCTION(5, "SSPM_UTXD_AO"),
MTK_FUNCTION(6, "TP_GPIO5_AO")
),
MTK_PIN(
6, "GPIO6",
MTK_EINT_FUNCTION(0, 6),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO6"),
MTK_FUNCTION(1, "SPI1_B_MO"),
MTK_FUNCTION(2, "SCP_SPI1_MO"),
MTK_FUNCTION(3, "PWM0"),
MTK_FUNCTION(4, "I2S3_LRCK"),
MTK_FUNCTION(5, "MD_UTXD0"),
MTK_FUNCTION(6, "TP_GPIO6_AO")
),
MTK_PIN(
7, "GPIO7",
MTK_EINT_FUNCTION(0, 7),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO7"),
MTK_FUNCTION(1, "SPI1_B_CLK"),
MTK_FUNCTION(2, "SCP_SPI1_CK"),
MTK_FUNCTION(3, "PWM1"),
MTK_FUNCTION(4, "I2S3_DO"),
MTK_FUNCTION(5, "MD_URXD0"),
MTK_FUNCTION(6, "TP_GPIO7_AO")
),
MTK_PIN(
8, "GPIO8",
MTK_EINT_FUNCTION(0, 8),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO8"),
MTK_FUNCTION(1, "UTXD1"),
MTK_FUNCTION(2, "SRCLKENAI0"),
MTK_FUNCTION(3, "MD_INT1_C2K_UIM0_HOT_PLUG"),
MTK_FUNCTION(4, "ANT_SEL3"),
MTK_FUNCTION(5, "MFG_JTAG_TRSTN"),
MTK_FUNCTION(6, "I2S2_MCK"),
MTK_FUNCTION(7, "JTRSTN_SEL1")
),
MTK_PIN(
9, "GPIO9",
MTK_EINT_FUNCTION(0, 9),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO9"),
MTK_FUNCTION(1, "MD_INT0"),
MTK_FUNCTION(2, "CMMCLK2"),
MTK_FUNCTION(3, "CONN_MCU_TRST_B"),
MTK_FUNCTION(4, "IDDIG"),
MTK_FUNCTION(5, "SDA_6306"),
MTK_FUNCTION(6, "MCUPM_JTAG_TRSTN"),
MTK_FUNCTION(7, "DBG_MON_B22")
),
MTK_PIN(
10, "GPIO10",
MTK_EINT_FUNCTION(0, 10),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO10"),
MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"),
MTK_FUNCTION(3, "CONN_MCU_DBGI_N"),
MTK_FUNCTION(4, "SRCLKENAI1"),
MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
MTK_FUNCTION(6, "CMVREF1"),
MTK_FUNCTION(7, "DBG_MON_B23")
),
MTK_PIN(
11, "GPIO11",
MTK_EINT_FUNCTION(0, 11),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO11"),
MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"),
MTK_FUNCTION(2, "CLKM3"),
MTK_FUNCTION(3, "ANT_SEL6"),
MTK_FUNCTION(4, "SRCLKENAI0"),
MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
MTK_FUNCTION(6, "UCTS1"),
MTK_FUNCTION(7, "DBG_MON_B24")
),
MTK_PIN(
12, "GPIO12",
MTK_EINT_FUNCTION(0, 12),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO12"),
MTK_FUNCTION(1, "PWM0"),
MTK_FUNCTION(2, "SRCLKENAI1"),
MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
MTK_FUNCTION(4, "MD_INT0"),
MTK_FUNCTION(5, "DVFSRC_EXT_REQ"),
MTK_FUNCTION(6, "URTS1")
),
MTK_PIN(
13, "GPIO13",
MTK_EINT_FUNCTION(0, 13),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO13"),
MTK_FUNCTION(1, "ANT_SEL0"),
MTK_FUNCTION(2, "SPI4_MI"),
MTK_FUNCTION(3, "SCP_SPI0_MI"),
MTK_FUNCTION(4, "MD_URXD0"),
MTK_FUNCTION(5, "CLKM0"),
MTK_FUNCTION(6, "I2S0_MCK"),
MTK_FUNCTION(7, "DBG_MON_A0")
),
MTK_PIN(
14, "GPIO14",
MTK_EINT_FUNCTION(0, 14),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO14"),
MTK_FUNCTION(1, "ANT_SEL1"),
MTK_FUNCTION(2, "SPI4_CSB"),
MTK_FUNCTION(3, "SCP_SPI0_CS"),
MTK_FUNCTION(4, "MD_UTXD0"),
MTK_FUNCTION(5, "CLKM1"),
MTK_FUNCTION(6, "I2S0_BCK"),
MTK_FUNCTION(7, "DBG_MON_A1")
),
MTK_PIN(
15, "GPIO15",
MTK_EINT_FUNCTION(0, 15),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO15"),
MTK_FUNCTION(1, "ANT_SEL2"),
MTK_FUNCTION(2, "SPI4_MO"),
MTK_FUNCTION(3, "SCP_SPI0_MO"),
MTK_FUNCTION(4, "MD_URXD1"),
MTK_FUNCTION(5, "CLKM2"),
MTK_FUNCTION(6, "I2S0_LRCK"),
MTK_FUNCTION(7, "DBG_MON_A2")
),
MTK_PIN(
16, "GPIO16",
MTK_EINT_FUNCTION(0, 16),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO16"),
MTK_FUNCTION(1, "ANT_SEL3"),
MTK_FUNCTION(2, "SPI4_CLK"),
MTK_FUNCTION(3, "SCP_SPI0_CK"),
MTK_FUNCTION(4, "MD_UTXD1"),
MTK_FUNCTION(5, "CLKM3"),
MTK_FUNCTION(6, "I2S3_MCK"),
MTK_FUNCTION(7, "DBG_MON_A3")
),
MTK_PIN(
17, "GPIO17",
MTK_EINT_FUNCTION(0, 17),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO17"),
MTK_FUNCTION(1, "ANT_SEL4"),
MTK_FUNCTION(2, "SPI2_MO"),
MTK_FUNCTION(3, "SCP_SPI0_MO"),
MTK_FUNCTION(4, "PWM1"),
MTK_FUNCTION(5, "IDDIG"),
MTK_FUNCTION(6, "I2S0_DI"),
MTK_FUNCTION(7, "DBG_MON_A4")
),
MTK_PIN(
18, "GPIO18",
MTK_EINT_FUNCTION(0, 18),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO18"),
MTK_FUNCTION(1, "ANT_SEL5"),
MTK_FUNCTION(2, "SPI2_CLK"),
MTK_FUNCTION(3, "SCP_SPI0_CK"),
MTK_FUNCTION(4, "MD_INT0"),
MTK_FUNCTION(5, "USB_DRVVBUS"),
MTK_FUNCTION(6, "I2S3_BCK"),
MTK_FUNCTION(7, "DBG_MON_A5")
),
MTK_PIN(
19, "GPIO19",
MTK_EINT_FUNCTION(0, 19),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO19"),
MTK_FUNCTION(1, "ANT_SEL6"),
MTK_FUNCTION(2, "SPI2_MI"),
MTK_FUNCTION(3, "SCP_SPI0_MI"),
MTK_FUNCTION(4, "MD_INT2_C2K_UIM1_HOT_PLUG"),
MTK_FUNCTION(6, "I2S3_LRCK"),
MTK_FUNCTION(7, "DBG_MON_A6")
),
MTK_PIN(
20, "GPIO20",
MTK_EINT_FUNCTION(0, 20),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO20"),
MTK_FUNCTION(1, "ANT_SEL7"),
MTK_FUNCTION(2, "SPI2_CSB"),
MTK_FUNCTION(3, "SCP_SPI0_CS"),
MTK_FUNCTION(4, "MD_INT1_C2K_UIM0_HOT_PLUG"),
MTK_FUNCTION(5, "CMMCLK3"),
MTK_FUNCTION(6, "I2S3_DO"),
MTK_FUNCTION(7, "DBG_MON_A7")
),
MTK_PIN(
21, "GPIO21",
MTK_EINT_FUNCTION(0, 21),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO21"),
MTK_FUNCTION(1, "SPI3_MI"),
MTK_FUNCTION(2, "SRCLKENAI1"),
MTK_FUNCTION(3, "DAP_MD32_SWD"),
MTK_FUNCTION(4, "CMVREF0"),
MTK_FUNCTION(5, "SCP_SPI0_MI"),
MTK_FUNCTION(6, "I2S2_MCK"),
MTK_FUNCTION(7, "DBG_MON_A8")
),
MTK_PIN(
22, "GPIO22",
MTK_EINT_FUNCTION(0, 22),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO22"),
MTK_FUNCTION(1, "SPI3_CSB"),
MTK_FUNCTION(2, "SRCLKENAI0"),
MTK_FUNCTION(3, "DAP_MD32_SWCK"),
MTK_FUNCTION(4, "CMVREF1"),
MTK_FUNCTION(5, "SCP_SPI0_CS"),
MTK_FUNCTION(6, "I2S2_BCK"),
MTK_FUNCTION(7, "DBG_MON_A9")
),
MTK_PIN(
23, "GPIO23",
MTK_EINT_FUNCTION(0, 23),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO23"),
MTK_FUNCTION(1, "SPI3_MO"),
MTK_FUNCTION(2, "PWM0"),
MTK_FUNCTION(3, "KPROW7"),
MTK_FUNCTION(4, "ANT_SEL3"),
MTK_FUNCTION(5, "SCP_SPI0_MO"),
MTK_FUNCTION(6, "I2S2_LRCK"),
MTK_FUNCTION(7, "DBG_MON_A10")
),
MTK_PIN(
24, "GPIO24",
MTK_EINT_FUNCTION(0, 24),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO24"),
MTK_FUNCTION(1, "SPI3_CLK"),
MTK_FUNCTION(2, "UDI_TCK"),
MTK_FUNCTION(3, "IO_JTAG_TCK"),
MTK_FUNCTION(4, "SSPM_JTAG_TCK"),
MTK_FUNCTION(5, "SCP_SPI0_CK"),
MTK_FUNCTION(6, "I2S2_DI"),
MTK_FUNCTION(7, "DBG_MON_A11")
),
MTK_PIN(
25, "GPIO25",
MTK_EINT_FUNCTION(0, 25),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO25"),
MTK_FUNCTION(1, "SPI1_A_MI"),
MTK_FUNCTION(2, "UDI_TMS"),
MTK_FUNCTION(3, "IO_JTAG_TMS"),
MTK_FUNCTION(4, "SSPM_JTAG_TMS"),
MTK_FUNCTION(5, "KPROW3"),
MTK_FUNCTION(6, "I2S1_MCK"),
MTK_FUNCTION(7, "DBG_MON_A12")
),
MTK_PIN(
26, "GPIO26",
MTK_EINT_FUNCTION(0, 26),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO26"),
MTK_FUNCTION(1, "SPI1_A_CSB"),
MTK_FUNCTION(2, "UDI_TDI"),
MTK_FUNCTION(3, "IO_JTAG_TDI"),
MTK_FUNCTION(4, "SSPM_JTAG_TDI"),
MTK_FUNCTION(5, "KPROW4"),
MTK_FUNCTION(6, "I2S1_BCK"),
MTK_FUNCTION(7, "DBG_MON_A13")
),
MTK_PIN(
27, "GPIO27",
MTK_EINT_FUNCTION(0, 27),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO27"),
MTK_FUNCTION(1, "SPI1_A_MO"),
MTK_FUNCTION(2, "UDI_TDO"),
MTK_FUNCTION(3, "IO_JTAG_TDO"),
MTK_FUNCTION(4, "SSPM_JTAG_TDO"),
MTK_FUNCTION(5, "KPROW5"),
MTK_FUNCTION(6, "I2S1_LRCK"),
MTK_FUNCTION(7, "DBG_MON_A14")
),
MTK_PIN(
28, "GPIO28",
MTK_EINT_FUNCTION(0, 28),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO28"),
MTK_FUNCTION(1, "SPI1_A_CLK"),
MTK_FUNCTION(2, "UDI_NTRST"),
MTK_FUNCTION(3, "IO_JTAG_TRSTN"),
MTK_FUNCTION(4, "SSPM_JTAG_TRSTN"),
MTK_FUNCTION(5, "KPROW6"),
MTK_FUNCTION(6, "I2S1_DO"),
MTK_FUNCTION(7, "DBG_MON_A15")
),
MTK_PIN(
29, "GPIO29",
MTK_EINT_FUNCTION(0, 29),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO29"),
MTK_FUNCTION(1, "MSDC1_CLK"),
MTK_FUNCTION(2, "IO_JTAG_TCK"),
MTK_FUNCTION(3, "UDI_TCK"),
MTK_FUNCTION(4, "CONN_DSP_JCK"),
MTK_FUNCTION(5, "SSPM_JTAG_TCK"),
MTK_FUNCTION(6, "CONN_MCU_AICE_TCKC"),
MTK_FUNCTION(7, "DAP_MD32_SWCK")
),
MTK_PIN(
30, "GPIO30",
MTK_EINT_FUNCTION(0, 30),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO30"),
MTK_FUNCTION(1, "MSDC1_CMD"),
MTK_FUNCTION(2, "IO_JTAG_TMS"),
MTK_FUNCTION(3, "UDI_TMS"),
MTK_FUNCTION(4, "CONN_DSP_JMS"),
MTK_FUNCTION(5, "SSPM_JTAG_TMS"),
MTK_FUNCTION(6, "CONN_MCU_AICE_TMSC"),
MTK_FUNCTION(7, "DAP_MD32_SWD")
),
MTK_PIN(
31, "GPIO31",
MTK_EINT_FUNCTION(0, 31),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO31"),
MTK_FUNCTION(1, "MSDC1_DAT3")
),
MTK_PIN(
32, "GPIO32",
MTK_EINT_FUNCTION(0, 32),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO32"),
MTK_FUNCTION(1, "MSDC1_DAT0"),
MTK_FUNCTION(2, "IO_JTAG_TDI"),
MTK_FUNCTION(3, "UDI_TDI"),
MTK_FUNCTION(4, "CONN_DSP_JDI"),
MTK_FUNCTION(5, "SSPM_JTAG_TDI")
),
MTK_PIN(
33, "GPIO33",
MTK_EINT_FUNCTION(0, 33),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO33"),
MTK_FUNCTION(1, "MSDC1_DAT2"),
MTK_FUNCTION(2, "IO_JTAG_TRSTN"),
MTK_FUNCTION(3, "UDI_NTRST"),
MTK_FUNCTION(4, "CONN_DSP_JINTP"),
MTK_FUNCTION(5, "SSPM_JTAG_TRSTN")
),
MTK_PIN(
34, "GPIO34",
MTK_EINT_FUNCTION(0, 34),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO34"),
MTK_FUNCTION(1, "MSDC1_DAT1"),
MTK_FUNCTION(2, "IO_JTAG_TDO"),
MTK_FUNCTION(3, "UDI_TDO"),
MTK_FUNCTION(4, "CONN_DSP_JDO"),
MTK_FUNCTION(5, "SSPM_JTAG_TDO")
),
MTK_PIN(
35, "GPIO35",
MTK_EINT_FUNCTION(0, 35),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO35"),
MTK_FUNCTION(1, "MD1_SIM2_SIO"),
MTK_FUNCTION(2, "CCU_JTAG_TDO"),
MTK_FUNCTION(3, "MD1_SIM1_SIO"),
MTK_FUNCTION(5, "SCP_JTAG_TDO"),
MTK_FUNCTION(6, "CONN_DSP_JDO"),
MTK_FUNCTION(7, "DBG_MON_A16")
),
MTK_PIN(
36, "GPIO36",
MTK_EINT_FUNCTION(0, 36),
DRV_GRP0,
MTK_FUNCTION(0, "GPIO36"),
MTK_FUNCTION(1, "MD1_SIM2_SRST"),
MTK_FUNCTION(2, "CCU_JTAG_TMS"),
MTK_FUNCTION(3, "MD1_SIM1_SRST"),
MTK_FUNCTION(4, "CONN_MCU_AICE_TMSC"),
MTK_FUNCTION(5, "SCP_JTAG_TMS"),
MTK_FUNCTION(6, "CONN_DSP_JMS"),
MTK_FUNCTION(7, "DBG_MON_A17")
),
MTK_PIN(
37, "GPIO37",
MTK_EINT_FUNCTION(0, 37),
DRV_GRP0,
MTK_FUNCTION(0, "GPIO37"),
MTK_FUNCTION(1, "MD1_SIM2_SCLK"),
MTK_FUNCTION(2, "CCU_JTAG_TDI"),
MTK_FUNCTION(3, "MD1_SIM1_SCLK"),
MTK_FUNCTION(5, "SCP_JTAG_TDI"),
MTK_FUNCTION(6, "CONN_DSP_JDI"),
MTK_FUNCTION(7, "DBG_MON_A18")
),
MTK_PIN(
38, "GPIO38",
MTK_EINT_FUNCTION(0, 38),
DRV_GRP0,
MTK_FUNCTION(0, "GPIO38"),
MTK_FUNCTION(1, "MD1_SIM1_SCLK"),
MTK_FUNCTION(3, "MD1_SIM2_SCLK"),
MTK_FUNCTION(7, "DBG_MON_A19")
),
MTK_PIN(
39, "GPIO39",
MTK_EINT_FUNCTION(0, 39),
DRV_GRP0,
MTK_FUNCTION(0, "GPIO39"),
MTK_FUNCTION(1, "MD1_SIM1_SRST"),
MTK_FUNCTION(2, "CCU_JTAG_TCK"),
MTK_FUNCTION(3, "MD1_SIM2_SRST"),
MTK_FUNCTION(4, "CONN_MCU_AICE_TCKC"),
MTK_FUNCTION(5, "SCP_JTAG_TCK"),
MTK_FUNCTION(6, "CONN_DSP_JCK"),
MTK_FUNCTION(7, "DBG_MON_A20")
),
MTK_PIN(
40, "GPIO40",
MTK_EINT_FUNCTION(0, 40),
DRV_GRP0,
MTK_FUNCTION(0, "GPIO40"),
MTK_FUNCTION(1, "MD1_SIM1_SIO"),
MTK_FUNCTION(2, "CCU_JTAG_TRST"),
MTK_FUNCTION(3, "MD1_SIM2_SIO"),
MTK_FUNCTION(5, "SCP_JTAG_TRSTN"),
MTK_FUNCTION(6, "CONN_DSP_JINTP"),
MTK_FUNCTION(7, "DBG_MON_A21")
),
MTK_PIN(
41, "GPIO41",
MTK_EINT_FUNCTION(0, 41),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO41"),
MTK_FUNCTION(1, "IDDIG"),
MTK_FUNCTION(2, "URXD1"),
MTK_FUNCTION(3, "UCTS0"),
MTK_FUNCTION(4, "KPCOL2"),
MTK_FUNCTION(5, "SSPM_UTXD_AO"),
MTK_FUNCTION(6, "MD_INT0"),
MTK_FUNCTION(7, "DBG_MON_A22")
),
MTK_PIN(
42, "GPIO42",
MTK_EINT_FUNCTION(0, 42),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO42"),
MTK_FUNCTION(1, "USB_DRVVBUS"),
MTK_FUNCTION(2, "UTXD1"),
MTK_FUNCTION(3, "URTS0"),
MTK_FUNCTION(4, "KPROW2"),
MTK_FUNCTION(5, "SSPM_URXD_AO"),
MTK_FUNCTION(6, "MD_INT1_C2K_UIM0_HOT_PLUG"),
MTK_FUNCTION(7, "DBG_MON_A23")
),
MTK_PIN(
43, "GPIO43",
MTK_EINT_FUNCTION(0, 43),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO43"),
MTK_FUNCTION(1, "DISP_PWM"),
MTK_FUNCTION(7, "DBG_MON_A24")
),
MTK_PIN(
44, "GPIO44",
MTK_EINT_FUNCTION(0, 44),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO44"),
MTK_FUNCTION(1, "DSI_TE"),
MTK_FUNCTION(7, "DBG_MON_A25")
),
MTK_PIN(
45, "GPIO45",
MTK_EINT_FUNCTION(0, 45),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO45"),
MTK_FUNCTION(1, "LCM_RST"),
MTK_FUNCTION(7, "DBG_MON_A26")
),
MTK_PIN(
46, "GPIO46",
MTK_EINT_FUNCTION(0, 46),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO46"),
MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"),
MTK_FUNCTION(2, "UCTS0"),
MTK_FUNCTION(3, "UCTS1"),
MTK_FUNCTION(4, "IDDIG"),
MTK_FUNCTION(5, "SCL_6306"),
MTK_FUNCTION(6, "TP_UCTS1_AO"),
MTK_FUNCTION(7, "DBG_MON_A27")
),
MTK_PIN(
47, "GPIO47",
MTK_EINT_FUNCTION(0, 47),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO47"),
MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"),
MTK_FUNCTION(2, "URTS0"),
MTK_FUNCTION(3, "URTS1"),
MTK_FUNCTION(4, "USB_DRVVBUS"),
MTK_FUNCTION(5, "SDA_6306"),
MTK_FUNCTION(6, "TP_URTS1_AO"),
MTK_FUNCTION(7, "DBG_MON_A28")
),
MTK_PIN(
48, "GPIO48",
MTK_EINT_FUNCTION(0, 48),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO48"),
MTK_FUNCTION(1, "SCL5"),
MTK_FUNCTION(7, "DBG_MON_A29")
),
MTK_PIN(
49, "GPIO49",
MTK_EINT_FUNCTION(0, 49),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO49"),
MTK_FUNCTION(1, "SDA5"),
MTK_FUNCTION(7, "DBG_MON_A30")
),
MTK_PIN(
50, "GPIO50",
MTK_EINT_FUNCTION(0, 50),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO50"),
MTK_FUNCTION(1, "SCL3"),
MTK_FUNCTION(2, "URXD1"),
MTK_FUNCTION(3, "MD_URXD1"),
MTK_FUNCTION(4, "SSPM_URXD_AO"),
MTK_FUNCTION(5, "IDDIG"),
MTK_FUNCTION(6, "TP_URXD1_AO"),
MTK_FUNCTION(7, "DBG_MON_A31")
),
MTK_PIN(
51, "GPIO51",
MTK_EINT_FUNCTION(0, 51),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO51"),
MTK_FUNCTION(1, "SDA3"),
MTK_FUNCTION(2, "UTXD1"),
MTK_FUNCTION(3, "MD_UTXD1"),
MTK_FUNCTION(4, "SSPM_UTXD_AO"),
MTK_FUNCTION(5, "USB_DRVVBUS"),
MTK_FUNCTION(6, "TP_UTXD1_AO"),
MTK_FUNCTION(7, "DBG_MON_A32")
),
MTK_PIN(
52, "GPIO52",
MTK_EINT_FUNCTION(0, 52),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO52"),
MTK_FUNCTION(1, "BPI_BUS15")
),
MTK_PIN(
53, "GPIO53",
MTK_EINT_FUNCTION(0, 53),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO53"),
MTK_FUNCTION(1, "BPI_BUS13")
),
MTK_PIN(
54, "GPIO54",
MTK_EINT_FUNCTION(0, 54),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO54"),
MTK_FUNCTION(1, "BPI_BUS12")
),
MTK_PIN(
55, "GPIO55",
MTK_EINT_FUNCTION(0, 55),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO55"),
MTK_FUNCTION(1, "BPI_BUS8")
),
MTK_PIN(
56, "GPIO56",
MTK_EINT_FUNCTION(0, 56),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO56"),
MTK_FUNCTION(1, "BPI_BUS9"),
MTK_FUNCTION(2, "SCL_6306")
),
MTK_PIN(
57, "GPIO57",
MTK_EINT_FUNCTION(0, 57),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO57"),
MTK_FUNCTION(1, "BPI_BUS10"),
MTK_FUNCTION(2, "SDA_6306")
),
MTK_PIN(
58, "GPIO58",
MTK_EINT_FUNCTION(0, 58),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO58"),
MTK_FUNCTION(1, "RFIC0_BSI_D2")
),
MTK_PIN(
59, "GPIO59",
MTK_EINT_FUNCTION(0, 59),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO59"),
MTK_FUNCTION(1, "RFIC0_BSI_D1")
),
MTK_PIN(
60, "GPIO60",
MTK_EINT_FUNCTION(0, 60),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO60"),
MTK_FUNCTION(1, "RFIC0_BSI_D0")
),
MTK_PIN(
61, "GPIO61",
MTK_EINT_FUNCTION(0, 61),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO61"),
MTK_FUNCTION(1, "MIPI1_SDATA")
),
MTK_PIN(
62, "GPIO62",
MTK_EINT_FUNCTION(0, 62),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO62"),
MTK_FUNCTION(1, "MIPI1_SCLK")
),
MTK_PIN(
63, "GPIO63",
MTK_EINT_FUNCTION(0, 63),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO63"),
MTK_FUNCTION(1, "MIPI0_SDATA")
),
MTK_PIN(
64, "GPIO64",
MTK_EINT_FUNCTION(0, 64),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO64"),
MTK_FUNCTION(1, "MIPI0_SCLK")
),
MTK_PIN(
65, "GPIO65",
MTK_EINT_FUNCTION(0, 65),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO65"),
MTK_FUNCTION(1, "MIPI3_SDATA"),
MTK_FUNCTION(2, "BPI_BUS16")
),
MTK_PIN(
66, "GPIO66",
MTK_EINT_FUNCTION(0, 66),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO66"),
MTK_FUNCTION(1, "MIPI3_SCLK"),
MTK_FUNCTION(2, "BPI_BUS17")
),
MTK_PIN(
67, "GPIO67",
MTK_EINT_FUNCTION(0, 67),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO67"),
MTK_FUNCTION(1, "MIPI2_SDATA")
),
MTK_PIN(
68, "GPIO68",
MTK_EINT_FUNCTION(0, 68),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO68"),
MTK_FUNCTION(1, "MIPI2_SCLK")
),
MTK_PIN(
69, "GPIO69",
MTK_EINT_FUNCTION(0, 69),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO69"),
MTK_FUNCTION(1, "BPI_BUS7")
),
MTK_PIN(
70, "GPIO70",
MTK_EINT_FUNCTION(0, 70),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO70"),
MTK_FUNCTION(1, "BPI_BUS6")
),
MTK_PIN(
71, "GPIO71",
MTK_EINT_FUNCTION(0, 71),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO71"),
MTK_FUNCTION(1, "BPI_BUS5")
),
MTK_PIN(
72, "GPIO72",
MTK_EINT_FUNCTION(0, 72),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO72"),
MTK_FUNCTION(1, "BPI_BUS4")
),
MTK_PIN(
73, "GPIO73",
MTK_EINT_FUNCTION(0, 73),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO73"),
MTK_FUNCTION(1, "BPI_BUS3")
),
MTK_PIN(
74, "GPIO74",
MTK_EINT_FUNCTION(0, 74),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO74"),
MTK_FUNCTION(1, "BPI_BUS2")
),
MTK_PIN(
75, "GPIO75",
MTK_EINT_FUNCTION(0, 75),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO75"),
MTK_FUNCTION(1, "BPI_BUS1")
),
MTK_PIN(
76, "GPIO76",
MTK_EINT_FUNCTION(0, 76),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO76"),
MTK_FUNCTION(1, "BPI_BUS0")
),
MTK_PIN(
77, "GPIO77",
MTK_EINT_FUNCTION(0, 77),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO77"),
MTK_FUNCTION(1, "BPI_BUS14")
),
MTK_PIN(
78, "GPIO78",
MTK_EINT_FUNCTION(0, 78),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO78"),
MTK_FUNCTION(1, "BPI_BUS11")
),
MTK_PIN(
79, "GPIO79",
MTK_EINT_FUNCTION(0, 79),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO79"),
MTK_FUNCTION(1, "BPI_PA_VM1"),
MTK_FUNCTION(2, "MIPI4_SDATA")
),
MTK_PIN(
80, "GPIO80",
MTK_EINT_FUNCTION(0, 80),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO80"),
MTK_FUNCTION(1, "BPI_PA_VM0"),
MTK_FUNCTION(2, "MIPI4_SCLK")
),
MTK_PIN(
81, "GPIO81",
MTK_EINT_FUNCTION(0, 81),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO81"),
MTK_FUNCTION(1, "SDA1"),
MTK_FUNCTION(7, "DBG_MON_B0")
),
MTK_PIN(
82, "GPIO82",
MTK_EINT_FUNCTION(0, 82),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO82"),
MTK_FUNCTION(1, "SDA0"),
MTK_FUNCTION(7, "DBG_MON_B1")
),
MTK_PIN(
83, "GPIO83",
MTK_EINT_FUNCTION(0, 83),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO83"),
MTK_FUNCTION(1, "SCL0"),
MTK_FUNCTION(7, "DBG_MON_B2")
),
MTK_PIN(
84, "GPIO84",
MTK_EINT_FUNCTION(0, 84),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO84"),
MTK_FUNCTION(1, "SCL1"),
MTK_FUNCTION(7, "DBG_MON_B3")
),
MTK_PIN(
85, "GPIO85",
MTK_EINT_FUNCTION(0, 85),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO85"),
MTK_FUNCTION(1, "RFIC0_BSI_EN")
),
MTK_PIN(
86, "GPIO86",
MTK_EINT_FUNCTION(0, 86),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO86"),
MTK_FUNCTION(1, "RFIC0_BSI_CK")
),
MTK_PIN(
87, "GPIO87",
MTK_EINT_FUNCTION(0, 87),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO87"),
MTK_FUNCTION(2, "MD_INT1_C2K_UIM0_HOT_PLUG"),
MTK_FUNCTION(3, "CMVREF0"),
MTK_FUNCTION(4, "MD_URXD0"),
MTK_FUNCTION(5, "AGPS_SYNC"),
MTK_FUNCTION(6, "EXT_FRAME_SYNC")
),
MTK_PIN(
88, "GPIO88",
MTK_EINT_FUNCTION(0, 88),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO88"),
MTK_FUNCTION(1, "CMMCLK3"),
MTK_FUNCTION(2, "MD_INT2_C2K_UIM1_HOT_PLUG"),
MTK_FUNCTION(3, "CMVREF1"),
MTK_FUNCTION(4, "MD_UTXD0"),
MTK_FUNCTION(5, "AGPS_SYNC"),
MTK_FUNCTION(6, "DVFSRC_EXT_REQ")
),
MTK_PIN(
89, "GPIO89",
MTK_EINT_FUNCTION(0, 89),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO89"),
MTK_FUNCTION(1, "SRCLKENAI0"),
MTK_FUNCTION(2, "PWM2"),
MTK_FUNCTION(3, "MD_INT0"),
MTK_FUNCTION(4, "USB_DRVVBUS"),
MTK_FUNCTION(5, "SCL_6306"),
MTK_FUNCTION(6, "TP_GPIO4_AO"),
MTK_FUNCTION(7, "DBG_MON_B21")
),
MTK_PIN(
90, "GPIO90",
MTK_EINT_FUNCTION(0, 90),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO90"),
MTK_FUNCTION(1, "URXD1"),
MTK_FUNCTION(2, "PWM0"),
MTK_FUNCTION(3, "MD_INT2_C2K_UIM1_HOT_PLUG"),
MTK_FUNCTION(4, "ANT_SEL4"),
MTK_FUNCTION(5, "USB_DRVVBUS"),
MTK_FUNCTION(6, "I2S2_BCK"),
MTK_FUNCTION(7, "DBG_MON_B4")
),
MTK_PIN(
91, "GPIO91",
MTK_EINT_FUNCTION(0, 91),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO91"),
MTK_FUNCTION(1, "KPROW1"),
MTK_FUNCTION(2, "PWM2"),
MTK_FUNCTION(3, "MD_INT0"),
MTK_FUNCTION(4, "ANT_SEL5"),
MTK_FUNCTION(5, "IDDIG"),
MTK_FUNCTION(6, "I2S2_LRCK"),
MTK_FUNCTION(7, "DBG_MON_B5")
),
MTK_PIN(
92, "GPIO92",
MTK_EINT_FUNCTION(0, 92),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO92"),
MTK_FUNCTION(1, "KPROW0"),
MTK_FUNCTION(5, "DVFSRC_EXT_REQ"),
MTK_FUNCTION(6, "I2S2_DI"),
MTK_FUNCTION(7, "DBG_MON_B6")
),
MTK_PIN(
93, "GPIO93",
MTK_EINT_FUNCTION(0, 93),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO93"),
MTK_FUNCTION(1, "KPCOL0"),
MTK_FUNCTION(7, "DBG_MON_B7")
),
MTK_PIN(
94, "GPIO94",
MTK_EINT_FUNCTION(0, 94),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO94"),
MTK_FUNCTION(1, "KPCOL1"),
MTK_FUNCTION(5, "CMFLASH"),
MTK_FUNCTION(6, "CMVREF0"),
MTK_FUNCTION(7, "DBG_MON_B8")
),
MTK_PIN(
95, "GPIO95",
MTK_EINT_FUNCTION(0, 95),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO95"),
MTK_FUNCTION(1, "URXD0"),
MTK_FUNCTION(2, "UTXD0"),
MTK_FUNCTION(3, "MD_URXD0"),
MTK_FUNCTION(4, "PTA_RXD"),
MTK_FUNCTION(5, "SSPM_URXD_AO"),
MTK_FUNCTION(6, "WIFI_RXD")
),
MTK_PIN(
96, "GPIO96",
MTK_EINT_FUNCTION(0, 96),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO96"),
MTK_FUNCTION(1, "UTXD0"),
MTK_FUNCTION(2, "URXD0"),
MTK_FUNCTION(3, "MD_UTXD0"),
MTK_FUNCTION(4, "PTA_TXD"),
MTK_FUNCTION(5, "SSPM_UTXD_AO"),
MTK_FUNCTION(6, "WIFI_TXD")
),
MTK_PIN(
97, "GPIO97",
MTK_EINT_FUNCTION(0, 97),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO97"),
MTK_FUNCTION(1, "UCTS0"),
MTK_FUNCTION(2, "I2S1_MCK"),
MTK_FUNCTION(3, "CONN_MCU_TDO"),
MTK_FUNCTION(4, "SPI5_MI"),
MTK_FUNCTION(5, "SCL_6306"),
MTK_FUNCTION(6, "MCUPM_JTAG_TDO"),
MTK_FUNCTION(7, "DBG_MON_B15")
),
MTK_PIN(
98, "GPIO98",
MTK_EINT_FUNCTION(0, 98),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO98"),
MTK_FUNCTION(1, "URTS0"),
MTK_FUNCTION(2, "I2S1_BCK"),
MTK_FUNCTION(3, "CONN_MCU_TMS"),
MTK_FUNCTION(4, "SPI5_CSB"),
MTK_FUNCTION(6, "MCUPM_JTAG_TMS"),
MTK_FUNCTION(7, "DBG_MON_B16")
),
MTK_PIN(
99, "GPIO99",
MTK_EINT_FUNCTION(0, 99),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO99"),
MTK_FUNCTION(1, "CMMCLK0"),
MTK_FUNCTION(4, "AUXIF_CLK"),
MTK_FUNCTION(5, "PTA_RXD"),
MTK_FUNCTION(6, "CONN_UART0_RXD"),
MTK_FUNCTION(7, "DBG_MON_B17")
),
MTK_PIN(
100, "GPIO100",
MTK_EINT_FUNCTION(0, 100),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO100"),
MTK_FUNCTION(1, "CMMCLK1"),
MTK_FUNCTION(4, "AUXIF_ST"),
MTK_FUNCTION(5, "PTA_TXD"),
MTK_FUNCTION(6, "CONN_UART0_TXD"),
MTK_FUNCTION(7, "DBG_MON_B18")
),
MTK_PIN(
101, "GPIO101",
MTK_EINT_FUNCTION(0, 101),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO101"),
MTK_FUNCTION(1, "CMFLASH"),
MTK_FUNCTION(2, "I2S1_LRCK"),
MTK_FUNCTION(3, "CONN_MCU_TCK"),
MTK_FUNCTION(4, "SPI5_MO"),
MTK_FUNCTION(6, "MCUPM_JTAG_TCK"),
MTK_FUNCTION(7, "DBG_MON_B19")
),
MTK_PIN(
102, "GPIO102",
MTK_EINT_FUNCTION(0, 102),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO102"),
MTK_FUNCTION(1, "CMVREF0"),
MTK_FUNCTION(2, "I2S1_DO"),
MTK_FUNCTION(3, "CONN_MCU_TDI"),
MTK_FUNCTION(4, "SPI5_CLK"),
MTK_FUNCTION(5, "AGPS_SYNC"),
MTK_FUNCTION(6, "MCUPM_JTAG_TDI"),
MTK_FUNCTION(7, "DBG_MON_B20")
),
MTK_PIN(
103, "GPIO103",
MTK_EINT_FUNCTION(0, 103),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO103"),
MTK_FUNCTION(1, "SCL2"),
MTK_FUNCTION(2, "TP_UTXD1_AO"),
MTK_FUNCTION(3, "MD_UTXD0"),
MTK_FUNCTION(4, "MD_UTXD1"),
MTK_FUNCTION(5, "TP_URTS2_AO"),
MTK_FUNCTION(6, "WIFI_TXD"),
MTK_FUNCTION(7, "DBG_MON_B25")
),
MTK_PIN(
104, "GPIO104",
MTK_EINT_FUNCTION(0, 104),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO104"),
MTK_FUNCTION(1, "SDA2"),
MTK_FUNCTION(2, "TP_URXD1_AO"),
MTK_FUNCTION(3, "MD_URXD0"),
MTK_FUNCTION(4, "MD_URXD1"),
MTK_FUNCTION(5, "TP_UCTS2_AO"),
MTK_FUNCTION(6, "WIFI_RXD"),
MTK_FUNCTION(7, "DBG_MON_B26")
),
MTK_PIN(
105, "GPIO105",
MTK_EINT_FUNCTION(0, 105),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO105"),
MTK_FUNCTION(1, "SCL4"),
MTK_FUNCTION(3, "MD_UTXD1"),
MTK_FUNCTION(4, "MD_UTXD0"),
MTK_FUNCTION(5, "TP_UTXD2_AO"),
MTK_FUNCTION(6, "PTA_TXD"),
MTK_FUNCTION(7, "DBG_MON_B27")
),
MTK_PIN(
106, "GPIO106",
MTK_EINT_FUNCTION(0, 106),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO106"),
MTK_FUNCTION(1, "SDA4"),
MTK_FUNCTION(3, "MD_URXD1"),
MTK_FUNCTION(4, "MD_URXD0"),
MTK_FUNCTION(5, "TP_URXD2_AO"),
MTK_FUNCTION(6, "PTA_RXD"),
MTK_FUNCTION(7, "DBG_MON_B28")
),
MTK_PIN(
107, "GPIO107",
MTK_EINT_FUNCTION(0, 107),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO107"),
MTK_FUNCTION(1, "UTXD1"),
MTK_FUNCTION(2, "MD_UTXD0"),
MTK_FUNCTION(3, "SDA_6306"),
MTK_FUNCTION(4, "KPCOL3"),
MTK_FUNCTION(5, "CMVREF0"),
MTK_FUNCTION(6, "URTS0"),
MTK_FUNCTION(7, "DBG_MON_B29")
),
MTK_PIN(
108, "GPIO108",
MTK_EINT_FUNCTION(0, 108),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO108"),
MTK_FUNCTION(1, "CMMCLK2"),
MTK_FUNCTION(2, "MD_INT0"),
MTK_FUNCTION(3, "CONN_MCU_DBGACK_N"),
MTK_FUNCTION(4, "KPCOL4"),
MTK_FUNCTION(6, "I2S3_MCK"),
MTK_FUNCTION(7, "DBG_MON_B30")
),
MTK_PIN(
109, "GPIO109",
MTK_EINT_FUNCTION(0, 109),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO109"),
MTK_FUNCTION(1, "URXD1"),
MTK_FUNCTION(2, "MD_URXD0"),
MTK_FUNCTION(3, "ANT_SEL7"),
MTK_FUNCTION(4, "KPCOL5"),
MTK_FUNCTION(5, "CMVREF1"),
MTK_FUNCTION(6, "UCTS0"),
MTK_FUNCTION(7, "DBG_MON_B31")
),
MTK_PIN(
110, "GPIO110",
MTK_EINT_FUNCTION(0, 110),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO110"),
MTK_FUNCTION(1, "ANT_SEL0"),
MTK_FUNCTION(2, "CLKM0"),
MTK_FUNCTION(3, "PWM3"),
MTK_FUNCTION(4, "MD_INT0"),
MTK_FUNCTION(5, "IDDIG"),
MTK_FUNCTION(6, "I2S3_BCK"),
MTK_FUNCTION(7, "DBG_MON_B13")
),
MTK_PIN(
111, "GPIO111",
MTK_EINT_FUNCTION(0, 111),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO111"),
MTK_FUNCTION(1, "ANT_SEL1"),
MTK_FUNCTION(2, "CLKM1"),
MTK_FUNCTION(3, "PWM4"),
MTK_FUNCTION(4, "PTA_RXD"),
MTK_FUNCTION(5, "CMVREF0"),
MTK_FUNCTION(6, "I2S3_LRCK"),
MTK_FUNCTION(7, "DBG_MON_B14")
),
MTK_PIN(
112, "GPIO112",
MTK_EINT_FUNCTION(0, 112),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO112"),
MTK_FUNCTION(1, "ANT_SEL2"),
MTK_FUNCTION(2, "CLKM2"),
MTK_FUNCTION(3, "PWM5"),
MTK_FUNCTION(4, "PTA_TXD"),
MTK_FUNCTION(5, "CMVREF1"),
MTK_FUNCTION(6, "I2S3_DO")
),
MTK_PIN(
113, "GPIO113",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO113"),
MTK_FUNCTION(1, "CONN_TOP_CLK")
),
MTK_PIN(
114, "GPIO114",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO114"),
MTK_FUNCTION(1, "CONN_TOP_DATA")
),
MTK_PIN(
115, "GPIO115",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO115"),
MTK_FUNCTION(1, "CONN_BT_CLK")
),
MTK_PIN(
116, "GPIO116",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO116"),
MTK_FUNCTION(1, "CONN_BT_DATA")
),
MTK_PIN(
117, "GPIO117",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO117"),
MTK_FUNCTION(1, "CONN_WF_CTRL0")
),
MTK_PIN(
118, "GPIO118",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO118"),
MTK_FUNCTION(1, "CONN_WF_CTRL1")
),
MTK_PIN(
119, "GPIO119",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO119"),
MTK_FUNCTION(1, "CONN_WF_CTRL2")
),
MTK_PIN(
120, "GPIO120",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO120"),
MTK_FUNCTION(1, "CONN_WB_PTA")
),
MTK_PIN(
121, "GPIO121",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO121"),
MTK_FUNCTION(1, "CONN_HRST_B")
),
MTK_PIN(
122, "GPIO122",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO122"),
MTK_FUNCTION(1, "MSDC0_CMD"),
MTK_FUNCTION(2, "MSDC0_CMD")
),
MTK_PIN(
123, "GPIO123",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO123"),
MTK_FUNCTION(1, "MSDC0_DAT0"),
MTK_FUNCTION(2, "MSDC0_DAT4")
),
MTK_PIN(
124, "GPIO124",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO124"),
MTK_FUNCTION(1, "MSDC0_CLK"),
MTK_FUNCTION(2, "MSDC0_CLK")
),
MTK_PIN(
125, "GPIO125",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO125"),
MTK_FUNCTION(1, "MSDC0_DAT2"),
MTK_FUNCTION(2, "MSDC0_DAT5")
),
MTK_PIN(
126, "GPIO126",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO126"),
MTK_FUNCTION(1, "MSDC0_DAT4"),
MTK_FUNCTION(2, "MSDC0_DAT2")
),
MTK_PIN(
127, "GPIO127",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO127"),
MTK_FUNCTION(1, "MSDC0_DAT6"),
MTK_FUNCTION(2, "MSDC0_DAT1")
),
MTK_PIN(
128, "GPIO128",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO128"),
MTK_FUNCTION(1, "MSDC0_DAT1"),
MTK_FUNCTION(2, "MSDC0_DAT6")
),
MTK_PIN(
129, "GPIO129",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO129"),
MTK_FUNCTION(1, "MSDC0_DAT5"),
MTK_FUNCTION(2, "MSDC0_DAT0")
),
MTK_PIN(
130, "GPIO130",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO130"),
MTK_FUNCTION(1, "MSDC0_DAT7"),
MTK_FUNCTION(2, "MSDC0_DAT7")
),
MTK_PIN(
131, "GPIO131",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO131"),
MTK_FUNCTION(1, "MSDC0_DSL"),
MTK_FUNCTION(2, "MSDC0_DSL")
),
MTK_PIN(
132, "GPIO132",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO132"),
MTK_FUNCTION(1, "MSDC0_DAT3"),
MTK_FUNCTION(2, "MSDC0_DAT3")
),
MTK_PIN(
133, "GPIO133",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO133"),
MTK_FUNCTION(1, "MSDC0_RSTB"),
MTK_FUNCTION(2, "MSDC0_RSTB")
),
MTK_PIN(
134, "GPIO134",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO134"),
MTK_FUNCTION(1, "RTC32K_CK")
),
MTK_PIN(
135, "GPIO135",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO135"),
MTK_FUNCTION(1, "WATCHDOG")
),
MTK_PIN(
136, "GPIO136",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO136"),
MTK_FUNCTION(1, "AUD_CLK_MOSI"),
MTK_FUNCTION(2, "AUD_CLK_MISO"),
MTK_FUNCTION(3, "I2S1_MCK")
),
MTK_PIN(
137, "GPIO137",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO137"),
MTK_FUNCTION(1, "AUD_SYNC_MOSI"),
MTK_FUNCTION(2, "AUD_SYNC_MISO"),
MTK_FUNCTION(3, "I2S1_BCK")
),
MTK_PIN(
138, "GPIO138",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO138"),
MTK_FUNCTION(1, "AUD_DAT_MOSI0"),
MTK_FUNCTION(2, "AUD_DAT_MISO0"),
MTK_FUNCTION(3, "I2S1_LRCK")
),
MTK_PIN(
139, "GPIO139",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO139"),
MTK_FUNCTION(1, "AUD_DAT_MOSI1"),
MTK_FUNCTION(2, "AUD_DAT_MISO1"),
MTK_FUNCTION(3, "I2S1_DO")
),
MTK_PIN(
140, "GPIO140",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO140"),
MTK_FUNCTION(1, "AUD_CLK_MISO"),
MTK_FUNCTION(2, "AUD_CLK_MOSI"),
MTK_FUNCTION(3, "I2S2_MCK")
),
MTK_PIN(
141, "GPIO141",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO141"),
MTK_FUNCTION(1, "AUD_SYNC_MISO"),
MTK_FUNCTION(2, "AUD_SYNC_MOSI"),
MTK_FUNCTION(3, "I2S2_BCK")
),
MTK_PIN(
142, "GPIO142",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO142"),
MTK_FUNCTION(1, "AUD_DAT_MISO0"),
MTK_FUNCTION(2, "AUD_DAT_MOSI0"),
MTK_FUNCTION(3, "I2S2_LRCK")
),
MTK_PIN(
143, "GPIO143",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO143"),
MTK_FUNCTION(1, "AUD_DAT_MISO1"),
MTK_FUNCTION(2, "AUD_DAT_MOSI1"),
MTK_FUNCTION(3, "I2S2_DI")
),
MTK_PIN(
144, "GPIO144",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO144"),
MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
MTK_FUNCTION(2, "PWRAP_SPI0_MO")
),
MTK_PIN(
145, "GPIO145",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO145"),
MTK_FUNCTION(1, "PWRAP_SPI0_CSN")
),
MTK_PIN(
146, "GPIO146",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO146"),
MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
MTK_FUNCTION(2, "PWRAP_SPI0_MI")
),
MTK_PIN(
147, "GPIO147",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO147"),
MTK_FUNCTION(1, "PWRAP_SPI0_CK")
),
MTK_PIN(
148, "GPIO148",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO148"),
MTK_FUNCTION(1, "SRCLKENA0")
),
MTK_PIN(
149, "GPIO149",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO149"),
MTK_FUNCTION(1, "SRCLKENA1")
),
MTK_PIN(
150, "GPIO150",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO150"),
MTK_FUNCTION(1, "PWM0"),
MTK_FUNCTION(2, "CMFLASH"),
MTK_FUNCTION(3, "ANT_SEL3"),
MTK_FUNCTION(5, "MD_URXD0"),
MTK_FUNCTION(6, "TP_URXD2_AO")
),
MTK_PIN(
151, "GPIO151",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO151"),
MTK_FUNCTION(1, "PWM1"),
MTK_FUNCTION(2, "CMVREF0"),
MTK_FUNCTION(3, "ANT_SEL4"),
MTK_FUNCTION(5, "MD_UTXD0"),
MTK_FUNCTION(6, "TP_UTXD2_AO")
),
MTK_PIN(
152, "GPIO152",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO152"),
MTK_FUNCTION(1, "PWM2"),
MTK_FUNCTION(2, "CMVREF1"),
MTK_FUNCTION(3, "ANT_SEL5"),
MTK_FUNCTION(5, "MD_URXD1"),
MTK_FUNCTION(6, "TP_UCTS1_AO")
),
MTK_PIN(
153, "GPIO153",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO153"),
MTK_FUNCTION(1, "PWM3"),
MTK_FUNCTION(2, "CLKM0"),
MTK_FUNCTION(3, "ANT_SEL6"),
MTK_FUNCTION(5, "MD_UTXD1"),
MTK_FUNCTION(6, "TP_URTS1_AO")
),
MTK_PIN(
154, "GPIO154",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO154"),
MTK_FUNCTION(1, "PWM5"),
MTK_FUNCTION(2, "CLKM2"),
MTK_FUNCTION(3, "USB_DRVVBUS"),
MTK_FUNCTION(5, "PTA_TXD"),
MTK_FUNCTION(6, "CONN_UART0_TXD")
),
MTK_PIN(
155, "GPIO155",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO155"),
MTK_FUNCTION(1, "SPI0_MI"),
MTK_FUNCTION(2, "IDDIG"),
MTK_FUNCTION(3, "AGPS_SYNC"),
MTK_FUNCTION(4, "TP_GPIO0_AO"),
MTK_FUNCTION(5, "MFG_JTAG_TDO"),
MTK_FUNCTION(6, "DFD_TDO"),
MTK_FUNCTION(7, "JTDO_SEL1")
),
MTK_PIN(
156, "GPIO156",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO156"),
MTK_FUNCTION(1, "SPI0_CSB"),
MTK_FUNCTION(2, "USB_DRVVBUS"),
MTK_FUNCTION(3, "DVFSRC_EXT_REQ"),
MTK_FUNCTION(4, "TP_GPIO1_AO"),
MTK_FUNCTION(5, "MFG_JTAG_TMS"),
MTK_FUNCTION(6, "DFD_TMS"),
MTK_FUNCTION(7, "JTMS_SEL1")
),
MTK_PIN(
157, "GPIO157",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO157"),
MTK_FUNCTION(1, "SPI0_MO"),
MTK_FUNCTION(2, "MD_INT1_C2K_UIM0_HOT_PLUG"),
MTK_FUNCTION(3, "CLKM0"),
MTK_FUNCTION(4, "TP_GPIO2_AO"),
MTK_FUNCTION(5, "MFG_JTAG_TDI"),
MTK_FUNCTION(6, "DFD_TDI"),
MTK_FUNCTION(7, "JTDI_SEL1")
),
MTK_PIN(
158, "GPIO158",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO158"),
MTK_FUNCTION(1, "SPI0_CLK"),
MTK_FUNCTION(2, "MD_INT2_C2K_UIM1_HOT_PLUG"),
MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
MTK_FUNCTION(4, "TP_GPIO3_AO"),
MTK_FUNCTION(5, "MFG_JTAG_TCK"),
MTK_FUNCTION(6, "DFD_TCK_XI"),
MTK_FUNCTION(7, "JTCK_SEL1")
),
MTK_PIN(
159, "GPIO159",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO159"),
MTK_FUNCTION(1, "PWM4"),
MTK_FUNCTION(2, "CLKM1"),
MTK_FUNCTION(3, "ANT_SEL7"),
MTK_FUNCTION(5, "PTA_RXD"),
MTK_FUNCTION(6, "CONN_UART0_RXD")
),
MTK_PIN(
160, "GPIO160",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO160"),
MTK_FUNCTION(1, "CLKM0"),
MTK_FUNCTION(2, "PWM2"),
MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
MTK_FUNCTION(4, "TP_GPIO5_AO"),
MTK_FUNCTION(5, "AGPS_SYNC"),
MTK_FUNCTION(6, "DVFSRC_EXT_REQ")
),
MTK_PIN(
161, "GPIO161",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO161"),
MTK_FUNCTION(1, "SCL6"),
MTK_FUNCTION(2, "SCL_6306"),
MTK_FUNCTION(3, "TP_GPIO6_AO"),
MTK_FUNCTION(4, "KPCOL6"),
MTK_FUNCTION(5, "PTA_RXD"),
MTK_FUNCTION(6, "CONN_UART0_RXD")
),
MTK_PIN(
162, "GPIO162",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO162"),
MTK_FUNCTION(1, "SDA6"),
MTK_FUNCTION(2, "SDA_6306"),
MTK_FUNCTION(3, "TP_GPIO7_AO"),
MTK_FUNCTION(4, "KPCOL7"),
MTK_FUNCTION(5, "PTA_TXD"),
MTK_FUNCTION(6, "CONN_UART0_TXD")
),
MTK_PIN(
163, "GPIO163",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO163")
),
MTK_PIN(
164, "GPIO164",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO164")
),
MTK_PIN(
165, "GPIO165",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO165")
),
MTK_PIN(
166, "GPIO166",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO166")
),
MTK_PIN(
167, "GPIO167",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO167")
),
MTK_PIN(
168, "GPIO168",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO168")
),
MTK_PIN(
169, "GPIO169",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO169")
),
MTK_PIN(
170, "GPIO170",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO170")
),
MTK_PIN(
171, "GPIO171",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO171")
),
MTK_PIN(
172, "GPIO172",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO172")
),
MTK_PIN(
173, "GPIO173",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO173")
),
MTK_PIN(
174, "GPIO174",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO174")
),
MTK_PIN(
175, "GPIO175",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO175")
),
MTK_PIN(
176, "GPIO176",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO176")
),
MTK_PIN(
177, "GPIO177",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO177")
),
MTK_PIN(
178, "GPIO178",
MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO178")
),
MTK_PIN(
179, "GPIO179",
MTK_EINT_FUNCTION(0, 151),
DRV_GRP4,
MTK_FUNCTION(0, "GPIO179")
),
};
#endif /* __PINCTRL_MTK_MT6765_H */
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */
#ifndef _ICE_FLEX_PIPE_H_
#define _ICE_FLEX_PIPE_H_
#include "ice_type.h"
#define ICE_FDIR_REG_SET_SIZE 4
int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off);
void
ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
unsigned long *bm);
void
ice_init_prof_result_bm(struct ice_hw *hw);
int
ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list);
int
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
enum ice_tunnel_type type);
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
int ice_set_dvm_boost_entries(struct ice_hw *hw);
/* Rx parser PTYPE functions */
bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
/* XLT2/VSI group functions */
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
int
ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
u16 dest_vsi, u16 fdir_vsi, u64 hdl);
enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_ddp_state
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
bool ice_is_init_pkg_successful(enum ice_ddp_state state);
int ice_init_hw_tbls(struct ice_hw *hw);
void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
struct ice_buf_build *
ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
void **section);
struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
#endif /* _ICE_FLEX_PIPE_H_ */
|
// SPDX-License-Identifier: GPL-2.0-only
/* Marvell RVU Admin Function driver
*
* Copyright (C) 2020 Marvell.
*
*/
#include <linux/bitfield.h>
#include <linux/pci.h>
#include "rvu_struct.h"
#include "rvu_reg.h"
#include "mbox.h"
#include "rvu.h"
/* CPT PF device id */
#define PCI_DEVID_OTX2_CPT_PF 0xA0FD
#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
/* Length of initial context fetch in 128 byte words */
#define CPT_CTX_ILEN 1ULL
/* Interrupt vector count of CPT RVU and RAS interrupts */
#define CPT_10K_AF_RVU_RAS_INT_VEC_CNT 2
/* Default CPT_AF_RXC_CFG1:max_rxc_icb_cnt */
#define CPT_DFLT_MAX_RXC_ICB_CNT 0xC0ULL
#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
u64 free_sts = 0, busy_sts = 0; \
typeof(rsp) _rsp = rsp; \
u32 e, i; \
\
for (e = (e_min), i = 0; e < (e_max); e++, i++) { \
reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \
if (reg & 0x1) \
busy_sts |= 1ULL << i; \
\
if (reg & 0x2) \
free_sts |= 1ULL << i; \
} \
(_rsp)->busy_sts_##etype = busy_sts; \
(_rsp)->free_sts_##etype = free_sts; \
})
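/*
 * Usage sketch (illustration only, not part of the driver): the macro
 * above expects 'rvu', 'blkaddr' and a scratch u64 'reg' to be in the
 * caller's scope, and fills the busy_sts_<etype>/free_sts_<etype> pair
 * of the response.  The struct below is a stand-in for illustration.
 */
struct example_eng_sts_rsp {
	u64 busy_sts_ae;
	u64 free_sts_ae;
};

static void __maybe_unused example_fill_ae_sts(struct rvu *rvu, int blkaddr,
					       struct example_eng_sts_rsp *rsp,
					       u32 ae_min, u32 ae_max)
{
	u64 reg;

	/* Fills rsp->busy_sts_ae and rsp->free_sts_ae for engines [ae_min, ae_max). */
	cpt_get_eng_sts(ae_min, ae_max, rsp, ae);
}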
#define MAX_AE GENMASK_ULL(47, 32)
#define MAX_IE GENMASK_ULL(31, 16)
#define MAX_SE GENMASK_ULL(15, 0)
static u16 cpt_max_engines_get(struct rvu *rvu)
{
u16 max_ses, max_ies, max_aes;
u64 reg;
reg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS1);
max_ses = FIELD_GET(MAX_SE, reg);
max_ies = FIELD_GET(MAX_IE, reg);
max_aes = FIELD_GET(MAX_AE, reg);
return max_ses + max_ies + max_aes;
}
/* The number of FLT interrupt vectors depends on the number of engines the
 * chip has. Each FLT vector covers 64 engines.
 */
static int cpt_10k_flt_nvecs_get(struct rvu *rvu, u16 max_engs)
{
int flt_vecs;
flt_vecs = DIV_ROUND_UP(max_engs, 64);
if (flt_vecs > CPT_10K_AF_INT_VEC_FLT_MAX) {
dev_warn_once(rvu->dev, "flt_vecs:%d exceeds the max vectors:%d\n",
flt_vecs, CPT_10K_AF_INT_VEC_FLT_MAX);
flt_vecs = CPT_10K_AF_INT_VEC_FLT_MAX;
}
return flt_vecs;
}
static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
{
struct rvu_block *block = ptr;
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
u64 reg, val;
int i, eng;
u8 grp;
reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec));
dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg);
i = -1;
while ((i = find_next_bit((unsigned long *)®, 64, i + 1)) < 64) {
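		/*
		 * Each FLT vector covers 64 engines: translate the bit index
		 * within this vector into a global engine number.
		 */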
switch (vec) {
case 0:
eng = i;
break;
case 1:
eng = i + 64;
break;
case 2:
eng = i + 128;
break;
}
grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF;
		/* Disable and re-enable the engine that triggered the fault */
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0);
val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng));
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL);
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
spin_lock(&rvu->cpt_intr_lock);
block->cpt_flt_eng_map[vec] |= BIT_ULL(i);
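		/*
		 * STS bit 0 = busy, bit 1 = free (see cpt_get_eng_sts() above);
		 * either one means the engine responded after the reset, so
		 * record it as recovered.
		 */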
val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng));
val = val & 0x3;
if (val == 0x1 || val == 0x2)
block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i);
spin_unlock(&rvu->cpt_intr_lock);
}
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);
return IRQ_HANDLED;
}
static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr)
{
return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr);
}
static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr)
{
return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr);
}
static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr)
{
return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr);
}
static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
{
struct rvu_block *block = ptr;
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
u64 reg;
reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg);
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg);
return IRQ_HANDLED;
}
static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr)
{
struct rvu_block *block = ptr;
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
u64 reg;
reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg);
return IRQ_HANDLED;
}
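/* request_irq() wrapper that also marks the vector in rvu->irq_allocated */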
static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs,
irq_handler_t handler,
const char *name)
{
struct rvu *rvu = block->rvu;
int ret;
ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
name, block);
if (ret) {
dev_err(rvu->dev, "RVUAF: %s irq registration failed", name);
return ret;
}
WARN_ON(rvu->irq_allocated[irq_offs]);
rvu->irq_allocated[irq_offs] = true;
return 0;
}
static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
{
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
int i, flt_vecs;
u16 max_engs;
u8 nr;
max_engs = cpt_max_engines_get(rvu);
flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
/* Disable all CPT AF interrupts */
for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) {
nr = (max_engs > 64) ? 64 : max_engs;
max_engs -= nr;
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i),
INTR_MASK(nr));
}
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
/* CPT AF interrupt vectors are flt_int, rvu_int and ras_int. */
for (i = 0; i < flt_vecs + CPT_10K_AF_RVU_RAS_INT_VEC_CNT; i++)
if (rvu->irq_allocated[off + i]) {
free_irq(pci_irq_vector(rvu->pdev, off + i), block);
rvu->irq_allocated[off + i] = false;
}
}
static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
int i, offs;
if (!is_block_implemented(rvu->hw, blkaddr))
return;
offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
if (!offs) {
dev_warn(rvu->dev,
"Failed to get CPT_AF_INT vector offsets\n");
return;
}
block = &hw->block[blkaddr];
if (!is_rvu_otx2(rvu))
return cpt_10k_unregister_interrupts(block, offs);
/* Disable all CPT AF interrupts */
for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL);
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
for (i = 0; i < CPT_AF_INT_VEC_CNT; i++)
if (rvu->irq_allocated[offs + i]) {
free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
rvu->irq_allocated[offs + i] = false;
}
}
void rvu_cpt_unregister_interrupts(struct rvu *rvu)
{
cpt_unregister_interrupts(rvu, BLKADDR_CPT0);
cpt_unregister_interrupts(rvu, BLKADDR_CPT1);
}
static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
int rvu_intr_vec, ras_intr_vec;
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
irq_handler_t flt_fn;
int i, ret, flt_vecs;
u16 max_engs;
u8 nr;
max_engs = cpt_max_engines_get(rvu);
flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) {
sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
switch (i) {
case CPT_10K_AF_INT_VEC_FLT0:
flt_fn = rvu_cpt_af_flt0_intr_handler;
break;
case CPT_10K_AF_INT_VEC_FLT1:
flt_fn = rvu_cpt_af_flt1_intr_handler;
break;
case CPT_10K_AF_INT_VEC_FLT2:
flt_fn = rvu_cpt_af_flt2_intr_handler;
break;
}
ret = rvu_cpt_do_register_interrupt(block, off + i,
flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]);
if (ret)
goto err;
nr = (max_engs > 64) ? 64 : max_engs;
max_engs -= nr;
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i),
INTR_MASK(nr));
}
rvu_intr_vec = flt_vecs;
ras_intr_vec = rvu_intr_vec + 1;
ret = rvu_cpt_do_register_interrupt(block, off + rvu_intr_vec,
rvu_cpt_af_rvu_intr_handler,
"CPTAF RVU");
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
ret = rvu_cpt_do_register_interrupt(block, off + ras_intr_vec,
rvu_cpt_af_ras_intr_handler,
"CPTAF RAS");
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
return 0;
err:
rvu_cpt_unregister_interrupts(rvu);
return ret;
}
static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
irq_handler_t flt_fn;
int i, offs, ret = 0;
if (!is_block_implemented(rvu->hw, blkaddr))
return 0;
block = &hw->block[blkaddr];
offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
if (!offs) {
dev_warn(rvu->dev,
"Failed to get CPT_AF_INT vector offsets\n");
return 0;
}
if (!is_rvu_otx2(rvu))
return cpt_10k_register_interrupts(block, offs);
for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i);
switch (i) {
case CPT_AF_INT_VEC_FLT0:
flt_fn = rvu_cpt_af_flt0_intr_handler;
break;
case CPT_AF_INT_VEC_FLT1:
flt_fn = rvu_cpt_af_flt1_intr_handler;
break;
}
ret = rvu_cpt_do_register_interrupt(block, offs + i,
flt_fn, &rvu->irq_name[(offs + i) * NAME_SIZE]);
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
}
ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
rvu_cpt_af_rvu_intr_handler,
"CPTAF RVU");
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS,
rvu_cpt_af_ras_intr_handler,
"CPTAF RAS");
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
return 0;
err:
rvu_cpt_unregister_interrupts(rvu);
return ret;
}
int rvu_cpt_register_interrupts(struct rvu *rvu)
{
int ret;
ret = cpt_register_interrupts(rvu, BLKADDR_CPT0);
if (ret)
return ret;
return cpt_register_interrupts(rvu, BLKADDR_CPT1);
}
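/* Scan the PFs on this PCI domain for the CPT PF device ID; returns the
 * PF number or -1 if no CPT PF is found.
 */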
static int get_cpt_pf_num(struct rvu *rvu)
{
int i, domain_nr, cpt_pf_num = -1;
struct pci_dev *pdev;
domain_nr = pci_domain_nr(rvu->pdev->bus);
for (i = 0; i < rvu->hw->total_pfs; i++) {
pdev = pci_get_domain_bus_and_slot(domain_nr, i + 1, 0);
if (!pdev)
continue;
if (pdev->device == PCI_DEVID_OTX2_CPT_PF ||
pdev->device == PCI_DEVID_OTX2_CPT10K_PF) {
cpt_pf_num = i;
put_device(&pdev->dev);
break;
}
put_device(&pdev->dev);
}
return cpt_pf_num;
}
static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = rvu->cpt_pf_num;
if (rvu_get_pf(pcifunc) != cpt_pf_num)
return false;
if (pcifunc & RVU_PFVF_FUNC_MASK)
return false;
return true;
}
static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = rvu->cpt_pf_num;
if (rvu_get_pf(pcifunc) != cpt_pf_num)
return false;
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
return false;
return true;
}
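/* Map a (possibly zero) requested block address to BLKADDR_CPT0/CPT1 */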
static int validate_and_get_cpt_blkaddr(int req_blkaddr)
{
int blkaddr;
blkaddr = req_blkaddr ? req_blkaddr : BLKADDR_CPT0;
if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
return -EINVAL;
return blkaddr;
}
int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
struct cpt_lf_alloc_req_msg *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
int cptlf, blkaddr;
int num_lfs, slot;
u64 val;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
return blkaddr;
if (req->eng_grpmsk == 0x0)
return CPT_AF_ERR_GRP_INVALID;
block = &rvu->hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
block->addr);
if (!num_lfs)
return CPT_AF_ERR_LF_INVALID;
/* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
if (req->nix_pf_func) {
/* If default, use 'this' CPTLF's PFFUNC */
if (req->nix_pf_func == RVU_DEFAULT_PF_FUNC)
req->nix_pf_func = pcifunc;
if (!is_pffunc_map_valid(rvu, req->nix_pf_func, BLKTYPE_NIX))
return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
}
/* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
if (req->sso_pf_func) {
/* If default, use 'this' CPTLF's PFFUNC */
if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC)
req->sso_pf_func = pcifunc;
if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO))
return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
}
for (slot = 0; slot < num_lfs; slot++) {
cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
if (cptlf < 0)
return CPT_AF_ERR_LF_INVALID;
/* Set CPT LF group and priority */
val = (u64)req->eng_grpmsk << 48 | 1;
if (!is_rvu_otx2(rvu)) {
if (req->ctx_ilen_valid)
val |= (req->ctx_ilen << 17);
else
val |= (CPT_CTX_ILEN << 17);
}
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
/* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set
* on reset.
*/
val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32));
val |= ((u64)req->nix_pf_func << 48 |
(u64)req->sso_pf_func << 32);
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
}
return 0;
}
static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
{
u16 pcifunc = req->hdr.pcifunc;
int num_lfs, cptlf, slot, err;
struct rvu_block *block;
block = &rvu->hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
block->addr);
if (!num_lfs)
return 0;
for (slot = 0; slot < num_lfs; slot++) {
cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
if (cptlf < 0)
return CPT_AF_ERR_LF_INVALID;
/* Perform teardown */
rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot);
/* Reset LF */
err = rvu_lf_reset(rvu, block, cptlf);
if (err) {
dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
block->addr, cptlf);
}
}
return 0;
}
int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
int ret;
ret = cpt_lf_free(rvu, req, BLKADDR_CPT0);
if (ret)
return ret;
if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
ret = cpt_lf_free(rvu, req, BLKADDR_CPT1);
return ret;
}
static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
struct cpt_inline_ipsec_cfg_msg *req)
{
u16 sso_pf_func = req->sso_pf_func;
u8 nix_sel;
u64 val;
val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
if (req->enable && (val & BIT_ULL(16))) {
/* The IPsec inline outbound path is already enabled for this
 * CPT LF. The HRM states that the inline inbound and outbound
 * paths must not be enabled at the same time for a given CPT LF.
 */
return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
}
/* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0;
/* Enable CPT LF for IPsec inline inbound operations */
if (req->enable)
val |= BIT_ULL(9);
else
val &= ~BIT_ULL(9);
val |= (u64)nix_sel << 8;
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
if (sso_pf_func) {
/* Set SSO_PF_FUNC */
val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
val |= (u64)sso_pf_func << 32;
val |= (u64)req->nix_pf_func << 48;
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
}
if (req->sso_pf_func_ovrd)
/* Set SSO_PF_FUNC_OVRD for inline IPSec */
rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);
/* Configure the X2P Link register with the cpt base channel number and
* range of channels it should propagate to X2P
*/
if (!is_rvu_otx2(rvu)) {
val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
val |= (u64)rvu->hw->cpt_chan_base;
rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
}
return 0;
}
static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf,
struct cpt_inline_ipsec_cfg_msg *req)
{
u16 nix_pf_func = req->nix_pf_func;
int nix_blkaddr;
u8 nix_sel;
u64 val;
val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
if (req->enable && (val & BIT_ULL(9))) {
/* The IPsec inline inbound path is already enabled for this
 * CPT LF. The HRM states that the inline inbound and outbound
 * paths must not be enabled at the same time for a given CPT LF.
 */
return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
}
/* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
/* Enable CPT LF for IPsec inline outbound operations */
if (req->enable)
val |= BIT_ULL(16);
else
val &= ~BIT_ULL(16);
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
if (nix_pf_func) {
/* Set NIX_PF_FUNC */
val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
val |= (u64)nix_pf_func << 48;
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func);
nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;
val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
val |= (u64)nix_sel << 8;
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
}
return 0;
}
int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
struct cpt_inline_ipsec_cfg_msg *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
int cptlf, blkaddr, ret;
u16 actual_slot;
blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
req->slot, &actual_slot);
if (blkaddr < 0)
return CPT_AF_ERR_LF_INVALID;
block = &rvu->hw->block[blkaddr];
cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
if (cptlf < 0)
return CPT_AF_ERR_LF_INVALID;
switch (req->dir) {
case CPT_INLINE_INBOUND:
ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req);
break;
case CPT_INLINE_OUTBOUND:
ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req);
break;
default:
return CPT_AF_ERR_PARAM;
}
return ret;
}
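/* Allow only a whitelist of CPT AF registers to be accessed over the mbox,
 * and translate per-LF CTL/CTL2 offsets from the requester's local slot to
 * the global CPT LF number.
 */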
static bool validate_and_update_reg_offset(struct rvu *rvu,
struct cpt_rd_wr_reg_msg *req,
u64 *reg_offset)
{
u64 offset = req->reg_offset;
int blkaddr, num_lfs, lf;
struct rvu_block *block;
struct rvu_pfvf *pfvf;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
return false;
/* Registers that can be accessed from PF/VF */
if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
(offset & 0xFF000) == CPT_AF_LFX_CTL2(0)) {
if (offset & 7)
return false;
lf = (offset & 0xFFF) >> 3;
block = &rvu->hw->block[blkaddr];
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
if (lf >= num_lfs)
/* Slot is not valid for that PF/VF */
return false;
/* Translate local LF used by VFs to global CPT LF */
lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr],
req->hdr.pcifunc, lf);
if (lf < 0)
return false;
/* Translate local LF's offset to global CPT LF's offset to
* access LFX register.
*/
*reg_offset = (req->reg_offset & 0xFF000) + (lf << 3);
return true;
} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
/* Registers that can be accessed from PF */
switch (offset) {
case CPT_AF_DIAG:
case CPT_AF_CTL:
case CPT_AF_PF_FUNC:
case CPT_AF_BLK_RST:
case CPT_AF_CONSTANTS1:
case CPT_AF_CTX_FLUSH_TIMER:
case CPT_AF_RXC_CFG1:
return true;
}
switch (offset & 0xFF000) {
case CPT_AF_EXEX_STS(0):
case CPT_AF_EXEX_CTL(0):
case CPT_AF_EXEX_CTL2(0):
case CPT_AF_EXEX_UCODE_BASE(0):
if (offset & 7)
return false;
break;
default:
return false;
}
return true;
}
return false;
}
int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
struct cpt_rd_wr_reg_msg *req,
struct cpt_rd_wr_reg_msg *rsp)
{
u64 offset = req->reg_offset;
int blkaddr;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
return blkaddr;
/* This message is accepted only if sent from CPT PF/VF */
if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
!is_cpt_vf(rvu, req->hdr.pcifunc))
return CPT_AF_ERR_ACCESS_DENIED;
if (!validate_and_update_reg_offset(rvu, req, &offset))
return CPT_AF_ERR_ACCESS_DENIED;
rsp->reg_offset = req->reg_offset;
rsp->ret_val = req->ret_val;
rsp->is_write = req->is_write;
if (req->is_write)
rvu_write64(rvu, blkaddr, offset, req->val);
else
rsp->val = rvu_read64(rvu, blkaddr, offset);
return 0;
}
static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
if (is_rvu_otx2(rvu))
return;
rsp->ctx_mis_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_MIS_PC);
rsp->ctx_hit_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_HIT_PC);
rsp->ctx_aop_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_AOP_PC);
rsp->ctx_aop_lat_pc = rvu_read64(rvu, blkaddr,
CPT_AF_CTX_AOP_LATENCY_PC);
rsp->ctx_ifetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_IFETCH_PC);
rsp->ctx_ifetch_lat_pc = rvu_read64(rvu, blkaddr,
CPT_AF_CTX_IFETCH_LATENCY_PC);
rsp->ctx_ffetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
rsp->ctx_ffetch_lat_pc = rvu_read64(rvu, blkaddr,
CPT_AF_CTX_FFETCH_LATENCY_PC);
rsp->ctx_wback_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
rsp->ctx_wback_lat_pc = rvu_read64(rvu, blkaddr,
CPT_AF_CTX_FFETCH_LATENCY_PC);
rsp->ctx_psh_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
rsp->ctx_psh_lat_pc = rvu_read64(rvu, blkaddr,
CPT_AF_CTX_FFETCH_LATENCY_PC);
rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);
rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
if (!hw->cap.cpt_rxc)
return;
rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
}
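/* Read the SE/IE/AE engine ranges from CPT_AF_CONSTANTS1 and collect the
 * busy/free status bitmap for each engine type.
 */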
static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
{
u16 max_ses, max_ies, max_aes;
u32 e_min = 0, e_max = 0;
u64 reg;
reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
max_ses = reg & 0xffff;
max_ies = (reg >> 16) & 0xffff;
max_aes = (reg >> 32) & 0xffff;
/* Get AE status */
e_min = max_ses + max_ies;
e_max = max_ses + max_ies + max_aes;
cpt_get_eng_sts(e_min, e_max, rsp, ae);
/* Get SE status */
e_min = 0;
e_max = max_ses;
cpt_get_eng_sts(e_min, e_max, rsp, se);
/* Get IE status */
e_min = max_ses;
e_max = max_ses + max_ies;
cpt_get_eng_sts(e_min, e_max, rsp, ie);
}
int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
struct cpt_sts_rsp *rsp)
{
int blkaddr;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
return blkaddr;
/* This message is accepted only if sent from CPT PF/VF */
if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
!is_cpt_vf(rvu, req->hdr.pcifunc))
return CPT_AF_ERR_ACCESS_DENIED;
get_ctx_pc(rvu, rsp, blkaddr);
/* Get CPT engines status */
get_eng_sts(rvu, rsp, blkaddr);
/* Read CPT instruction PC registers */
rsp->inst_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
rsp->inst_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
rsp->rd_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
rsp->rd_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
rsp->rd_uc_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
rsp->active_cycles_pc = rvu_read64(rvu, blkaddr,
CPT_AF_ACTIVE_CYCLES_PC);
rsp->exe_err_info = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
rsp->cptclk_cnt = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
rsp->diag = rvu_read64(rvu, blkaddr, CPT_AF_DIAG);
return 0;
}
#define RXC_ZOMBIE_THRES GENMASK_ULL(59, 48)
#define RXC_ZOMBIE_LIMIT GENMASK_ULL(43, 32)
#define RXC_ACTIVE_THRES GENMASK_ULL(27, 16)
#define RXC_ACTIVE_LIMIT GENMASK_ULL(11, 0)
#define RXC_ACTIVE_COUNT GENMASK_ULL(60, 48)
#define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48)
static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
int blkaddr, struct cpt_rxc_time_cfg_req *save)
{
u64 dfrg_reg;
if (save) {
/* Save older config */
dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg);
save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg);
save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg);
save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg);
save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
}
dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
dfrg_reg |= FIELD_PREP(RXC_ACTIVE_LIMIT, req->active_limit);
rvu_write64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG, req->step);
rvu_write64(rvu, blkaddr, CPT_AF_RXC_DFRG, dfrg_reg);
}
int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
struct cpt_rxc_time_cfg_req *req,
struct msg_rsp *rsp)
{
int blkaddr;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
return blkaddr;
/* This message is accepted only if sent from CPT PF/VF */
if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
!is_cpt_vf(rvu, req->hdr.pcifunc))
return CPT_AF_ERR_ACCESS_DENIED;
cpt_rxc_time_cfg(rvu, req, blkaddr, NULL);
return 0;
}
int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
}
int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
int cptlf, blkaddr, ret;
u16 actual_slot;
u64 ctl, ctl2;
blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
req->slot, &actual_slot);
if (blkaddr < 0)
return CPT_AF_ERR_LF_INVALID;
block = &rvu->hw->block[blkaddr];
cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
if (cptlf < 0)
return CPT_AF_ERR_LF_INVALID;
ctl = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
ctl2 = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
ret = rvu_lf_reset(rvu, block, cptlf);
if (ret)
dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
block->addr, cptlf);
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), ctl);
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), ctl2);
return 0;
}
int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req,
struct cpt_flt_eng_info_rsp *rsp)
{
struct rvu_block *block;
unsigned long flags;
int blkaddr, vec;
int flt_vecs;
u16 max_engs;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
return blkaddr;
block = &rvu->hw->block[blkaddr];
max_engs = cpt_max_engines_get(rvu);
flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
for (vec = 0; vec < flt_vecs; vec++) {
spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
if (req->reset) {
block->cpt_flt_eng_map[vec] = 0x0;
block->cpt_rcvrd_eng_map[vec] = 0x0;
}
spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags);
}
return 0;
}
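/* Drain the RXC: temporarily program minimal time limits, poll until the
 * active and zombie entry counts drop to zero (or the poll times out), and
 * then restore the previous configuration.
 */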
static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
{
struct cpt_rxc_time_cfg_req req, prev;
struct rvu_hwinfo *hw = rvu->hw;
int timeout = 2000;
u64 reg;
if (!hw->cap.cpt_rxc)
return;
/* Set time limit to minimum values, so that rxc entries will be
* flushed out quickly.
*/
req.step = 1;
req.zombie_thres = 1;
req.zombie_limit = 1;
req.active_thres = 1;
req.active_limit = 1;
cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev);
do {
reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
udelay(1);
if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
timeout--;
else
break;
} while (timeout);
if (timeout == 0)
dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");
timeout = 2000;
do {
reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
udelay(1);
if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
timeout--;
else
break;
} while (timeout);
if (timeout == 0)
dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
/* Restore config */
cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL);
}
#define INFLIGHT GENMASK_ULL(8, 0)
#define GRB_CNT GENMASK_ULL(39, 32)
#define GWB_CNT GENMASK_ULL(47, 40)
#define XQ_XOR GENMASK_ULL(63, 63)
#define DQPTR GENMASK_ULL(19, 0)
#define NQPTR GENMASK_ULL(51, 32)
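/* Quiesce a CPT LF instruction queue: block new enqueues, wait for the
 * queue pointers to drain, then wait for the inflight and GRB counts to
 * stay at zero before letting outstanding writes settle.
 */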
static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
{
int timeout = 1000000;
u64 inprog, inst_ptr;
u64 qsize, pending;
int i = 0;
/* Disable instructions enqueuing */
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
inprog |= BIT_ULL(16);
rvu_write64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);
qsize = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_SIZE)) & 0x7FFF;
do {
inst_ptr = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_INST_PTR));
pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
FIELD_GET(NQPTR, inst_ptr) -
FIELD_GET(DQPTR, inst_ptr);
udelay(1);
timeout--;
} while ((pending != 0) && (timeout != 0));
if (timeout == 0)
dev_warn(rvu->dev, "TIMEOUT: CPT poll on pending instructions\n");
timeout = 1000000;
/* Wait for CPT queue to become execution-quiescent */
do {
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
(FIELD_GET(GRB_CNT, inprog) == 0)) {
i++;
} else {
i = 0;
timeout--;
}
} while ((timeout != 0) && (i < 10));
if (timeout == 0)
dev_warn(rvu->dev, "TIMEOUT: CPT poll on inflight count\n");
/* Wait for 2 us to flush all queue writes to memory */
udelay(2);
}
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
{
u64 reg;
if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
cpt_rxc_teardown(rvu, blkaddr);
mutex_lock(&rvu->alias_lock);
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
cpt_lf_disable_iqueue(rvu, blkaddr, slot);
rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
mutex_unlock(&rvu->alias_lock);
return 0;
}
#define CPT_RES_LEN 16
#define CPT_SE_IE_EGRP 1ULL
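/* Send a single CPT_INST_S via the CPT PF (over the AF->PF mailbox) and
 * poll the DMA-mapped result word for completion; this acts as a barrier
 * ensuring earlier inline-inbound packets have been processed.
 */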
static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
int nix_blkaddr)
{
int cpt_pf_num = rvu->cpt_pf_num;
struct cpt_inst_lmtst_req *req;
dma_addr_t res_daddr;
int timeout = 3000;
u8 cpt_idx;
u64 *inst;
u16 *res;
int rc;
res = kzalloc(CPT_RES_LEN, GFP_KERNEL);
if (!res)
return -ENOMEM;
res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(rvu->dev, res_daddr)) {
dev_err(rvu->dev, "DMA mapping failed for CPT result\n");
rc = -EFAULT;
goto res_free;
}
*res = 0xFFFF;
/* Send mbox message to CPT PF */
req = (struct cpt_inst_lmtst_req *)
otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up,
cpt_pf_num, sizeof(*req),
sizeof(struct msg_rsp));
if (!req) {
rc = -ENOMEM;
goto res_daddr_unmap;
}
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.id = MBOX_MSG_CPT_INST_LMTST;
inst = req->inst;
/* Prepare CPT_INST_S */
inst[0] = 0;
inst[1] = res_daddr;
/* AF PF FUNC */
inst[2] = 0;
/* Set QORD */
inst[3] = 1;
inst[4] = 0;
inst[5] = 0;
inst[6] = 0;
/* Set EGRP */
inst[7] = CPT_SE_IE_EGRP << 61;
/* Subtract 1 from the NIX-CPT credit count to preserve
* credit counts.
*/
cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
BIT_ULL(22) - 1);
otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
if (rc)
dev_warn(rvu->dev, "notification to pf %d failed\n",
cpt_pf_num);
/* Wait for CPT instruction to be completed */
do {
mdelay(1);
if (*res == 0xFFFF)
timeout--;
else
break;
} while (timeout);
if (timeout == 0)
dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");
res_daddr_unmap:
dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL);
res_free:
kfree(res);
return 0;
}
#define CTX_CAM_PF_FUNC GENMASK_ULL(61, 46)
#define CTX_CAM_CPTR GENMASK_ULL(45, 0)
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
{
int nix_blkaddr, blkaddr;
u16 max_ctx_entries, i;
int slot = 0, num_lfs;
u64 reg, cam_data;
int rc;
nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (nix_blkaddr < 0)
return -EINVAL;
if (is_rvu_otx2(rvu))
return 0;
blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0;
/* Submit a CPT_INST_S to track when all packets have been
 * flushed through for the NIX PF FUNC in the inline inbound case.
 */
rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr);
if (rc)
return rc;
/* Wait for rxc entries to be flushed out */
cpt_rxc_teardown(rvu, blkaddr);
reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
max_ctx_entries = (reg >> 48) & 0xFFF;
mutex_lock(&rvu->rsrc_lock);
num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
blkaddr);
if (num_lfs == 0) {
dev_warn(rvu->dev, "CPT LF is not configured\n");
goto unlock;
}
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
for (i = 0; i < max_ctx_entries; i++) {
cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) &&
FIELD_GET(CTX_CAM_CPTR, cam_data)) {
reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data);
rvu_write64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH),
reg);
}
}
rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
unlock:
mutex_unlock(&rvu->rsrc_lock);
return 0;
}
#define MAX_RXC_ICB_CNT GENMASK_ULL(40, 32)
int rvu_cpt_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
u64 reg_val;
/* Retrieve CPT PF number */
rvu->cpt_pf_num = get_cpt_pf_num(rvu);
if (is_block_implemented(rvu->hw, BLKADDR_CPT0) && !is_rvu_otx2(rvu) &&
!is_cn10kb(rvu))
hw->cap.cpt_rxc = true;
if (hw->cap.cpt_rxc && !is_cn10ka_a0(rvu) && !is_cn10ka_a1(rvu)) {
/* Set CPT_AF_RXC_CFG1:max_rxc_icb_cnt to 0xc0 so as not to affect
 * inline inbound peak performance.
 */
reg_val = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1);
reg_val &= ~MAX_RXC_ICB_CNT;
reg_val |= FIELD_PREP(MAX_RXC_ICB_CNT,
CPT_DFLT_MAX_RXC_ICB_CNT);
rvu_write64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1, reg_val);
}
spin_lock_init(&rvu->cpt_intr_lock);
return 0;
}
|
// SPDX-License-Identifier: GPL-2.0
/* pci_fire.c: Sun4u platform PCI-E controller support.
*
* Copyright (C) 2007 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/numa.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include "pci_impl.h"
#define DRIVER_NAME "fire"
#define PFX DRIVER_NAME ": "
#define FIRE_IOMMU_CONTROL 0x40000UL
#define FIRE_IOMMU_TSBBASE 0x40008UL
#define FIRE_IOMMU_FLUSH 0x40100UL
#define FIRE_IOMMU_FLUSHINV 0x40108UL
static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
struct iommu *iommu = pbm->iommu;
u32 vdma[2], dma_mask;
u64 control;
int tsbsize, err;
/* No virtual-dma property on these guys, use largest size. */
vdma[0] = 0xc0000000; /* base */
vdma[1] = 0x40000000; /* size */
dma_mask = 0xffffffff;
tsbsize = 128;
/* Register addresses. */
iommu->iommu_control = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
iommu->iommu_tsbbase = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
iommu->iommu_flush = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;
/* We use the main control/status register of FIRE as the write
* completion register.
*/
iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;
/*
* Invalidate TLB Entries.
*/
upa_writeq(~(u64)0, iommu->iommu_flushinv);
err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
pbm->numa_node);
if (err)
return err;
upa_writeq(__pa(iommu->page_table) | 0x7UL, iommu->iommu_tsbbase);
control = upa_readq(iommu->iommu_control);
control |= (0x00000400 /* TSB cache snoop enable */ |
0x00000300 /* Cache mode */ |
0x00000002 /* Bypass enable */ |
0x00000001 /* Translation enable */);
upa_writeq(control, iommu->iommu_control);
return 0;
}
#ifdef CONFIG_PCI_MSI
struct pci_msiq_entry {
u64 word0;
#define MSIQ_WORD0_RESV 0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE 0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT 56
#define MSIQ_WORD0_LEN 0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT 46
#define MSIQ_WORD0_ADDR0 0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT 32
#define MSIQ_WORD0_RID 0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT 16
#define MSIQ_WORD0_DATA0 0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT 0
#define MSIQ_TYPE_MSG 0x6
#define MSIQ_TYPE_MSI32 0xb
#define MSIQ_TYPE_MSI64 0xf
u64 word1;
#define MSIQ_WORD1_ADDR1 0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT 16
#define MSIQ_WORD1_DATA1 0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT 0
u64 resv[6];
};
/* All MSI registers are offset from pbm->pbm_regs */
#define EVENT_QUEUE_BASE_ADDR_REG 0x010000UL
#define EVENT_QUEUE_BASE_ADDR_ALL_ONES 0xfffc000000000000UL
#define EVENT_QUEUE_CONTROL_SET(EQ) (0x011000UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_SET_OFLOW 0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_SET_EN 0x0000100000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR(EQ) (0x011200UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_CLEAR_OF 0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_E2I 0x0000800000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_DIS 0x0000100000000000UL
#define EVENT_QUEUE_STATE(EQ) (0x011400UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_STATE_MASK 0x0000000000000007UL
#define EVENT_QUEUE_STATE_IDLE 0x0000000000000001UL
#define EVENT_QUEUE_STATE_ACTIVE 0x0000000000000002UL
#define EVENT_QUEUE_STATE_ERROR 0x0000000000000004UL
#define EVENT_QUEUE_TAIL(EQ) (0x011600UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_TAIL_OFLOW 0x0200000000000000UL
#define EVENT_QUEUE_TAIL_VAL 0x000000000000007fUL
#define EVENT_QUEUE_HEAD(EQ) (0x011800UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_HEAD_VAL 0x000000000000007fUL
#define MSI_MAP(MSI) (0x020000UL + (MSI) * 0x8UL)
#define MSI_MAP_VALID 0x8000000000000000UL
#define MSI_MAP_EQWR_N 0x4000000000000000UL
#define MSI_MAP_EQNUM 0x000000000000003fUL
#define MSI_CLEAR(MSI) (0x028000UL + (MSI) * 0x8UL)
#define MSI_CLEAR_EQWR_N 0x4000000000000000UL
#define IMONDO_DATA0 0x02C000UL
#define IMONDO_DATA0_DATA 0xffffffffffffffc0UL
#define IMONDO_DATA1 0x02C008UL
#define IMONDO_DATA1_DATA 0xffffffffffffffffUL
#define MSI_32BIT_ADDR 0x034000UL
#define MSI_32BIT_ADDR_VAL 0x00000000ffff0000UL
#define MSI_64BIT_ADDR 0x034008UL
#define MSI_64BIT_ADDR_VAL 0xffffffffffff0000UL
static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head)
{
*head = upa_readq(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
return 0;
}
static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head, unsigned long *msi)
{
unsigned long type_fmt, type, msi_num;
struct pci_msiq_entry *base, *ep;
base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
ep = &base[*head];
if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
return 0;
type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
MSIQ_WORD0_FMT_TYPE_SHIFT);
type = (type_fmt >> 3);
if (unlikely(type != MSIQ_TYPE_MSI32 &&
type != MSIQ_TYPE_MSI64))
return -EINVAL;
*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
MSIQ_WORD0_DATA0_SHIFT);
upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi_num));
/* Clear the entry. */
ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;
/* Go to next entry in ring. */
(*head)++;
if (*head >= pbm->msiq_ent_count)
*head = 0;
return 1;
}
static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long head)
{
upa_writeq(head, pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
return 0;
}
static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long msi, int is_msi64)
{
u64 val;
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
val &= ~(MSI_MAP_EQNUM);
val |= msiqid;
upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));
upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi));
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
val |= MSI_MAP_VALID;
upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));
return 0;
}
static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
u64 val;
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
val &= ~MSI_MAP_VALID;
upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));
return 0;
}
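/* Allocate the 512KB MSI event queue area and program the event queue base,
 * mondo data and 32/64-bit MSI address registers; heads and tails start at 0.
 */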
static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
{
unsigned long pages, order, i;
order = get_order(512 * 1024);
pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
if (pages == 0UL) {
printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
order);
return -ENOMEM;
}
memset((char *)pages, 0, PAGE_SIZE << order);
pbm->msi_queues = (void *) pages;
upa_writeq((EVENT_QUEUE_BASE_ADDR_ALL_ONES |
__pa(pbm->msi_queues)),
pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG);
upa_writeq(pbm->portid << 6, pbm->pbm_regs + IMONDO_DATA0);
upa_writeq(0, pbm->pbm_regs + IMONDO_DATA1);
upa_writeq(pbm->msi32_start, pbm->pbm_regs + MSI_32BIT_ADDR);
upa_writeq(pbm->msi64_start, pbm->pbm_regs + MSI_64BIT_ADDR);
for (i = 0; i < pbm->msiq_num; i++) {
upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_HEAD(i));
upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_TAIL(i));
}
return 0;
}
static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
{
unsigned long pages, order;
order = get_order(512 * 1024);
pages = (unsigned long) pbm->msi_queues;
free_pages(pages, order);
pbm->msi_queues = NULL;
}
static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
unsigned long msiqid,
unsigned long devino)
{
unsigned long cregs = (unsigned long) pbm->pbm_regs;
unsigned long imap_reg, iclr_reg, int_ctrlr;
unsigned int irq;
int fixup;
u64 val;
imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));
/* XXX iterate amongst the 4 IRQ controllers XXX */
int_ctrlr = (1UL << 6);
val = upa_readq(imap_reg);
val |= (1UL << 63) | int_ctrlr;
upa_writeq(val, imap_reg);
fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
irq = build_irq(fixup, iclr_reg, imap_reg);
if (!irq)
return -ENOMEM;
upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));
return irq;
}
static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
.get_head = pci_fire_get_head,
.dequeue_msi = pci_fire_dequeue_msi,
.set_head = pci_fire_set_head,
.msi_setup = pci_fire_msi_setup,
.msi_teardown = pci_fire_msi_teardown,
.msiq_alloc = pci_fire_msiq_alloc,
.msiq_free = pci_fire_msiq_free,
.msiq_build_irq = pci_fire_msiq_build_irq,
};
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL 0x470010UL
#define FIRE_PARITY_ENAB 0x8000000000000000UL
#define FIRE_FATAL_RESET_CTL 0x471028UL
#define FIRE_FATAL_RESET_SPARE 0x0000000004000000UL
#define FIRE_FATAL_RESET_MB 0x0000000002000000UL
#define FIRE_FATAL_RESET_CPE 0x0000000000008000UL
#define FIRE_FATAL_RESET_APE 0x0000000000004000UL
#define FIRE_FATAL_RESET_PIO 0x0000000000000040UL
#define FIRE_FATAL_RESET_JW 0x0000000000000004UL
#define FIRE_FATAL_RESET_JI 0x0000000000000002UL
#define FIRE_FATAL_RESET_JR 0x0000000000000001UL
#define FIRE_CORE_INTR_ENABLE 0x471800UL
/* Based at pbm->pbm_regs */
#define FIRE_TLU_CTRL 0x80000UL
#define FIRE_TLU_CTRL_TIM 0x00000000da000000UL
#define FIRE_TLU_CTRL_QDET 0x0000000000000100UL
#define FIRE_TLU_CTRL_CFG 0x0000000000000001UL
#define FIRE_TLU_DEV_CTRL 0x90008UL
#define FIRE_TLU_LINK_CTRL 0x90020UL
#define FIRE_TLU_LINK_CTRL_CLK 0x0000000000000040UL
#define FIRE_LPU_RESET 0xe2008UL
#define FIRE_LPU_LLCFG 0xe2200UL
#define FIRE_LPU_LLCFG_VC0 0x0000000000000100UL
#define FIRE_LPU_FCTRL_UCTRL 0xe2240UL
#define FIRE_LPU_FCTRL_UCTRL_N 0x0000000000000002UL
#define FIRE_LPU_FCTRL_UCTRL_P 0x0000000000000001UL
#define FIRE_LPU_TXL_FIFOP 0xe2430UL
#define FIRE_LPU_LTSSM_CFG2 0xe2788UL
#define FIRE_LPU_LTSSM_CFG3 0xe2790UL
#define FIRE_LPU_LTSSM_CFG4 0xe2798UL
#define FIRE_LPU_LTSSM_CFG5 0xe27a0UL
#define FIRE_DMC_IENAB 0x31800UL
#define FIRE_DMC_DBG_SEL_A 0x53000UL
#define FIRE_DMC_DBG_SEL_B 0x53008UL
#define FIRE_PEC_IENAB 0x51800UL
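/* One-time controller setup: enable parity checking, fatal-error resets and
 * core interrupts, bring up the TLU and LPU link blocks, and unmask the
 * DMC and PEC interrupt sources.
 */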
static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
u64 val;
upa_writeq(FIRE_PARITY_ENAB,
pbm->controller_regs + FIRE_PARITY_CONTROL);
upa_writeq((FIRE_FATAL_RESET_SPARE |
FIRE_FATAL_RESET_MB |
FIRE_FATAL_RESET_CPE |
FIRE_FATAL_RESET_APE |
FIRE_FATAL_RESET_PIO |
FIRE_FATAL_RESET_JW |
FIRE_FATAL_RESET_JI |
FIRE_FATAL_RESET_JR),
pbm->controller_regs + FIRE_FATAL_RESET_CTL);
upa_writeq(~(u64)0, pbm->controller_regs + FIRE_CORE_INTR_ENABLE);
val = upa_readq(pbm->pbm_regs + FIRE_TLU_CTRL);
val |= (FIRE_TLU_CTRL_TIM |
FIRE_TLU_CTRL_QDET |
FIRE_TLU_CTRL_CFG);
upa_writeq(val, pbm->pbm_regs + FIRE_TLU_CTRL);
upa_writeq(0, pbm->pbm_regs + FIRE_TLU_DEV_CTRL);
upa_writeq(FIRE_TLU_LINK_CTRL_CLK,
pbm->pbm_regs + FIRE_TLU_LINK_CTRL);
upa_writeq(0, pbm->pbm_regs + FIRE_LPU_RESET);
upa_writeq(FIRE_LPU_LLCFG_VC0, pbm->pbm_regs + FIRE_LPU_LLCFG);
upa_writeq((FIRE_LPU_FCTRL_UCTRL_N | FIRE_LPU_FCTRL_UCTRL_P),
pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL);
upa_writeq(((0xffff << 16) | (0x0000 << 0)),
pbm->pbm_regs + FIRE_LPU_TXL_FIFOP);
upa_writeq(3000000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2);
upa_writeq(500000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3);
upa_writeq((2 << 16) | (140 << 8),
pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4);
upa_writeq(0, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5);
upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_DMC_IENAB);
upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_A);
upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_B);
upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_PEC_IENAB);
}
static int pci_fire_pbm_init(struct pci_pbm_info *pbm,
struct platform_device *op, u32 portid)
{
const struct linux_prom64_registers *regs;
struct device_node *dp = op->dev.of_node;
int err;
pbm->numa_node = NUMA_NO_NODE;
pbm->pci_ops = &sun4u_pci_ops;
pbm->config_space_reg_bits = 12;
pbm->index = pci_num_pbms++;
pbm->portid = portid;
pbm->op = op;
pbm->name = dp->full_name;
regs = of_get_property(dp, "reg", NULL);
pbm->pbm_regs = regs[0].phys_addr;
pbm->controller_regs = regs[1].phys_addr - 0x410000UL;
printk("%s: SUN4U PCIE Bus Module\n", pbm->name);
pci_determine_mem_io_space(pbm);
pci_get_pbm_props(pbm);
pci_fire_hw_init(pbm);
err = pci_fire_pbm_iommu_init(pbm);
if (err)
return err;
pci_fire_msi_init(pbm);
pbm->pci_bus = pci_scan_one_pbm(pbm, &op->dev);
/* XXX register error interrupt handlers XXX */
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
return 0;
}
static int fire_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
struct iommu *iommu;
u32 portid;
int err;
portid = of_getintprop_default(dp, "portid", 0xff);
err = -ENOMEM;
pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
if (!pbm) {
printk(KERN_ERR PFX "Cannot allocate pci_pbminfo.\n");
goto out_err;
}
iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
if (!iommu) {
printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
goto out_free_controller;
}
pbm->iommu = iommu;
err = pci_fire_pbm_init(pbm, op, portid);
if (err)
goto out_free_iommu;
dev_set_drvdata(&op->dev, pbm);
return 0;
out_free_iommu:
kfree(pbm->iommu);
out_free_controller:
kfree(pbm);
out_err:
return err;
}
static const struct of_device_id fire_match[] = {
{
.name = "pci",
.compatible = "pciex108e,80f0",
},
{},
};
static struct platform_driver fire_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = fire_match,
},
.probe = fire_probe,
};
static int __init fire_init(void)
{
return platform_driver_register(&fire_driver);
}
subsys_initcall(fire_init);
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Trace events for the ChromeOS Embedded Controller
*
* Copyright 2019 Google LLC.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cros_ec
#if !defined(_CROS_EC_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _CROS_EC_TRACE_H_
#include <linux/bits.h>
#include <linux/types.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/tracepoint.h>
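/* Trace each EC host command as it is issued; the raw command value is
 * split into its passthru offset and the command number within that offset.
 */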
TRACE_EVENT(cros_ec_request_start,
TP_PROTO(struct cros_ec_command *cmd),
TP_ARGS(cmd),
TP_STRUCT__entry(
__field(uint32_t, version)
__field(uint32_t, offset)
__field(uint32_t, command)
__field(uint32_t, outsize)
__field(uint32_t, insize)
),
TP_fast_assign(
__entry->version = cmd->version;
__entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
__entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
__entry->outsize = cmd->outsize;
__entry->insize = cmd->insize;
),
TP_printk("version: %u, offset: %d, command: %s, outsize: %u, insize: %u",
__entry->version, __entry->offset,
__print_symbolic(__entry->command, EC_CMDS),
__entry->outsize, __entry->insize)
);
TRACE_EVENT(cros_ec_request_done,
TP_PROTO(struct cros_ec_command *cmd, int retval),
TP_ARGS(cmd, retval),
TP_STRUCT__entry(
__field(uint32_t, version)
__field(uint32_t, offset)
__field(uint32_t, command)
__field(uint32_t, outsize)
__field(uint32_t, insize)
__field(uint32_t, result)
__field(int, retval)
),
TP_fast_assign(
__entry->version = cmd->version;
__entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
__entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
__entry->outsize = cmd->outsize;
__entry->insize = cmd->insize;
__entry->result = cmd->result;
__entry->retval = retval;
),
TP_printk("version: %u, offset: %d, command: %s, outsize: %u, insize: %u, ec result: %s, retval: %u",
__entry->version, __entry->offset,
__print_symbolic(__entry->command, EC_CMDS),
__entry->outsize, __entry->insize,
__print_symbolic(__entry->result, EC_RESULT),
__entry->retval)
);
#endif /* _CROS_EC_TRACE_H_ */
/* this part must be outside header guard */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE cros_ec_trace
#include <trace/define_trace.h>
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) STMicroelectronics 2018
// Author: Pascal Paillet <[email protected]> for STMicroelectronics.
#include <linux/interrupt.h>
#include <linux/mfd/stpmic1.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <dt-bindings/mfd/st,stpmic1.h>
/**
* struct stpmic1_regulator_cfg - this structure is used as driver data
* @desc: regulator framework description
* @mask_reset_reg: mask reset register address
* @mask_reset_mask: mask rank and mask reset register mask
* @icc_reg: icc register address
* @icc_mask: icc register mask
*/
struct stpmic1_regulator_cfg {
struct regulator_desc desc;
u8 mask_reset_reg;
u8 mask_reset_mask;
u8 icc_reg;
u8 icc_mask;
};
static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode);
static unsigned int stpmic1_get_mode(struct regulator_dev *rdev);
static int stpmic1_set_icc(struct regulator_dev *rdev, int lim, int severity,
bool enable);
static unsigned int stpmic1_map_mode(unsigned int mode);
enum {
STPMIC1_BUCK1 = 0,
STPMIC1_BUCK2 = 1,
STPMIC1_BUCK3 = 2,
STPMIC1_BUCK4 = 3,
STPMIC1_LDO1 = 4,
STPMIC1_LDO2 = 5,
STPMIC1_LDO3 = 6,
STPMIC1_LDO4 = 7,
STPMIC1_LDO5 = 8,
STPMIC1_LDO6 = 9,
STPMIC1_VREF_DDR = 10,
STPMIC1_BOOST = 11,
STPMIC1_VBUS_OTG = 12,
STPMIC1_SW_OUT = 13,
};
/* Worst-case enable time is 5000mV / (2250uV/us) */
#define PMIC_ENABLE_TIME_US 2200
/* Worst-case ramp delay is 2250uV/us */
#define PMIC_RAMP_DELAY 2200
static const struct linear_range buck1_ranges[] = {
REGULATOR_LINEAR_RANGE(725000, 0, 4, 0),
REGULATOR_LINEAR_RANGE(725000, 5, 36, 25000),
REGULATOR_LINEAR_RANGE(1500000, 37, 63, 0),
};
static const struct linear_range buck2_ranges[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 17, 0),
REGULATOR_LINEAR_RANGE(1050000, 18, 19, 0),
REGULATOR_LINEAR_RANGE(1100000, 20, 21, 0),
REGULATOR_LINEAR_RANGE(1150000, 22, 23, 0),
REGULATOR_LINEAR_RANGE(1200000, 24, 25, 0),
REGULATOR_LINEAR_RANGE(1250000, 26, 27, 0),
REGULATOR_LINEAR_RANGE(1300000, 28, 29, 0),
REGULATOR_LINEAR_RANGE(1350000, 30, 31, 0),
REGULATOR_LINEAR_RANGE(1400000, 32, 33, 0),
REGULATOR_LINEAR_RANGE(1450000, 34, 35, 0),
REGULATOR_LINEAR_RANGE(1500000, 36, 63, 0),
};
static const struct linear_range buck3_ranges[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 19, 0),
REGULATOR_LINEAR_RANGE(1100000, 20, 23, 0),
REGULATOR_LINEAR_RANGE(1200000, 24, 27, 0),
REGULATOR_LINEAR_RANGE(1300000, 28, 31, 0),
REGULATOR_LINEAR_RANGE(1400000, 32, 35, 0),
REGULATOR_LINEAR_RANGE(1500000, 36, 55, 100000),
REGULATOR_LINEAR_RANGE(3400000, 56, 63, 0),
};
static const struct linear_range buck4_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 27, 25000),
REGULATOR_LINEAR_RANGE(1300000, 28, 29, 0),
REGULATOR_LINEAR_RANGE(1350000, 30, 31, 0),
REGULATOR_LINEAR_RANGE(1400000, 32, 33, 0),
REGULATOR_LINEAR_RANGE(1450000, 34, 35, 0),
REGULATOR_LINEAR_RANGE(1500000, 36, 60, 100000),
REGULATOR_LINEAR_RANGE(3900000, 61, 63, 0),
};
static const struct linear_range ldo1_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0),
};
static const struct linear_range ldo2_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0),
};
static const struct linear_range ldo3_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0),
/* with index 31 LDO3 is in DDR mode */
REGULATOR_LINEAR_RANGE(500000, 31, 31, 0),
};
static const struct linear_range ldo5_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 30, 100000),
REGULATOR_LINEAR_RANGE(3900000, 31, 31, 0),
};
static const struct linear_range ldo6_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0),
};
static const struct regulator_ops stpmic1_ldo_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_over_current_protection = stpmic1_set_icc,
};
static const struct regulator_ops stpmic1_ldo3_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_iterate,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_bypass = regulator_get_bypass_regmap,
.set_bypass = regulator_set_bypass_regmap,
.set_over_current_protection = stpmic1_set_icc,
};
static const struct regulator_ops stpmic1_ldo4_fixed_regul_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.set_over_current_protection = stpmic1_set_icc,
};
static const struct regulator_ops stpmic1_buck_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_pull_down = regulator_set_pull_down_regmap,
.set_mode = stpmic1_set_mode,
.get_mode = stpmic1_get_mode,
.set_over_current_protection = stpmic1_set_icc,
};
static const struct regulator_ops stpmic1_vref_ddr_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
};
static const struct regulator_ops stpmic1_boost_regul_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.set_over_current_protection = stpmic1_set_icc,
};
static const struct regulator_ops stpmic1_switch_regul_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.set_over_current_protection = stpmic1_set_icc,
.set_active_discharge = regulator_set_active_discharge_regmap,
};
#define REG_LDO(ids, base) { \
.name = #ids, \
.id = STPMIC1_##ids, \
.n_voltages = 32, \
.ops = &stpmic1_ldo_ops, \
.linear_ranges = base ## _ranges, \
.n_linear_ranges = ARRAY_SIZE(base ## _ranges), \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.vsel_reg = ids##_ACTIVE_CR, \
.vsel_mask = LDO_VOLTAGE_MASK, \
.enable_reg = ids##_ACTIVE_CR, \
.enable_mask = LDO_ENABLE_MASK, \
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
.ramp_delay = PMIC_RAMP_DELAY, \
.supply_name = #base, \
}
#define REG_LDO3(ids, base) { \
.name = #ids, \
.id = STPMIC1_##ids, \
.n_voltages = 32, \
.ops = &stpmic1_ldo3_ops, \
.linear_ranges = ldo3_ranges, \
.n_linear_ranges = ARRAY_SIZE(ldo3_ranges), \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.vsel_reg = LDO3_ACTIVE_CR, \
.vsel_mask = LDO_VOLTAGE_MASK, \
.enable_reg = LDO3_ACTIVE_CR, \
.enable_mask = LDO_ENABLE_MASK, \
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
.ramp_delay = PMIC_RAMP_DELAY, \
.bypass_reg = LDO3_ACTIVE_CR, \
.bypass_mask = LDO_BYPASS_MASK, \
.bypass_val_on = LDO_BYPASS_MASK, \
.bypass_val_off = 0, \
.supply_name = #base, \
}
#define REG_LDO4(ids, base) { \
.name = #ids, \
.id = STPMIC1_##ids, \
.n_voltages = 1, \
.ops = &stpmic1_ldo4_fixed_regul_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 3300000, \
.fixed_uV = 3300000, \
.enable_reg = LDO4_ACTIVE_CR, \
.enable_mask = LDO_ENABLE_MASK, \
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
.ramp_delay = PMIC_RAMP_DELAY, \
.supply_name = #base, \
}
#define REG_BUCK(ids, base) { \
.name = #ids, \
.id = STPMIC1_##ids, \
.ops = &stpmic1_buck_ops, \
.n_voltages = 64, \
.linear_ranges = base ## _ranges, \
.n_linear_ranges = ARRAY_SIZE(base ## _ranges), \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.vsel_reg = ids##_ACTIVE_CR, \
.vsel_mask = BUCK_VOLTAGE_MASK, \
.enable_reg = ids##_ACTIVE_CR, \
.enable_mask = BUCK_ENABLE_MASK, \
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
.ramp_delay = PMIC_RAMP_DELAY, \
.of_map_mode = stpmic1_map_mode, \
.pull_down_reg = ids##_PULL_DOWN_REG, \
.pull_down_mask = ids##_PULL_DOWN_MASK, \
.supply_name = #base, \
}
#define REG_VREF_DDR(ids, base) { \
.name = #ids, \
.id = STPMIC1_##ids, \
.n_voltages = 1, \
.ops = &stpmic1_vref_ddr_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 500000, \
.fixed_uV = 500000, \
.enable_reg = VREF_DDR_ACTIVE_CR, \
.enable_mask = BUCK_ENABLE_MASK, \
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
.supply_name = #base, \
}
#define REG_BOOST(ids, base) { \
.name = #ids, \
.id = STPMIC1_##ids, \
.n_voltages = 1, \
.ops = &stpmic1_boost_regul_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 0, \
.fixed_uV = 5000000, \
.enable_reg = BST_SW_CR, \
.enable_mask = BOOST_ENABLED, \
.enable_val = BOOST_ENABLED, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
.supply_name = #base, \
}
#define REG_VBUS_OTG(ids, base) { \
.name = #ids, \
.id = STPMIC1_##ids, \
.n_voltages = 1, \
.ops = &stpmic1_switch_regul_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 0, \
.fixed_uV = 5000000, \
.enable_reg = BST_SW_CR, \
.enable_mask = USBSW_OTG_SWITCH_ENABLED, \
.enable_val = USBSW_OTG_SWITCH_ENABLED, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
.supply_name = #base, \
.active_discharge_reg = BST_SW_CR, \
.active_discharge_mask = VBUS_OTG_DISCHARGE, \
.active_discharge_on = VBUS_OTG_DISCHARGE, \
}
#define REG_SW_OUT(ids, base) { \
.name = #ids, \
.id = STPMIC1_##ids, \
.n_voltages = 1, \
.ops = &stpmic1_switch_regul_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 0, \
.fixed_uV = 5000000, \
.enable_reg = BST_SW_CR, \
.enable_mask = SWIN_SWOUT_ENABLED, \
.enable_val = SWIN_SWOUT_ENABLED, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
.supply_name = #base, \
.active_discharge_reg = BST_SW_CR, \
.active_discharge_mask = SW_OUT_DISCHARGE, \
.active_discharge_on = SW_OUT_DISCHARGE, \
}
static const struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = {
[STPMIC1_BUCK1] = {
.desc = REG_BUCK(BUCK1, buck1),
.icc_reg = BUCKS_ICCTO_CR,
.icc_mask = BIT(0),
.mask_reset_reg = BUCKS_MASK_RESET_CR,
.mask_reset_mask = BIT(0),
},
[STPMIC1_BUCK2] = {
.desc = REG_BUCK(BUCK2, buck2),
.icc_reg = BUCKS_ICCTO_CR,
.icc_mask = BIT(1),
.mask_reset_reg = BUCKS_MASK_RESET_CR,
.mask_reset_mask = BIT(1),
},
[STPMIC1_BUCK3] = {
.desc = REG_BUCK(BUCK3, buck3),
.icc_reg = BUCKS_ICCTO_CR,
.icc_mask = BIT(2),
.mask_reset_reg = BUCKS_MASK_RESET_CR,
.mask_reset_mask = BIT(2),
},
[STPMIC1_BUCK4] = {
.desc = REG_BUCK(BUCK4, buck4),
.icc_reg = BUCKS_ICCTO_CR,
.icc_mask = BIT(3),
.mask_reset_reg = BUCKS_MASK_RESET_CR,
.mask_reset_mask = BIT(3),
},
[STPMIC1_LDO1] = {
.desc = REG_LDO(LDO1, ldo1),
.icc_reg = LDOS_ICCTO_CR,
.icc_mask = BIT(0),
.mask_reset_reg = LDOS_MASK_RESET_CR,
.mask_reset_mask = BIT(0),
},
[STPMIC1_LDO2] = {
.desc = REG_LDO(LDO2, ldo2),
.icc_reg = LDOS_ICCTO_CR,
.icc_mask = BIT(1),
.mask_reset_reg = LDOS_MASK_RESET_CR,
.mask_reset_mask = BIT(1),
},
[STPMIC1_LDO3] = {
.desc = REG_LDO3(LDO3, ldo3),
.icc_reg = LDOS_ICCTO_CR,
.icc_mask = BIT(2),
.mask_reset_reg = LDOS_MASK_RESET_CR,
.mask_reset_mask = BIT(2),
},
[STPMIC1_LDO4] = {
.desc = REG_LDO4(LDO4, ldo4),
.icc_reg = LDOS_ICCTO_CR,
.icc_mask = BIT(3),
.mask_reset_reg = LDOS_MASK_RESET_CR,
.mask_reset_mask = BIT(3),
},
[STPMIC1_LDO5] = {
.desc = REG_LDO(LDO5, ldo5),
.icc_reg = LDOS_ICCTO_CR,
.icc_mask = BIT(4),
.mask_reset_reg = LDOS_MASK_RESET_CR,
.mask_reset_mask = BIT(4),
},
[STPMIC1_LDO6] = {
.desc = REG_LDO(LDO6, ldo6),
.icc_reg = LDOS_ICCTO_CR,
.icc_mask = BIT(5),
.mask_reset_reg = LDOS_MASK_RESET_CR,
.mask_reset_mask = BIT(5),
},
[STPMIC1_VREF_DDR] = {
.desc = REG_VREF_DDR(VREF_DDR, vref_ddr),
.mask_reset_reg = LDOS_MASK_RESET_CR,
.mask_reset_mask = BIT(6),
},
[STPMIC1_BOOST] = {
.desc = REG_BOOST(BOOST, boost),
.icc_reg = BUCKS_ICCTO_CR,
.icc_mask = BIT(6),
},
[STPMIC1_VBUS_OTG] = {
.desc = REG_VBUS_OTG(VBUS_OTG, pwr_sw1),
.icc_reg = BUCKS_ICCTO_CR,
.icc_mask = BIT(4),
},
[STPMIC1_SW_OUT] = {
.desc = REG_SW_OUT(SW_OUT, pwr_sw2),
.icc_reg = BUCKS_ICCTO_CR,
.icc_mask = BIT(5),
},
};
static unsigned int stpmic1_map_mode(unsigned int mode)
{
switch (mode) {
case STPMIC1_BUCK_MODE_NORMAL:
return REGULATOR_MODE_NORMAL;
case STPMIC1_BUCK_MODE_LP:
return REGULATOR_MODE_STANDBY;
default:
return REGULATOR_MODE_INVALID;
}
}
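/*
 * The buck low-power mode bit lives in the same control register as the
 * enable bits, so both the mode getter and setter below operate on
 * rdev->desc->enable_reg and only touch the STPMIC1_BUCK_MODE_LP bit.
 */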
static unsigned int stpmic1_get_mode(struct regulator_dev *rdev)
{
int value;
struct regmap *regmap = rdev_get_regmap(rdev);
regmap_read(regmap, rdev->desc->enable_reg, &value);
if (value & STPMIC1_BUCK_MODE_LP)
return REGULATOR_MODE_STANDBY;
return REGULATOR_MODE_NORMAL;
}
static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
int value;
struct regmap *regmap = rdev_get_regmap(rdev);
switch (mode) {
case REGULATOR_MODE_NORMAL:
value = STPMIC1_BUCK_MODE_NORMAL;
break;
case REGULATOR_MODE_STANDBY:
value = STPMIC1_BUCK_MODE_LP;
break;
default:
return -EINVAL;
}
return regmap_update_bits(regmap, rdev->desc->enable_reg,
STPMIC1_BUCK_MODE_LP, value);
}
static int stpmic1_set_icc(struct regulator_dev *rdev, int lim, int severity,
bool enable)
{
struct stpmic1_regulator_cfg *cfg = rdev_get_drvdata(rdev);
struct regmap *regmap = rdev_get_regmap(rdev);
/*
	 * It looks like one bit in a register controls whether OCP is
	 * enabled, so we might be able to turn it off here if that
	 * was requested. I won't support this because I don't have the HW.
	 * Feel free to try and implement it if you have the HW and need the
	 * kernel to disable this.
	 *
	 * Also, I don't know if the limit can be configured or if we support
	 * error/warning instead of protect, so I just keep the existing logic
	 * and assume no.
*/
if (lim || severity != REGULATOR_SEVERITY_PROT || !enable)
return -EINVAL;
/* enable switch off in case of over current */
return regmap_update_bits(regmap, cfg->icc_reg, cfg->icc_mask,
cfg->icc_mask);
}
static irqreturn_t stpmic1_curlim_irq_handler(int irq, void *data)
{
struct regulator_dev *rdev = (struct regulator_dev *)data;
/* Send an overcurrent notification */
regulator_notifier_call_chain(rdev,
REGULATOR_EVENT_OVER_CURRENT,
NULL);
return IRQ_HANDLED;
}
#define MATCH(_name, _id) \
[STPMIC1_##_id] = { \
.name = #_name, \
.desc = &stpmic1_regulator_cfgs[STPMIC1_##_id].desc, \
}
static struct of_regulator_match stpmic1_matches[] = {
MATCH(buck1, BUCK1),
MATCH(buck2, BUCK2),
MATCH(buck3, BUCK3),
MATCH(buck4, BUCK4),
MATCH(ldo1, LDO1),
MATCH(ldo2, LDO2),
MATCH(ldo3, LDO3),
MATCH(ldo4, LDO4),
MATCH(ldo5, LDO5),
MATCH(ldo6, LDO6),
MATCH(vref_ddr, VREF_DDR),
MATCH(boost, BOOST),
MATCH(pwr_sw1, VBUS_OTG),
MATCH(pwr_sw2, SW_OUT),
};
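/*
 * Register one regulator: hook it up to the shared PMIC regmap, set the
 * regulator's mask-reset bit when the DT node carries "st,mask-reset",
 * and wire the per-regulator interrupt (if any) to the over-current
 * notifier above.
 */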
static int stpmic1_regulator_register(struct platform_device *pdev, int id,
struct of_regulator_match *match,
const struct stpmic1_regulator_cfg *cfg)
{
struct stpmic1 *pmic_dev = dev_get_drvdata(pdev->dev.parent);
struct regulator_dev *rdev;
struct regulator_config config = {};
int ret = 0;
int irq;
config.dev = &pdev->dev;
config.init_data = match->init_data;
config.of_node = match->of_node;
config.regmap = pmic_dev->regmap;
config.driver_data = (void *)cfg;
rdev = devm_regulator_register(&pdev->dev, &cfg->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register %s regulator\n",
cfg->desc.name);
return PTR_ERR(rdev);
}
/* set mask reset */
if (of_property_read_bool(config.of_node, "st,mask-reset") &&
cfg->mask_reset_reg != 0) {
ret = regmap_update_bits(pmic_dev->regmap,
cfg->mask_reset_reg,
cfg->mask_reset_mask,
cfg->mask_reset_mask);
if (ret) {
dev_err(&pdev->dev, "set mask reset failed\n");
return ret;
}
}
/* setup an irq handler for over-current detection */
irq = of_irq_get(config.of_node, 0);
if (irq > 0) {
ret = devm_request_threaded_irq(&pdev->dev,
irq, NULL,
stpmic1_curlim_irq_handler,
IRQF_ONESHOT | IRQF_SHARED,
pdev->name, rdev);
if (ret) {
dev_err(&pdev->dev, "Request IRQ failed\n");
return ret;
}
}
return 0;
}
static int stpmic1_regulator_probe(struct platform_device *pdev)
{
int i, ret;
ret = of_regulator_match(&pdev->dev, pdev->dev.of_node, stpmic1_matches,
ARRAY_SIZE(stpmic1_matches));
if (ret < 0) {
dev_err(&pdev->dev,
"Error in PMIC regulator device tree node");
return ret;
}
for (i = 0; i < ARRAY_SIZE(stpmic1_regulator_cfgs); i++) {
ret = stpmic1_regulator_register(pdev, i, &stpmic1_matches[i],
&stpmic1_regulator_cfgs[i]);
if (ret < 0)
return ret;
}
dev_dbg(&pdev->dev, "stpmic1_regulator driver probed\n");
return 0;
}
static const struct of_device_id of_pmic_regulator_match[] = {
{ .compatible = "st,stpmic1-regulators" },
{ },
};
MODULE_DEVICE_TABLE(of, of_pmic_regulator_match);
static struct platform_driver stpmic1_regulator_driver = {
.driver = {
.name = "stpmic1-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(of_pmic_regulator_match),
},
.probe = stpmic1_regulator_probe,
};
module_platform_driver(stpmic1_regulator_driver);
MODULE_DESCRIPTION("STPMIC1 PMIC voltage regulator driver");
MODULE_AUTHOR("Pascal Paillet <[email protected]>");
MODULE_LICENSE("GPL v2");
|
// SPDX-License-Identifier: GPL-2.0
#include <stdint.h>
#include <pthread.h>
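/*
 * Minimal pthread barrier exercise: initialize a barrier with a count of
 * one (so the wait below returns immediately), wait on it, and return the
 * result of destroying it. Likely used as a build/feature probe for
 * pthread barrier support.
 */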
int main(void)
{
pthread_barrier_t barrier;
pthread_barrier_init(&barrier, NULL, 1);
pthread_barrier_wait(&barrier);
return pthread_barrier_destroy(&barrier);
}
|
#define SUBPROGS
#include "test_cls_redirect.c"
|
// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (C) IBM Corporation 2020
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/fsi.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>
#define FSI_ENGID_SPI 0x23
#define FSI_MBOX_ROOT_CTRL_8 0x2860
#define FSI_MBOX_ROOT_CTRL_8_SPI_MUX 0xf0000000
#define FSI2SPI_DATA0 0x00
#define FSI2SPI_DATA1 0x04
#define FSI2SPI_CMD 0x08
#define FSI2SPI_CMD_WRITE BIT(31)
#define FSI2SPI_RESET 0x18
#define FSI2SPI_STATUS 0x1c
#define FSI2SPI_STATUS_ANY_ERROR BIT(31)
#define FSI2SPI_IRQ 0x20
#define SPI_FSI_BASE 0x70000
#define SPI_FSI_TIMEOUT_MS 1000
#define SPI_FSI_MAX_RX_SIZE 8
#define SPI_FSI_MAX_TX_SIZE 40
#define SPI_FSI_ERROR 0x0
#define SPI_FSI_COUNTER_CFG 0x1
#define SPI_FSI_CFG1 0x2
#define SPI_FSI_CLOCK_CFG 0x3
#define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
#define SPI_FSI_CLOCK_CFG_ECC_DISABLE (BIT_ULL(35) | BIT_ULL(33))
#define SPI_FSI_CLOCK_CFG_RESET1 (BIT_ULL(36) | BIT_ULL(38))
#define SPI_FSI_CLOCK_CFG_RESET2 (BIT_ULL(37) | BIT_ULL(39))
#define SPI_FSI_CLOCK_CFG_MODE (BIT_ULL(41) | BIT_ULL(42))
#define SPI_FSI_CLOCK_CFG_SCK_RECV_DEL GENMASK_ULL(51, 44)
#define SPI_FSI_CLOCK_CFG_SCK_NO_DEL BIT_ULL(51)
#define SPI_FSI_CLOCK_CFG_SCK_DIV GENMASK_ULL(63, 52)
#define SPI_FSI_MMAP 0x4
#define SPI_FSI_DATA_TX 0x5
#define SPI_FSI_DATA_RX 0x6
#define SPI_FSI_SEQUENCE 0x7
#define SPI_FSI_SEQUENCE_STOP 0x00
#define SPI_FSI_SEQUENCE_SEL_SLAVE(x) (0x10 | ((x) & 0xf))
#define SPI_FSI_SEQUENCE_SHIFT_OUT(x) (0x30 | ((x) & 0xf))
#define SPI_FSI_SEQUENCE_SHIFT_IN(x) (0x40 | ((x) & 0xf))
#define SPI_FSI_SEQUENCE_COPY_DATA_TX 0xc0
#define SPI_FSI_SEQUENCE_BRANCH(x) (0xe0 | ((x) & 0xf))
#define SPI_FSI_STATUS 0x8
#define SPI_FSI_STATUS_ERROR \
(GENMASK_ULL(31, 21) | GENMASK_ULL(15, 12))
#define SPI_FSI_STATUS_SEQ_STATE GENMASK_ULL(55, 48)
#define SPI_FSI_STATUS_SEQ_STATE_IDLE BIT_ULL(48)
#define SPI_FSI_STATUS_TDR_UNDERRUN BIT_ULL(57)
#define SPI_FSI_STATUS_TDR_OVERRUN BIT_ULL(58)
#define SPI_FSI_STATUS_TDR_FULL BIT_ULL(59)
#define SPI_FSI_STATUS_RDR_UNDERRUN BIT_ULL(61)
#define SPI_FSI_STATUS_RDR_OVERRUN BIT_ULL(62)
#define SPI_FSI_STATUS_RDR_FULL BIT_ULL(63)
#define SPI_FSI_STATUS_ANY_ERROR \
(SPI_FSI_STATUS_ERROR | \
SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \
SPI_FSI_STATUS_RDR_OVERRUN)
#define SPI_FSI_PORT_CTRL 0x9
struct fsi2spi {
struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
struct mutex lock; /* lock access to the device */
};
struct fsi_spi {
struct device *dev; /* SPI controller device */
struct fsi2spi *bridge; /* FSI2SPI device */
u32 base;
};
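/*
 * The controller's sequencer program is packed into a single 64-bit
 * register, one opcode per byte, most significant byte first. "bit" holds
 * the shift amount for the next opcode (starting at 56) and "data"
 * accumulates the packed program.
 */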
struct fsi_spi_sequence {
int bit;
u64 data;
};
static int fsi_spi_check_mux(struct fsi_device *fsi, struct device *dev)
{
int rc;
u32 root_ctrl_8;
__be32 root_ctrl_8_be;
rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8_be,
sizeof(root_ctrl_8_be));
if (rc)
return rc;
root_ctrl_8 = be32_to_cpu(root_ctrl_8_be);
dev_dbg(dev, "Root control register 8: %08x\n", root_ctrl_8);
if ((root_ctrl_8 & FSI_MBOX_ROOT_CTRL_8_SPI_MUX) ==
FSI_MBOX_ROOT_CTRL_8_SPI_MUX)
return 0;
return -ENOLINK;
}
static int fsi_spi_check_status(struct fsi_spi *ctx)
{
int rc;
u32 sts;
__be32 sts_be;
rc = fsi_device_read(ctx->bridge->fsi, FSI2SPI_STATUS, &sts_be,
sizeof(sts_be));
if (rc)
return rc;
sts = be32_to_cpu(sts_be);
if (sts & FSI2SPI_STATUS_ANY_ERROR) {
dev_err(ctx->dev, "Error with FSI2SPI interface: %08x.\n", sts);
return -EIO;
}
return 0;
}
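/*
 * SPI register access goes through the FSI2SPI bridge: the target register
 * address is written to FSI2SPI_CMD (with FSI2SPI_CMD_WRITE set for writes)
 * and the 64-bit value is split across the DATA0/DATA1 words, with the
 * bridge status checked after each command.
 */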
static int fsi_spi_read_reg(struct fsi_spi *ctx, u32 offset, u64 *value)
{
int rc = 0;
__be32 cmd_be;
__be32 data_be;
u32 cmd = offset + ctx->base;
struct fsi2spi *bridge = ctx->bridge;
*value = 0ULL;
if (cmd & FSI2SPI_CMD_WRITE)
return -EINVAL;
rc = mutex_lock_interruptible(&bridge->lock);
if (rc)
return rc;
cmd_be = cpu_to_be32(cmd);
rc = fsi_device_write(bridge->fsi, FSI2SPI_CMD, &cmd_be,
sizeof(cmd_be));
if (rc)
goto unlock;
rc = fsi_spi_check_status(ctx);
if (rc)
goto unlock;
rc = fsi_device_read(bridge->fsi, FSI2SPI_DATA0, &data_be,
sizeof(data_be));
if (rc)
goto unlock;
*value |= (u64)be32_to_cpu(data_be) << 32;
rc = fsi_device_read(bridge->fsi, FSI2SPI_DATA1, &data_be,
sizeof(data_be));
if (rc)
goto unlock;
*value |= (u64)be32_to_cpu(data_be);
dev_dbg(ctx->dev, "Read %02x[%016llx].\n", offset, *value);
unlock:
mutex_unlock(&bridge->lock);
return rc;
}
static int fsi_spi_write_reg(struct fsi_spi *ctx, u32 offset, u64 value)
{
int rc = 0;
__be32 cmd_be;
__be32 data_be;
u32 cmd = offset + ctx->base;
struct fsi2spi *bridge = ctx->bridge;
if (cmd & FSI2SPI_CMD_WRITE)
return -EINVAL;
rc = mutex_lock_interruptible(&bridge->lock);
if (rc)
return rc;
dev_dbg(ctx->dev, "Write %02x[%016llx].\n", offset, value);
data_be = cpu_to_be32(upper_32_bits(value));
rc = fsi_device_write(bridge->fsi, FSI2SPI_DATA0, &data_be,
sizeof(data_be));
if (rc)
goto unlock;
data_be = cpu_to_be32(lower_32_bits(value));
rc = fsi_device_write(bridge->fsi, FSI2SPI_DATA1, &data_be,
sizeof(data_be));
if (rc)
goto unlock;
cmd_be = cpu_to_be32(cmd | FSI2SPI_CMD_WRITE);
rc = fsi_device_write(bridge->fsi, FSI2SPI_CMD, &cmd_be,
sizeof(cmd_be));
if (rc)
goto unlock;
rc = fsi_spi_check_status(ctx);
unlock:
mutex_unlock(&bridge->lock);
return rc;
}
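/*
 * Helpers for moving transfer payloads between byte buffers and the 64-bit
 * shift-register words used by the controller. At most eight bytes are
 * handled per word; both helpers return the number of bytes handled.
 */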
static int fsi_spi_data_in(u64 in, u8 *rx, int len)
{
int i;
int num_bytes = min(len, 8);
for (i = 0; i < num_bytes; ++i)
rx[i] = (u8)(in >> (8 * ((num_bytes - 1) - i)));
return num_bytes;
}
static int fsi_spi_data_out(u64 *out, const u8 *tx, int len)
{
int i;
int num_bytes = min(len, 8);
u8 *out_bytes = (u8 *)out;
/* Unused bytes of the tx data should be 0. */
*out = 0ULL;
for (i = 0; i < num_bytes; ++i)
out_bytes[8 - (i + 1)] = tx[i];
return num_bytes;
}
static int fsi_spi_reset(struct fsi_spi *ctx)
{
int rc;
dev_dbg(ctx->dev, "Resetting SPI controller.\n");
rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
SPI_FSI_CLOCK_CFG_RESET1);
if (rc)
return rc;
rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
SPI_FSI_CLOCK_CFG_RESET2);
if (rc)
return rc;
return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
}
static int fsi_spi_status(struct fsi_spi *ctx, u64 *status, const char *dir)
{
int rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, status);
if (rc)
return rc;
if (*status & SPI_FSI_STATUS_ANY_ERROR) {
dev_err(ctx->dev, "%s error: %016llx\n", dir, *status);
rc = fsi_spi_reset(ctx);
if (rc)
return rc;
return -EREMOTEIO;
}
return 0;
}
static void fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
{
/*
	 * Add the next byte of instruction to the 8-byte sequence register,
	 * then decrement the shift counter so that the next instruction will
	 * go in the right place.
*/
seq->data |= (u64)val << seq->bit;
seq->bit -= 8;
}
static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
{
seq->bit = 56;
seq->data = 0ULL;
}
static int fsi_spi_transfer_data(struct fsi_spi *ctx,
struct spi_transfer *transfer)
{
int loops;
int rc = 0;
unsigned long end;
u64 status = 0ULL;
if (transfer->tx_buf) {
int nb;
int sent = 0;
u64 out = 0ULL;
const u8 *tx = transfer->tx_buf;
while (transfer->len > sent) {
nb = fsi_spi_data_out(&out, &tx[sent],
(int)transfer->len - sent);
rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, out);
if (rc)
return rc;
loops = 0;
end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS);
do {
if (loops++ && time_after(jiffies, end))
return -ETIMEDOUT;
rc = fsi_spi_status(ctx, &status, "TX");
if (rc)
return rc;
} while (status & SPI_FSI_STATUS_TDR_FULL);
sent += nb;
}
} else if (transfer->rx_buf) {
int recv = 0;
u64 in = 0ULL;
u8 *rx = transfer->rx_buf;
while (transfer->len > recv) {
loops = 0;
end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS);
do {
if (loops++ && time_after(jiffies, end))
return -ETIMEDOUT;
rc = fsi_spi_status(ctx, &status, "RX");
if (rc)
return rc;
} while (!(status & SPI_FSI_STATUS_RDR_FULL));
rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
if (rc)
return rc;
recv += fsi_spi_data_in(in, &rx[recv],
(int)transfer->len - recv);
}
}
return 0;
}
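/*
 * Prepare the controller for a new transfer: poll (with a timeout) until
 * the sequencer is idle, resetting it once if an error or stale FIFO data
 * is seen, then clear the counter configuration and program the expected
 * clock configuration if it differs from what is currently set.
 */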
static int fsi_spi_transfer_init(struct fsi_spi *ctx)
{
int loops = 0;
int rc;
bool reset = false;
unsigned long end;
u64 seq_state;
u64 clock_cfg = 0ULL;
u64 status = 0ULL;
u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19);
end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS);
do {
if (loops++ && time_after(jiffies, end))
return -ETIMEDOUT;
rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, &status);
if (rc)
return rc;
seq_state = status & SPI_FSI_STATUS_SEQ_STATE;
if (status & (SPI_FSI_STATUS_ANY_ERROR |
SPI_FSI_STATUS_TDR_FULL |
SPI_FSI_STATUS_RDR_FULL)) {
if (reset) {
dev_err(ctx->dev,
"Initialization error: %08llx\n",
status);
return -EIO;
}
rc = fsi_spi_reset(ctx);
if (rc)
return rc;
reset = true;
continue;
}
} while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));
rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
if (rc)
return rc;
rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
if (rc)
return rc;
if ((clock_cfg & (SPI_FSI_CLOCK_CFG_MM_ENABLE |
SPI_FSI_CLOCK_CFG_ECC_DISABLE |
SPI_FSI_CLOCK_CFG_MODE |
SPI_FSI_CLOCK_CFG_SCK_RECV_DEL |
SPI_FSI_CLOCK_CFG_SCK_DIV)) != wanted_clock_cfg)
rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
wanted_clock_cfg);
return rc;
}
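/*
 * Message constraints imposed by the sequencer: each transfer must start
 * with a shift-out (tx) of at most SPI_FSI_MAX_TX_SIZE bytes, optionally
 * followed by a shift-in (rx) transfer of at most SPI_FSI_MAX_RX_SIZE
 * bytes; the rx transfer, if present, is consumed here in the same loop
 * iteration as the tx transfer that precedes it.
 */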
static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *mesg)
{
int rc;
u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(spi_get_chipselect(mesg->spi, 0) + 1);
unsigned int len;
struct spi_transfer *transfer;
struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
rc = fsi_spi_check_mux(ctx->bridge->fsi, ctx->dev);
if (rc)
goto error;
list_for_each_entry(transfer, &mesg->transfers, transfer_list) {
struct fsi_spi_sequence seq;
struct spi_transfer *next = NULL;
/* Sequencer must do shift out (tx) first. */
if (!transfer->tx_buf || transfer->len > SPI_FSI_MAX_TX_SIZE) {
rc = -EINVAL;
goto error;
}
dev_dbg(ctx->dev, "Start tx of %d bytes.\n", transfer->len);
rc = fsi_spi_transfer_init(ctx);
if (rc < 0)
goto error;
fsi_spi_sequence_init(&seq);
fsi_spi_sequence_add(&seq, seq_slave);
len = transfer->len;
while (len > 8) {
fsi_spi_sequence_add(&seq,
SPI_FSI_SEQUENCE_SHIFT_OUT(8));
len -= 8;
}
fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
if (!list_is_last(&transfer->transfer_list,
&mesg->transfers)) {
next = list_next_entry(transfer, transfer_list);
/* Sequencer can only do shift in (rx) after tx. */
if (next->rx_buf) {
u8 shift;
if (next->len > SPI_FSI_MAX_RX_SIZE) {
rc = -EINVAL;
goto error;
}
dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
next->len);
shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
fsi_spi_sequence_add(&seq, shift);
} else {
next = NULL;
}
}
fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SEL_SLAVE(0));
rc = fsi_spi_write_reg(ctx, SPI_FSI_SEQUENCE, seq.data);
if (rc)
goto error;
rc = fsi_spi_transfer_data(ctx, transfer);
if (rc)
goto error;
if (next) {
rc = fsi_spi_transfer_data(ctx, next);
if (rc)
goto error;
transfer = next;
}
}
error:
mesg->status = rc;
spi_finalize_current_message(ctlr);
return rc;
}
static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
{
return SPI_FSI_MAX_RX_SIZE;
}
static int fsi_spi_probe(struct device *dev)
{
int rc;
struct device_node *np;
int num_controllers_registered = 0;
struct fsi2spi *bridge;
struct fsi_device *fsi = to_fsi_dev(dev);
rc = fsi_spi_check_mux(fsi, dev);
if (rc)
return -ENODEV;
bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return -ENOMEM;
bridge->fsi = fsi;
mutex_init(&bridge->lock);
for_each_available_child_of_node(dev->of_node, np) {
u32 base;
struct fsi_spi *ctx;
struct spi_controller *ctlr;
if (of_property_read_u32(np, "reg", &base))
continue;
ctlr = spi_alloc_host(dev, sizeof(*ctx));
if (!ctlr) {
of_node_put(np);
break;
}
ctlr->dev.of_node = np;
ctlr->num_chipselect = of_get_available_child_count(np) ?: 1;
ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
ctlr->max_transfer_size = fsi_spi_max_transfer_size;
ctlr->transfer_one_message = fsi_spi_transfer_one_message;
ctx = spi_controller_get_devdata(ctlr);
ctx->dev = &ctlr->dev;
ctx->bridge = bridge;
ctx->base = base + SPI_FSI_BASE;
rc = devm_spi_register_controller(dev, ctlr);
if (rc)
spi_controller_put(ctlr);
else
num_controllers_registered++;
}
if (!num_controllers_registered)
return -ENODEV;
return 0;
}
static const struct fsi_device_id fsi_spi_ids[] = {
{ FSI_ENGID_SPI, FSI_VERSION_ANY },
{ }
};
MODULE_DEVICE_TABLE(fsi, fsi_spi_ids);
static struct fsi_driver fsi_spi_driver = {
.id_table = fsi_spi_ids,
.drv = {
.name = "spi-fsi",
.bus = &fsi_bus_type,
.probe = fsi_spi_probe,
},
};
module_fsi_driver(fsi_spi_driver);
MODULE_AUTHOR("Eddie James <[email protected]>");
MODULE_DESCRIPTION("FSI attached SPI controller");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ADXL345/346 Three-Axis Digital Accelerometers
*
* Enter bugs at http://blackfin.uclinux.org/
*
* Copyright (C) 2009 Michael Hennerich, Analog Devices Inc.
*/
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/input/adxl34x.h>
#include <linux/module.h>
#include "adxl34x.h"
/* ADXL345/6 Register Map */
#define DEVID 0x00 /* R Device ID */
#define THRESH_TAP 0x1D /* R/W Tap threshold */
#define OFSX 0x1E /* R/W X-axis offset */
#define OFSY 0x1F /* R/W Y-axis offset */
#define OFSZ 0x20 /* R/W Z-axis offset */
#define DUR 0x21 /* R/W Tap duration */
#define LATENT 0x22 /* R/W Tap latency */
#define WINDOW 0x23 /* R/W Tap window */
#define THRESH_ACT 0x24 /* R/W Activity threshold */
#define THRESH_INACT 0x25 /* R/W Inactivity threshold */
#define TIME_INACT 0x26 /* R/W Inactivity time */
#define ACT_INACT_CTL 0x27 /* R/W Axis enable control for activity and */
/* inactivity detection */
#define THRESH_FF 0x28 /* R/W Free-fall threshold */
#define TIME_FF 0x29 /* R/W Free-fall time */
#define TAP_AXES 0x2A /* R/W Axis control for tap/double tap */
#define ACT_TAP_STATUS 0x2B /* R Source of tap/double tap */
#define BW_RATE 0x2C /* R/W Data rate and power mode control */
#define POWER_CTL 0x2D /* R/W Power saving features control */
#define INT_ENABLE 0x2E /* R/W Interrupt enable control */
#define INT_MAP 0x2F /* R/W Interrupt mapping control */
#define INT_SOURCE 0x30 /* R Source of interrupts */
#define DATA_FORMAT 0x31 /* R/W Data format control */
#define DATAX0 0x32 /* R X-Axis Data 0 */
#define DATAX1 0x33 /* R X-Axis Data 1 */
#define DATAY0 0x34 /* R Y-Axis Data 0 */
#define DATAY1 0x35 /* R Y-Axis Data 1 */
#define DATAZ0 0x36 /* R Z-Axis Data 0 */
#define DATAZ1 0x37 /* R Z-Axis Data 1 */
#define FIFO_CTL 0x38 /* R/W FIFO control */
#define FIFO_STATUS 0x39 /* R FIFO status */
#define TAP_SIGN 0x3A /* R Sign and source for tap/double tap */
/* Orientation ADXL346 only */
#define ORIENT_CONF 0x3B /* R/W Orientation configuration */
#define ORIENT 0x3C /* R Orientation status */
/* DEVIDs */
#define ID_ADXL345 0xE5
#define ID_ADXL346 0xE6
/* INT_ENABLE/INT_MAP/INT_SOURCE Bits */
#define DATA_READY (1 << 7)
#define SINGLE_TAP (1 << 6)
#define DOUBLE_TAP (1 << 5)
#define ACTIVITY (1 << 4)
#define INACTIVITY (1 << 3)
#define FREE_FALL (1 << 2)
#define WATERMARK (1 << 1)
#define OVERRUN (1 << 0)
/* ACT_INACT_CONTROL Bits */
#define ACT_ACDC (1 << 7)
#define ACT_X_EN (1 << 6)
#define ACT_Y_EN (1 << 5)
#define ACT_Z_EN (1 << 4)
#define INACT_ACDC (1 << 3)
#define INACT_X_EN (1 << 2)
#define INACT_Y_EN (1 << 1)
#define INACT_Z_EN (1 << 0)
/* TAP_AXES Bits */
#define SUPPRESS (1 << 3)
#define TAP_X_EN (1 << 2)
#define TAP_Y_EN (1 << 1)
#define TAP_Z_EN (1 << 0)
/* ACT_TAP_STATUS Bits */
#define ACT_X_SRC (1 << 6)
#define ACT_Y_SRC (1 << 5)
#define ACT_Z_SRC (1 << 4)
#define ASLEEP (1 << 3)
#define TAP_X_SRC (1 << 2)
#define TAP_Y_SRC (1 << 1)
#define TAP_Z_SRC (1 << 0)
/* BW_RATE Bits */
#define LOW_POWER (1 << 4)
#define RATE(x) ((x) & 0xF)
/* POWER_CTL Bits */
#define PCTL_LINK (1 << 5)
#define PCTL_AUTO_SLEEP (1 << 4)
#define PCTL_MEASURE (1 << 3)
#define PCTL_SLEEP (1 << 2)
#define PCTL_WAKEUP(x) ((x) & 0x3)
/* DATA_FORMAT Bits */
#define SELF_TEST (1 << 7)
#define SPI (1 << 6)
#define INT_INVERT (1 << 5)
#define FULL_RES (1 << 3)
#define JUSTIFY (1 << 2)
#define RANGE(x) ((x) & 0x3)
#define RANGE_PM_2g 0
#define RANGE_PM_4g 1
#define RANGE_PM_8g 2
#define RANGE_PM_16g 3
/*
* Maximum value our axis may get in full res mode for the input device
* (signed 13 bits)
*/
#define ADXL_FULLRES_MAX_VAL 4096
/*
* Maximum value our axis may get in fixed res mode for the input device
* (signed 10 bits)
*/
#define ADXL_FIXEDRES_MAX_VAL 512
/* FIFO_CTL Bits */
#define FIFO_MODE(x) (((x) & 0x3) << 6)
#define FIFO_BYPASS 0
#define FIFO_FIFO 1
#define FIFO_STREAM 2
#define FIFO_TRIGGER 3
#define TRIGGER (1 << 5)
#define SAMPLES(x) ((x) & 0x1F)
/* FIFO_STATUS Bits */
#define FIFO_TRIG (1 << 7)
#define ENTRIES(x) ((x) & 0x3F)
/* TAP_SIGN Bits ADXL346 only */
#define XSIGN (1 << 6)
#define YSIGN (1 << 5)
#define ZSIGN (1 << 4)
#define XTAP (1 << 3)
#define YTAP (1 << 2)
#define ZTAP (1 << 1)
/* ORIENT_CONF ADXL346 only */
#define ORIENT_DEADZONE(x) (((x) & 0x7) << 4)
#define ORIENT_DIVISOR(x) ((x) & 0x7)
/* ORIENT ADXL346 only */
#define ADXL346_2D_VALID (1 << 6)
#define ADXL346_2D_ORIENT(x) (((x) & 0x30) >> 4)
#define ADXL346_3D_VALID (1 << 3)
#define ADXL346_3D_ORIENT(x) ((x) & 0x7)
#define ADXL346_2D_PORTRAIT_POS 0 /* +X */
#define ADXL346_2D_PORTRAIT_NEG 1 /* -X */
#define ADXL346_2D_LANDSCAPE_POS 2 /* +Y */
#define ADXL346_2D_LANDSCAPE_NEG 3 /* -Y */
#define ADXL346_3D_FRONT 3 /* +X */
#define ADXL346_3D_BACK 4 /* -X */
#define ADXL346_3D_RIGHT 2 /* +Y */
#define ADXL346_3D_LEFT 5 /* -Y */
#define ADXL346_3D_TOP 1 /* +Z */
#define ADXL346_3D_BOTTOM 6 /* -Z */
#undef ADXL_DEBUG
#define ADXL_X_AXIS 0
#define ADXL_Y_AXIS 1
#define ADXL_Z_AXIS 2
#define AC_READ(ac, reg) ((ac)->bops->read((ac)->dev, reg))
#define AC_WRITE(ac, reg, val) ((ac)->bops->write((ac)->dev, reg, val))
struct axis_triple {
int x;
int y;
int z;
};
struct adxl34x {
struct device *dev;
struct input_dev *input;
struct mutex mutex; /* reentrant protection for struct */
struct adxl34x_platform_data pdata;
struct axis_triple swcal;
struct axis_triple hwcal;
struct axis_triple saved;
char phys[32];
unsigned orient2d_saved;
unsigned orient3d_saved;
bool disabled; /* P: mutex */
bool opened; /* P: mutex */
bool suspended; /* P: mutex */
bool fifo_delay;
int irq;
unsigned model;
unsigned int_mask;
const struct adxl34x_bus_ops *bops;
};
static const struct adxl34x_platform_data adxl34x_default_init = {
.tap_threshold = 35,
.tap_duration = 3,
.tap_latency = 20,
.tap_window = 20,
.tap_axis_control = ADXL_TAP_X_EN | ADXL_TAP_Y_EN | ADXL_TAP_Z_EN,
.act_axis_control = 0xFF,
.activity_threshold = 6,
.inactivity_threshold = 4,
.inactivity_time = 3,
.free_fall_threshold = 8,
.free_fall_time = 0x20,
.data_rate = 8,
.data_range = ADXL_FULL_RES,
.ev_type = EV_ABS,
	.ev_code_x = ABS_X,	/* or REL_X if ev_type is EV_REL */
	.ev_code_y = ABS_Y,	/* or REL_Y if ev_type is EV_REL */
	.ev_code_z = ABS_Z,	/* or REL_Z if ev_type is EV_REL */
.ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY {x,y,z} */
.power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
.fifo_mode = ADXL_FIFO_STREAM,
.watermark = 0,
};
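/*
 * Burst-read the six data registers (DATAX0..DATAZ1) as three little-endian
 * 16-bit samples, cache them for the calibration/position sysfs handlers
 * and hand them back to the caller.
 */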
static void adxl34x_get_triple(struct adxl34x *ac, struct axis_triple *axis)
{
__le16 buf[3];
ac->bops->read_block(ac->dev, DATAX0, DATAZ1 - DATAX0 + 1, buf);
guard(mutex)(&ac->mutex);
ac->saved.x = (s16) le16_to_cpu(buf[0]);
axis->x = ac->saved.x;
ac->saved.y = (s16) le16_to_cpu(buf[1]);
axis->y = ac->saved.y;
ac->saved.z = (s16) le16_to_cpu(buf[2]);
axis->z = ac->saved.z;
}
static void adxl34x_service_ev_fifo(struct adxl34x *ac)
{
struct adxl34x_platform_data *pdata = &ac->pdata;
struct axis_triple axis;
adxl34x_get_triple(ac, &axis);
input_event(ac->input, pdata->ev_type, pdata->ev_code_x,
axis.x - ac->swcal.x);
input_event(ac->input, pdata->ev_type, pdata->ev_code_y,
axis.y - ac->swcal.y);
input_event(ac->input, pdata->ev_type, pdata->ev_code_z,
axis.z - ac->swcal.z);
}
static void adxl34x_report_key_single(struct input_dev *input, int key)
{
input_report_key(input, key, true);
input_sync(input);
input_report_key(input, key, false);
}
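/*
 * ACT_TAP_STATUS reports the tap source with Z in bit 0 and X in bit 2,
 * i.e. in the reverse order of the ADXL_*_AXIS indices, hence the
 * (ADXL_Z_AXIS - i) bit selection below.
 */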
static void adxl34x_send_key_events(struct adxl34x *ac,
struct adxl34x_platform_data *pdata, int status, int press)
{
int i;
for (i = ADXL_X_AXIS; i <= ADXL_Z_AXIS; i++) {
if (status & (1 << (ADXL_Z_AXIS - i)))
input_report_key(ac->input,
pdata->ev_code_tap[i], press);
}
}
static void adxl34x_do_tap(struct adxl34x *ac,
struct adxl34x_platform_data *pdata, int status)
{
adxl34x_send_key_events(ac, pdata, status, true);
input_sync(ac->input);
adxl34x_send_key_events(ac, pdata, status, false);
}
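/*
 * Threaded interrupt handler: read ACT_TAP_STATUS first (only when tap
 * detection is enabled), then INT_SOURCE to find out which events fired,
 * and report free-fall, tap, activity/inactivity, orientation and data
 * ready/watermark events accordingly.
 */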
static irqreturn_t adxl34x_irq(int irq, void *handle)
{
struct adxl34x *ac = handle;
struct adxl34x_platform_data *pdata = &ac->pdata;
int int_stat, tap_stat, samples, orient, orient_code;
/*
* ACT_TAP_STATUS should be read before clearing the interrupt
* Avoid reading ACT_TAP_STATUS in case TAP detection is disabled
*/
if (pdata->tap_axis_control & (TAP_X_EN | TAP_Y_EN | TAP_Z_EN))
tap_stat = AC_READ(ac, ACT_TAP_STATUS);
else
tap_stat = 0;
int_stat = AC_READ(ac, INT_SOURCE);
if (int_stat & FREE_FALL)
adxl34x_report_key_single(ac->input, pdata->ev_code_ff);
if (int_stat & OVERRUN)
dev_dbg(ac->dev, "OVERRUN\n");
if (int_stat & (SINGLE_TAP | DOUBLE_TAP)) {
adxl34x_do_tap(ac, pdata, tap_stat);
if (int_stat & DOUBLE_TAP)
adxl34x_do_tap(ac, pdata, tap_stat);
}
if (pdata->ev_code_act_inactivity) {
if (int_stat & ACTIVITY)
input_report_key(ac->input,
pdata->ev_code_act_inactivity, 1);
if (int_stat & INACTIVITY)
input_report_key(ac->input,
pdata->ev_code_act_inactivity, 0);
}
/*
* ORIENTATION SENSING ADXL346 only
*/
if (pdata->orientation_enable) {
orient = AC_READ(ac, ORIENT);
if ((pdata->orientation_enable & ADXL_EN_ORIENTATION_2D) &&
(orient & ADXL346_2D_VALID)) {
orient_code = ADXL346_2D_ORIENT(orient);
/* Report orientation only when it changes */
if (ac->orient2d_saved != orient_code) {
ac->orient2d_saved = orient_code;
adxl34x_report_key_single(ac->input,
pdata->ev_codes_orient_2d[orient_code]);
}
}
if ((pdata->orientation_enable & ADXL_EN_ORIENTATION_3D) &&
(orient & ADXL346_3D_VALID)) {
orient_code = ADXL346_3D_ORIENT(orient) - 1;
/* Report orientation only when it changes */
if (ac->orient3d_saved != orient_code) {
ac->orient3d_saved = orient_code;
adxl34x_report_key_single(ac->input,
pdata->ev_codes_orient_3d[orient_code]);
}
}
}
if (int_stat & (DATA_READY | WATERMARK)) {
if (pdata->fifo_mode)
samples = ENTRIES(AC_READ(ac, FIFO_STATUS)) + 1;
else
samples = 1;
for (; samples > 0; samples--) {
adxl34x_service_ev_fifo(ac);
/*
* To ensure that the FIFO has
* completely popped, there must be at least 5 us between
* the end of reading the data registers, signified by the
* transition to register 0x38 from 0x37 or the CS pin
* going high, and the start of new reads of the FIFO or
* reading the FIFO_STATUS register. For SPI operation at
* 1.5 MHz or lower, the register addressing portion of the
* transmission is sufficient delay to ensure the FIFO has
* completely popped. It is necessary for SPI operation
* greater than 1.5 MHz to de-assert the CS pin to ensure a
* total of 5 us, which is at most 3.4 us at 5 MHz
* operation.
*/
if (ac->fifo_delay && (samples > 1))
udelay(3);
}
}
input_sync(ac->input);
return IRQ_HANDLED;
}
static void __adxl34x_disable(struct adxl34x *ac)
{
/*
* A '0' places the ADXL34x into standby mode
* with minimum power consumption.
*/
AC_WRITE(ac, POWER_CTL, 0);
}
static void __adxl34x_enable(struct adxl34x *ac)
{
AC_WRITE(ac, POWER_CTL, ac->pdata.power_mode | PCTL_MEASURE);
}
static int adxl34x_suspend(struct device *dev)
{
struct adxl34x *ac = dev_get_drvdata(dev);
guard(mutex)(&ac->mutex);
if (!ac->suspended && !ac->disabled && ac->opened)
__adxl34x_disable(ac);
ac->suspended = true;
return 0;
}
static int adxl34x_resume(struct device *dev)
{
struct adxl34x *ac = dev_get_drvdata(dev);
guard(mutex)(&ac->mutex);
if (ac->suspended && !ac->disabled && ac->opened)
__adxl34x_enable(ac);
ac->suspended = false;
return 0;
}
static ssize_t adxl34x_disable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adxl34x *ac = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", ac->disabled);
}
static ssize_t adxl34x_disable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
unsigned int val;
int error;
error = kstrtouint(buf, 10, &val);
if (error)
return error;
guard(mutex)(&ac->mutex);
if (!ac->suspended && ac->opened) {
if (val) {
if (!ac->disabled)
__adxl34x_disable(ac);
} else {
if (ac->disabled)
__adxl34x_enable(ac);
}
}
ac->disabled = !!val;
return count;
}
static DEVICE_ATTR(disable, 0664, adxl34x_disable_show, adxl34x_disable_store);
static ssize_t adxl34x_calibrate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adxl34x *ac = dev_get_drvdata(dev);
guard(mutex)(&ac->mutex);
return sprintf(buf, "%d,%d,%d\n",
ac->hwcal.x * 4 + ac->swcal.x,
ac->hwcal.y * 4 + ac->swcal.y,
ac->hwcal.z * 4 + ac->swcal.z);
}
static ssize_t adxl34x_calibrate_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
/*
* Hardware offset calibration has a resolution of 15.6 mg/LSB.
* We use HW calibration and handle the remaining bits in SW. (4mg/LSB)
*/
guard(mutex)(&ac->mutex);
ac->hwcal.x -= (ac->saved.x / 4);
ac->swcal.x = ac->saved.x % 4;
ac->hwcal.y -= (ac->saved.y / 4);
ac->swcal.y = ac->saved.y % 4;
ac->hwcal.z -= (ac->saved.z / 4);
ac->swcal.z = ac->saved.z % 4;
AC_WRITE(ac, OFSX, (s8) ac->hwcal.x);
AC_WRITE(ac, OFSY, (s8) ac->hwcal.y);
AC_WRITE(ac, OFSZ, (s8) ac->hwcal.z);
return count;
}
static DEVICE_ATTR(calibrate, 0664,
adxl34x_calibrate_show, adxl34x_calibrate_store);
static ssize_t adxl34x_rate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adxl34x *ac = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", RATE(ac->pdata.data_rate));
}
static ssize_t adxl34x_rate_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
unsigned char val;
int error;
error = kstrtou8(buf, 10, &val);
if (error)
return error;
guard(mutex)(&ac->mutex);
ac->pdata.data_rate = RATE(val);
AC_WRITE(ac, BW_RATE,
ac->pdata.data_rate |
(ac->pdata.low_power_mode ? LOW_POWER : 0));
return count;
}
static DEVICE_ATTR(rate, 0664, adxl34x_rate_show, adxl34x_rate_store);
static ssize_t adxl34x_autosleep_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adxl34x *ac = dev_get_drvdata(dev);
return sprintf(buf, "%u\n",
ac->pdata.power_mode & (PCTL_AUTO_SLEEP | PCTL_LINK) ? 1 : 0);
}
static ssize_t adxl34x_autosleep_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
unsigned int val;
int error;
error = kstrtouint(buf, 10, &val);
if (error)
return error;
guard(mutex)(&ac->mutex);
if (val)
ac->pdata.power_mode |= (PCTL_AUTO_SLEEP | PCTL_LINK);
else
ac->pdata.power_mode &= ~(PCTL_AUTO_SLEEP | PCTL_LINK);
if (!ac->disabled && !ac->suspended && ac->opened)
AC_WRITE(ac, POWER_CTL, ac->pdata.power_mode | PCTL_MEASURE);
return count;
}
static DEVICE_ATTR(autosleep, 0664,
adxl34x_autosleep_show, adxl34x_autosleep_store);
static ssize_t adxl34x_position_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adxl34x *ac = dev_get_drvdata(dev);
guard(mutex)(&ac->mutex);
return sprintf(buf, "(%d, %d, %d)\n",
ac->saved.x, ac->saved.y, ac->saved.z);
}
static DEVICE_ATTR(position, S_IRUGO, adxl34x_position_show, NULL);
#ifdef ADXL_DEBUG
static ssize_t adxl34x_write_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
unsigned int val;
int error;
/*
* This allows basic ADXL register write access for debug purposes.
*/
error = kstrtouint(buf, 16, &val);
if (error)
return error;
guard(mutex)(&ac->mutex);
AC_WRITE(ac, val >> 8, val & 0xFF);
return count;
}
static DEVICE_ATTR(write, 0664, NULL, adxl34x_write_store);
#endif
static struct attribute *adxl34x_attributes[] = {
&dev_attr_disable.attr,
&dev_attr_calibrate.attr,
&dev_attr_rate.attr,
&dev_attr_autosleep.attr,
&dev_attr_position.attr,
#ifdef ADXL_DEBUG
&dev_attr_write.attr,
#endif
NULL
};
static const struct attribute_group adxl34x_attr_group = {
.attrs = adxl34x_attributes,
};
const struct attribute_group *adxl34x_groups[] = {
&adxl34x_attr_group,
NULL
};
EXPORT_SYMBOL_GPL(adxl34x_groups);
static int adxl34x_input_open(struct input_dev *input)
{
struct adxl34x *ac = input_get_drvdata(input);
guard(mutex)(&ac->mutex);
if (!ac->suspended && !ac->disabled)
__adxl34x_enable(ac);
ac->opened = true;
return 0;
}
static void adxl34x_input_close(struct input_dev *input)
{
struct adxl34x *ac = input_get_drvdata(input);
guard(mutex)(&ac->mutex);
if (!ac->suspended && !ac->disabled)
__adxl34x_disable(ac);
ac->opened = false;
}
struct adxl34x *adxl34x_probe(struct device *dev, int irq,
bool fifo_delay_default,
const struct adxl34x_bus_ops *bops)
{
struct adxl34x *ac;
struct input_dev *input_dev;
const struct adxl34x_platform_data *pdata;
int error, range, i;
int revid;
if (!irq) {
dev_err(dev, "no IRQ?\n");
return ERR_PTR(-ENODEV);
}
ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
if (!ac)
return ERR_PTR(-ENOMEM);
input_dev = devm_input_allocate_device(dev);
if (!input_dev)
return ERR_PTR(-ENOMEM);
ac->fifo_delay = fifo_delay_default;
pdata = dev_get_platdata(dev);
if (!pdata) {
dev_dbg(dev,
"No platform data: Using default initialization\n");
pdata = &adxl34x_default_init;
}
ac->pdata = *pdata;
pdata = &ac->pdata;
ac->input = input_dev;
ac->dev = dev;
ac->irq = irq;
ac->bops = bops;
mutex_init(&ac->mutex);
input_dev->name = "ADXL34x accelerometer";
revid = AC_READ(ac, DEVID);
switch (revid) {
case ID_ADXL345:
ac->model = 345;
break;
case ID_ADXL346:
ac->model = 346;
break;
default:
dev_err(dev, "Failed to probe %s\n", input_dev->name);
return ERR_PTR(-ENODEV);
}
snprintf(ac->phys, sizeof(ac->phys), "%s/input0", dev_name(dev));
input_dev->phys = ac->phys;
input_dev->id.product = ac->model;
input_dev->id.bustype = bops->bustype;
input_dev->open = adxl34x_input_open;
input_dev->close = adxl34x_input_close;
input_set_drvdata(input_dev, ac);
if (ac->pdata.ev_type == EV_REL) {
input_set_capability(input_dev, EV_REL, REL_X);
input_set_capability(input_dev, EV_REL, REL_Y);
input_set_capability(input_dev, EV_REL, REL_Z);
} else {
/* EV_ABS */
if (pdata->data_range & FULL_RES)
range = ADXL_FULLRES_MAX_VAL; /* Signed 13-bit */
else
range = ADXL_FIXEDRES_MAX_VAL; /* Signed 10-bit */
input_set_abs_params(input_dev, ABS_X, -range, range, 3, 3);
input_set_abs_params(input_dev, ABS_Y, -range, range, 3, 3);
input_set_abs_params(input_dev, ABS_Z, -range, range, 3, 3);
}
input_set_capability(input_dev, EV_KEY, pdata->ev_code_tap[ADXL_X_AXIS]);
input_set_capability(input_dev, EV_KEY, pdata->ev_code_tap[ADXL_Y_AXIS]);
input_set_capability(input_dev, EV_KEY, pdata->ev_code_tap[ADXL_Z_AXIS]);
if (pdata->ev_code_ff) {
ac->int_mask = FREE_FALL;
input_set_capability(input_dev, EV_KEY, pdata->ev_code_ff);
}
if (pdata->ev_code_act_inactivity)
input_set_capability(input_dev, EV_KEY,
pdata->ev_code_act_inactivity);
ac->int_mask |= ACTIVITY | INACTIVITY;
if (pdata->watermark) {
ac->int_mask |= WATERMARK;
if (FIFO_MODE(pdata->fifo_mode) == FIFO_BYPASS)
ac->pdata.fifo_mode |= FIFO_STREAM;
} else {
ac->int_mask |= DATA_READY;
}
if (pdata->tap_axis_control & (TAP_X_EN | TAP_Y_EN | TAP_Z_EN))
ac->int_mask |= SINGLE_TAP | DOUBLE_TAP;
if (FIFO_MODE(pdata->fifo_mode) == FIFO_BYPASS)
ac->fifo_delay = false;
AC_WRITE(ac, POWER_CTL, 0);
error = devm_request_threaded_irq(dev, ac->irq, NULL, adxl34x_irq,
IRQF_ONESHOT, dev_name(dev), ac);
if (error) {
dev_err(dev, "irq %d busy?\n", ac->irq);
return ERR_PTR(error);
}
error = input_register_device(input_dev);
if (error)
return ERR_PTR(error);
AC_WRITE(ac, OFSX, pdata->x_axis_offset);
ac->hwcal.x = pdata->x_axis_offset;
AC_WRITE(ac, OFSY, pdata->y_axis_offset);
ac->hwcal.y = pdata->y_axis_offset;
AC_WRITE(ac, OFSZ, pdata->z_axis_offset);
ac->hwcal.z = pdata->z_axis_offset;
AC_WRITE(ac, THRESH_TAP, pdata->tap_threshold);
AC_WRITE(ac, DUR, pdata->tap_duration);
AC_WRITE(ac, LATENT, pdata->tap_latency);
AC_WRITE(ac, WINDOW, pdata->tap_window);
AC_WRITE(ac, THRESH_ACT, pdata->activity_threshold);
AC_WRITE(ac, THRESH_INACT, pdata->inactivity_threshold);
AC_WRITE(ac, TIME_INACT, pdata->inactivity_time);
AC_WRITE(ac, THRESH_FF, pdata->free_fall_threshold);
AC_WRITE(ac, TIME_FF, pdata->free_fall_time);
AC_WRITE(ac, TAP_AXES, pdata->tap_axis_control);
AC_WRITE(ac, ACT_INACT_CTL, pdata->act_axis_control);
AC_WRITE(ac, BW_RATE, RATE(ac->pdata.data_rate) |
(pdata->low_power_mode ? LOW_POWER : 0));
AC_WRITE(ac, DATA_FORMAT, pdata->data_range);
AC_WRITE(ac, FIFO_CTL, FIFO_MODE(pdata->fifo_mode) |
SAMPLES(pdata->watermark));
if (pdata->use_int2) {
/* Map all INTs to INT2 */
AC_WRITE(ac, INT_MAP, ac->int_mask | OVERRUN);
} else {
/* Map all INTs to INT1 */
AC_WRITE(ac, INT_MAP, 0);
}
if (ac->model == 346 && ac->pdata.orientation_enable) {
AC_WRITE(ac, ORIENT_CONF,
ORIENT_DEADZONE(ac->pdata.deadzone_angle) |
ORIENT_DIVISOR(ac->pdata.divisor_length));
ac->orient2d_saved = 1234;
ac->orient3d_saved = 1234;
if (pdata->orientation_enable & ADXL_EN_ORIENTATION_3D)
for (i = 0; i < ARRAY_SIZE(pdata->ev_codes_orient_3d); i++)
input_set_capability(input_dev, EV_KEY,
pdata->ev_codes_orient_3d[i]);
if (pdata->orientation_enable & ADXL_EN_ORIENTATION_2D)
for (i = 0; i < ARRAY_SIZE(pdata->ev_codes_orient_2d); i++)
input_set_capability(input_dev, EV_KEY,
pdata->ev_codes_orient_2d[i]);
} else {
ac->pdata.orientation_enable = 0;
}
AC_WRITE(ac, INT_ENABLE, ac->int_mask | OVERRUN);
ac->pdata.power_mode &= (PCTL_AUTO_SLEEP | PCTL_LINK);
return ac;
}
EXPORT_SYMBOL_GPL(adxl34x_probe);
EXPORT_GPL_SIMPLE_DEV_PM_OPS(adxl34x_pm, adxl34x_suspend, adxl34x_resume);
MODULE_AUTHOR("Michael Hennerich <[email protected]>");
MODULE_DESCRIPTION("ADXL345/346 Three-Axis Digital Accelerometer Driver");
MODULE_LICENSE("GPL");
|
/*
* Core definitions and data structures shareable across OS platforms.
*
* Copyright (c) 1994-2001 Justin T. Gibbs.
* Copyright (c) 2000-2001 Adaptec Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.h#85 $
*
* $FreeBSD$
*/
#ifndef _AIC7XXX_H_
#define _AIC7XXX_H_
/* Register Definitions */
#include "aic7xxx_reg.h"
/************************* Forward Declarations *******************************/
struct ahc_platform_data;
struct scb_platform_data;
struct seeprom_descriptor;
/****************************** Useful Macros *********************************/
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
#define ALL_CHANNELS '\0'
#define ALL_TARGETS_MASK 0xFFFF
#define INITIATOR_WILDCARD (~0)
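/*
 * Helpers for picking apart the hardware SCSIID byte: the target ID sits in
 * the TID field (shifted down by TID_SHIFT), our own ID in the OID bits,
 * and on twin-channel (AHC_TWIN) controllers the TWIN_CHNLB bit selects
 * bus 'B'.
 */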
#define SCSIID_TARGET(ahc, scsiid) \
(((scsiid) & ((((ahc)->features & AHC_TWIN) != 0) ? TWIN_TID : TID)) \
>> TID_SHIFT)
#define SCSIID_OUR_ID(scsiid) \
((scsiid) & OID)
#define SCSIID_CHANNEL(ahc, scsiid) \
((((ahc)->features & AHC_TWIN) != 0) \
? ((((scsiid) & TWIN_CHNLB) != 0) ? 'B' : 'A') \
: 'A')
#define SCB_IS_SCSIBUS_B(ahc, scb) \
(SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid) == 'B')
#define SCB_GET_OUR_ID(scb) \
SCSIID_OUR_ID((scb)->hscb->scsiid)
#define SCB_GET_TARGET(ahc, scb) \
SCSIID_TARGET((ahc), (scb)->hscb->scsiid)
#define SCB_GET_CHANNEL(ahc, scb) \
SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid)
#define SCB_GET_LUN(scb) \
((scb)->hscb->lun & LID)
#define SCB_GET_TARGET_OFFSET(ahc, scb) \
(SCB_GET_TARGET(ahc, scb) + (SCB_IS_SCSIBUS_B(ahc, scb) ? 8 : 0))
#define SCB_GET_TARGET_MASK(ahc, scb) \
(0x01 << (SCB_GET_TARGET_OFFSET(ahc, scb)))
#ifdef AHC_DEBUG
#define SCB_IS_SILENT(scb) \
((ahc_debug & AHC_SHOW_MASKED_ERRORS) == 0 \
&& (((scb)->flags & SCB_SILENT) != 0))
#else
#define SCB_IS_SILENT(scb) \
(((scb)->flags & SCB_SILENT) != 0)
#endif
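/*
 * A TCL value packs the upper scsiid bits above the lun; BUILD_TCL creates
 * one and TCL_TARGET_OFFSET/TCL_LUN recover the target offset and lun
 * from it.
 */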
#define TCL_TARGET_OFFSET(tcl) \
((((tcl) >> 4) & TID) >> 4)
#define TCL_LUN(tcl) \
	((tcl) & (AHC_NUM_LUNS - 1))
#define BUILD_TCL(scsiid, lun) \
((lun) | (((scsiid) & TID) << 4))
#ifndef AHC_TARGET_MODE
#undef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif
/**************************** Driver Constants ********************************/
/*
* The maximum number of supported targets.
*/
#define AHC_NUM_TARGETS 16
/*
* The maximum number of supported luns.
* The identify message only supports 64 luns in SPI3.
* You can have 2^64 luns when information unit transfers are enabled,
* but it is doubtful this driver will ever support IUTs.
*/
#define AHC_NUM_LUNS 64
/*
* The maximum transfer per S/G segment.
*/
#define AHC_MAXTRANSFER_SIZE 0x00ffffff /* limited by 24bit counter */
/*
* The maximum amount of SCB storage in hardware on a controller.
* This value represents an upper bound. Controllers vary in the number
* they actually support.
*/
#define AHC_SCB_MAX 255
/*
* The maximum number of concurrent transactions supported per driver instance.
* Sequencer Control Blocks (SCBs) store per-transaction information. Although
* the space for SCBs on the host adapter varies by model, the driver will
* page the SCBs between host and controller memory as needed. We are limited
* to 253 because:
* 1) The 8bit nature of the RISC engine holds us to an 8bit value.
* 2) We reserve one value, 255, to represent the invalid element.
* 3) Our input queue scheme requires one SCB to always be reserved
* in advance of queuing any SCBs. This takes us down to 254.
* 4) To handle our output queue correctly on machines that only
* support 32bit stores, we must clear the array 4 bytes at a
* time. To avoid colliding with a DMA write from the sequencer,
* we must be sure that 4 slots are empty when we write to clear
* the queue. This reduces us to 253 SCBs: 1 that just completed
* and the known three additional empty slots in the queue that
* precede it.
*/
#define AHC_MAX_QUEUE 253
/*
* The maximum amount of SCB storage we allocate in host memory. This
* number should reflect the 1 additional SCB we require to handle our
* qinfifo mechanism.
*/
#define AHC_SCB_MAX_ALLOC (AHC_MAX_QUEUE+1)
/*
* Ring Buffer of incoming target commands.
* We allocate 256 to simplify the logic in the sequencer
* by using the natural wrap point of an 8bit counter.
*/
#define AHC_TMODE_CMDS 256
/* Reset line assertion time in us */
#define AHC_BUSRESET_DELAY 25
/******************* Chip Characteristics/Operating Settings *****************/
/*
* Chip Type
* The chip order is from least sophisticated to most sophisticated.
*/
typedef enum {
AHC_NONE = 0x0000,
AHC_CHIPID_MASK = 0x00FF,
AHC_AIC7770 = 0x0001,
AHC_AIC7850 = 0x0002,
AHC_AIC7855 = 0x0003,
AHC_AIC7859 = 0x0004,
AHC_AIC7860 = 0x0005,
AHC_AIC7870 = 0x0006,
AHC_AIC7880 = 0x0007,
AHC_AIC7895 = 0x0008,
AHC_AIC7895C = 0x0009,
AHC_AIC7890 = 0x000a,
AHC_AIC7896 = 0x000b,
AHC_AIC7892 = 0x000c,
AHC_AIC7899 = 0x000d,
AHC_VL = 0x0100, /* Bus type VL */
AHC_EISA = 0x0200, /* Bus type EISA */
AHC_PCI = 0x0400, /* Bus type PCI */
AHC_BUS_MASK = 0x0F00
} ahc_chip;
/*
* Features available in each chip type.
*/
typedef enum {
AHC_FENONE = 0x00000,
AHC_ULTRA = 0x00001, /* Supports 20MHz Transfers */
AHC_ULTRA2 = 0x00002, /* Supports 40MHz Transfers */
AHC_WIDE = 0x00004, /* Wide Channel */
AHC_TWIN = 0x00008, /* Twin Channel */
AHC_MORE_SRAM = 0x00010, /* 80 bytes instead of 64 */
AHC_CMD_CHAN = 0x00020, /* Has a Command DMA Channel */
AHC_QUEUE_REGS = 0x00040, /* Has Queue management registers */
AHC_SG_PRELOAD = 0x00080, /* Can perform auto-SG preload */
AHC_SPIOCAP = 0x00100, /* Has a Serial Port I/O Cap Register */
AHC_MULTI_TID = 0x00200, /* Has bitmask of TIDs for select-in */
AHC_HS_MAILBOX = 0x00400, /* Has HS_MAILBOX register */
AHC_DT = 0x00800, /* Double Transition transfers */
AHC_NEW_TERMCTL = 0x01000, /* Newer termination scheme */
AHC_MULTI_FUNC = 0x02000, /* Multi-Function Twin Channel Device */
AHC_LARGE_SCBS = 0x04000, /* 64byte SCBs */
AHC_AUTORATE = 0x08000, /* Automatic update of SCSIRATE/OFFSET*/
AHC_AUTOPAUSE = 0x10000, /* Automatic pause on register access */
AHC_TARGETMODE = 0x20000, /* Has tested target mode support */
AHC_MULTIROLE = 0x40000, /* Space for two roles at a time */
AHC_REMOVABLE = 0x80000, /* Hot-Swap supported */
AHC_HVD = 0x100000, /* HVD rather than SE */
AHC_AIC7770_FE = AHC_FENONE,
/*
* The real 7850 does not support Ultra modes, but there are
* several cards that use the generic 7850 PCI ID even though
* they are using an Ultra capable chip (7859/7860). We start
* out with the AHC_ULTRA feature set and then check the DEVSTATUS
* register to determine if the capability is really present.
*/
AHC_AIC7850_FE = AHC_SPIOCAP|AHC_AUTOPAUSE|AHC_TARGETMODE|AHC_ULTRA,
AHC_AIC7860_FE = AHC_AIC7850_FE,
AHC_AIC7870_FE = AHC_TARGETMODE|AHC_AUTOPAUSE,
AHC_AIC7880_FE = AHC_AIC7870_FE|AHC_ULTRA,
/*
* Although we have space for both the initiator and
* target roles on ULTRA2 chips, we currently disable
* the initiator role to allow multi-scsi-id target mode
* configurations. We can only respond on the same SCSI
* ID as our initiator role if we allow initiator operation.
* At some point, we should add a configuration knob to
* allow both roles to be loaded.
*/
AHC_AIC7890_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA2
|AHC_QUEUE_REGS|AHC_SG_PRELOAD|AHC_MULTI_TID
|AHC_HS_MAILBOX|AHC_NEW_TERMCTL|AHC_LARGE_SCBS
|AHC_TARGETMODE,
AHC_AIC7892_FE = AHC_AIC7890_FE|AHC_DT|AHC_AUTORATE|AHC_AUTOPAUSE,
AHC_AIC7895_FE = AHC_AIC7880_FE|AHC_MORE_SRAM|AHC_AUTOPAUSE
|AHC_CMD_CHAN|AHC_MULTI_FUNC|AHC_LARGE_SCBS,
AHC_AIC7895C_FE = AHC_AIC7895_FE|AHC_MULTI_TID,
AHC_AIC7896_FE = AHC_AIC7890_FE|AHC_MULTI_FUNC,
AHC_AIC7899_FE = AHC_AIC7892_FE|AHC_MULTI_FUNC
} ahc_feature;
/*
* Bugs in the silicon that we work around in software.
*/
typedef enum {
AHC_BUGNONE = 0x00,
/*
* On all chips prior to the U2 product line,
* the WIDEODD S/G segment feature does not
* work during scsi->HostBus transfers.
*/
AHC_TMODE_WIDEODD_BUG = 0x01,
/*
* On the aic7890/91 Rev 0 chips, the autoflush
* feature does not work. A manual flush of
* the DMA FIFO is required.
*/
AHC_AUTOFLUSH_BUG = 0x02,
/*
* On many chips, cacheline streaming does not work.
*/
AHC_CACHETHEN_BUG = 0x04,
/*
* On the aic7896/97 chips, cacheline
* streaming must be enabled.
*/
AHC_CACHETHEN_DIS_BUG = 0x08,
/*
* PCI 2.1 Retry failure on non-empty data fifo.
*/
AHC_PCI_2_1_RETRY_BUG = 0x10,
/*
* Controller does not handle cacheline residuals
* properly on S/G segments if PCI MWI instructions
* are allowed.
*/
AHC_PCI_MWI_BUG = 0x20,
/*
* An SCB upload using the SCB channel's
* auto array entry copy feature may
* corrupt data. This appears to only
* occur on 66MHz systems.
*/
AHC_SCBCHAN_UPLOAD_BUG = 0x40
} ahc_bug;
/*
* Configuration specific settings.
* The driver determines these settings by probing the
* chip/controller's configuration.
*/
typedef enum {
AHC_FNONE = 0x000,
AHC_PRIMARY_CHANNEL = 0x003, /*
* The channel that should
* be probed first.
*/
AHC_USEDEFAULTS = 0x004, /*
					 * For cards without a seeprom
* or a BIOS to initialize the chip's
* SRAM, we use the default target
* settings.
*/
AHC_SEQUENCER_DEBUG = 0x008,
AHC_SHARED_SRAM = 0x010,
AHC_LARGE_SEEPROM = 0x020, /* Uses C56_66 not C46 */
AHC_RESET_BUS_A = 0x040,
AHC_RESET_BUS_B = 0x080,
AHC_EXTENDED_TRANS_A = 0x100,
AHC_EXTENDED_TRANS_B = 0x200,
AHC_TERM_ENB_A = 0x400,
AHC_TERM_ENB_B = 0x800,
AHC_INITIATORROLE = 0x1000, /*
* Allow initiator operations on
* this controller.
*/
AHC_TARGETROLE = 0x2000, /*
* Allow target operations on this
* controller.
*/
AHC_NEWEEPROM_FMT = 0x4000,
AHC_TQINFIFO_BLOCKED = 0x10000, /* Blocked waiting for ATIOs */
AHC_INT50_SPEEDFLEX = 0x20000, /*
* Internal 50pin connector
* sits behind an aic3860
*/
AHC_SCB_BTT = 0x40000, /*
* The busy targets table is
* stored in SCB space rather
* than SRAM.
*/
AHC_BIOS_ENABLED = 0x80000,
AHC_ALL_INTERRUPTS = 0x100000,
AHC_PAGESCBS = 0x400000, /* Enable SCB paging */
AHC_EDGE_INTERRUPT = 0x800000, /* Device uses edge triggered ints */
AHC_39BIT_ADDRESSING = 0x1000000, /* Use 39 bit addressing scheme. */
AHC_LSCBS_ENABLED = 0x2000000, /* 64Byte SCBs enabled */
AHC_SCB_CONFIG_USED = 0x4000000, /* No SEEPROM but SCB2 had info. */
AHC_NO_BIOS_INIT = 0x8000000, /* No BIOS left over settings. */
AHC_DISABLE_PCI_PERR = 0x10000000,
AHC_HAS_TERM_LOGIC = 0x20000000
} ahc_flag;
/************************* Hardware SCB Definition ***************************/
/*
* The driver keeps up to MAX_SCB scb structures per card in memory. The SCB
* consists of a "hardware SCB" mirroring the fields available on the card
* and additional information the kernel stores for each transaction.
*
* To minimize space utilization, a portion of the hardware scb stores
* different data during different portions of a SCSI transaction.
* As initialized by the host driver for the initiator role, this area
* contains the SCSI cdb (or a pointer to the cdb) to be executed. After
* the cdb has been presented to the target, this area serves to store
* residual transfer information and the SCSI status byte.
* For the target role, the contents of this area do not change, but
* still serve a different purpose than for the initiator role. See
* struct target_data for details.
*/
/*
 * Status information embedded in the shared portion of
* an SCB after passing the cdb to the target. The kernel
* driver will only read this data for transactions that
* complete abnormally (non-zero status byte).
*/
struct status_pkt {
uint32_t residual_datacnt; /* Residual in the current S/G seg */
uint32_t residual_sg_ptr; /* The next S/G for this transfer */
uint8_t scsi_status; /* Standard SCSI status byte */
};
/*
* Target mode version of the shared data SCB segment.
*/
struct target_data {
uint32_t residual_datacnt; /* Residual in the current S/G seg */
uint32_t residual_sg_ptr; /* The next S/G for this transfer */
uint8_t scsi_status; /* SCSI status to give to initiator */
uint8_t target_phases; /* Bitmap of phases to execute */
uint8_t data_phase; /* Data-In or Data-Out */
uint8_t initiator_tag; /* Initiator's transaction tag */
};
struct hardware_scb {
/*0*/ union {
/*
* If the cdb is 12 bytes or less, we embed it directly
* in the SCB. For longer cdbs, we embed the address
* of the cdb payload as seen by the chip and a DMA
* is used to pull it in.
*/
uint8_t cdb[12];
uint32_t cdb_ptr;
struct status_pkt status;
struct target_data tdata;
} shared_data;
/*
* A word about residuals.
* The scb is presented to the sequencer with the dataptr and datacnt
* fields initialized to the contents of the first S/G element to
* transfer. The sgptr field is initialized to the bus address for
* the S/G element that follows the first in the in core S/G array
* or'ed with the SG_FULL_RESID flag. Sgptr may point to an invalid
* S/G entry for this transfer (single S/G element transfer with the
* first elements address and length preloaded in the dataptr/datacnt
* fields). If no transfer is to occur, sgptr is set to SG_LIST_NULL.
* The SG_FULL_RESID flag ensures that the residual will be correctly
* noted even if no data transfers occur. Once the data phase is entered,
* the residual sgptr and datacnt are loaded from the sgptr and the
* datacnt fields. After each S/G element's dataptr and length are
* loaded into the hardware, the residual sgptr is advanced. After
* each S/G element is expired, its datacnt field is checked to see
* if the LAST_SEG flag is set. If so, SG_LIST_NULL is set in the
* residual sg ptr and the transfer is considered complete. If the
	 * sequencer determines that there is a residual in the transfer, it
	 * will set the SG_RESID_VALID flag in sgptr and dma the scb back into
	 * host memory. To summarize:
*
* Sequencer:
* o A residual has occurred if SG_FULL_RESID is set in sgptr,
* or residual_sgptr does not have SG_LIST_NULL set.
*
* o We are transferring the last segment if residual_datacnt has
* the SG_LAST_SEG flag set.
*
* Host:
* o A residual has occurred if a completed scb has the
* SG_RESID_VALID flag set.
*
* o residual_sgptr and sgptr refer to the "next" sg entry
* and so may point beyond the last valid sg entry for the
* transfer.
*/
/*12*/ uint32_t dataptr;
/*16*/ uint32_t datacnt; /*
* Byte 3 (numbered from 0) of
* the datacnt is really the
* 4th byte in that data address.
*/
/*20*/ uint32_t sgptr;
#define SG_PTR_MASK 0xFFFFFFF8
/*24*/ uint8_t control; /* See SCB_CONTROL in aic7xxx.reg for details */
/*25*/ uint8_t scsiid; /* what to load in the SCSIID register */
/*26*/ uint8_t lun;
/*27*/ uint8_t tag; /*
* Index into our kernel SCB array.
* Also used as the tag for tagged I/O
*/
/*28*/ uint8_t cdb_len;
/*29*/ uint8_t scsirate; /* Value for SCSIRATE register */
/*30*/ uint8_t scsioffset; /* Value for SCSIOFFSET register */
/*31*/ uint8_t next; /*
* Used for threading SCBs in the
* "Waiting for Selection" and
* "Disconnected SCB" lists down
* in the sequencer.
*/
/*32*/ uint8_t cdb32[32]; /*
* CDB storage for cdbs of size
* 13->32. We store them here
* because hardware scbs are
* allocated from DMA safe
* memory so we are guaranteed
* the controller can access
* this data.
*/
};
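/*
 * Per the offset annotations above, the hardware SCB is 64 bytes in total,
 * with the last 32 bytes reserved for large (13-32 byte) cdbs.
 */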
/************************ Kernel SCB Definitions ******************************/
/*
* Some fields of the SCB are OS dependent. Here we collect the
* definitions for elements that all OS platforms need to include
 * in their SCB definition.
*/
/*
* Definition of a scatter/gather element as transferred to the controller.
* The aic7xxx chips only support a 24bit length. We use the top byte of
* the length to store additional address bits and a flag to indicate
* that a given segment terminates the transfer. This gives us an
* addressable range of 512GB on machines with 64bit PCI or with chips
* that can support dual address cycles on 32bit PCI busses.
*/
struct ahc_dma_seg {
uint32_t addr;
uint32_t len;
#define AHC_DMA_LAST_SEG 0x80000000
#define AHC_SG_HIGH_ADDR_MASK 0x7F000000
#define AHC_SG_LEN_MASK 0x00FFFFFF
};
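/*
 * Hypothetical, standalone sketch (kept out of the build with #if 0): it
 * only illustrates how a bus address of up to 39 bits and a 24-bit length
 * could be packed into the addr/len pair using the masks defined above.
 * The EX_* mirrors, pack_dma_seg() and main() below are illustration only
 * and do not exist in the aic7xxx sources.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EX_DMA_LAST_SEG		0x80000000u	/* mirrors AHC_DMA_LAST_SEG */
#define EX_SG_HIGH_ADDR_MASK	0x7F000000u	/* mirrors AHC_SG_HIGH_ADDR_MASK */
#define EX_SG_LEN_MASK		0x00FFFFFFu	/* mirrors AHC_SG_LEN_MASK */

struct ex_dma_seg {
	uint32_t addr;	/* low 32 bits of the bus address */
	uint32_t len;	/* address bits 32-38, last-seg flag, 24-bit length */
};

/* Pack an up-to-39-bit address and a 24-bit length into one segment. */
static struct ex_dma_seg pack_dma_seg(uint64_t busaddr, uint32_t len, int last)
{
	struct ex_dma_seg seg;

	seg.addr = (uint32_t)busaddr;
	seg.len  = (len & EX_SG_LEN_MASK)
		 | (uint32_t)(((busaddr >> 32) << 24) & EX_SG_HIGH_ADDR_MASK)
		 | (last ? EX_DMA_LAST_SEG : 0);
	return seg;
}

int main(void)
{
	/* Example: address with bits 32-38 set to 0x7F, 4 KiB length. */
	struct ex_dma_seg seg = pack_dma_seg(0x7F00001000ULL, 0x1000, 1);

	printf("addr=0x%08x len=0x%08x (%u data bytes)\n",
	       seg.addr, seg.len, seg.len & EX_SG_LEN_MASK);
	return 0;
}
#endif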
struct sg_map_node {
bus_dmamap_t sg_dmamap;
dma_addr_t sg_physaddr;
struct ahc_dma_seg* sg_vaddr;
SLIST_ENTRY(sg_map_node) links;
};
/*
* The current state of this SCB.
*/
typedef enum {
SCB_FREE = 0x0000,
SCB_OTHERTCL_TIMEOUT = 0x0002,/*
* Another device was active
* during the first timeout for
* this SCB so we gave ourselves
* an additional timeout period
* in case it was hogging the
* bus.
*/
SCB_DEVICE_RESET = 0x0004,
SCB_SENSE = 0x0008,
SCB_CDB32_PTR = 0x0010,
SCB_RECOVERY_SCB = 0x0020,
SCB_AUTO_NEGOTIATE = 0x0040,/* Negotiate to achieve goal. */
SCB_NEGOTIATE = 0x0080,/* Negotiation forced for command. */
SCB_ABORT = 0x0100,
SCB_UNTAGGEDQ = 0x0200,
SCB_ACTIVE = 0x0400,
SCB_TARGET_IMMEDIATE = 0x0800,
SCB_TRANSMISSION_ERROR = 0x1000,/*
* We detected a parity or CRC
					 * error that has affected the
* payload of the command. This
* flag is checked when normal
* status is returned to catch
* the case of a target not
* responding to our attempt
* to report the error.
*/
SCB_TARGET_SCB = 0x2000,
SCB_SILENT = 0x4000 /*
* Be quiet about transmission type
* errors. They are expected and we
* don't want to upset the user. This
* flag is typically used during DV.
*/
} scb_flag;
struct scb {
struct hardware_scb *hscb;
union {
SLIST_ENTRY(scb) sle;
TAILQ_ENTRY(scb) tqe;
} links;
LIST_ENTRY(scb) pending_links;
ahc_io_ctx_t io_ctx;
struct ahc_softc *ahc_softc;
scb_flag flags;
struct scb_platform_data *platform_data;
struct sg_map_node *sg_map;
struct ahc_dma_seg *sg_list;
dma_addr_t sg_list_phys;
u_int sg_count;/* How full ahc_dma_seg is */
};
struct scb_data {
SLIST_HEAD(, scb) free_scbs; /*
* Pool of SCBs ready to be assigned
* commands to execute.
*/
struct scb *scbindex[256]; /*
* Mapping from tag to SCB.
* As tag identifiers are an
* 8bit value, we provide space
* for all possible tag values.
* Any lookups to entries at or
* above AHC_SCB_MAX_ALLOC will
* always fail.
*/
struct hardware_scb *hscbs; /* Array of hardware SCBs */
struct scb *scbarray; /* Array of kernel SCBs */
struct scsi_sense_data *sense; /* Per SCB sense data */
/*
* "Bus" addresses of our data structures.
*/
bus_dma_tag_t hscb_dmat; /* dmat for our hardware SCB array */
bus_dmamap_t hscb_dmamap;
dma_addr_t hscb_busaddr;
bus_dma_tag_t sense_dmat;
bus_dmamap_t sense_dmamap;
dma_addr_t sense_busaddr;
bus_dma_tag_t sg_dmat; /* dmat for our sg segments */
SLIST_HEAD(, sg_map_node) sg_maps;
uint8_t numscbs;
uint8_t maxhscbs; /* Number of SCBs on the card */
uint8_t init_level; /*
* How far we've initialized
* this structure.
*/
};
/************************ Target Mode Definitions *****************************/
/*
* Connection descriptor for select-in requests in target mode.
*/
struct target_cmd {
uint8_t scsiid; /* Our ID and the initiator's ID */
uint8_t identify; /* Identify message */
uint8_t bytes[22]; /*
* Bytes contains any additional message
* bytes terminated by 0xFF. The remainder
* is the cdb to execute.
*/
uint8_t cmd_valid; /*
* When a command is complete, the firmware
* will set cmd_valid to all bits set.
* After the host has seen the command,
* the bits are cleared. This allows us
* to just peek at host memory to determine
* if more work is complete. cmd_valid is on
* an 8 byte boundary to simplify setting
* it on aic7880 hardware which only has
* limited direct access to the DMA FIFO.
*/
uint8_t pad[7];
};
/*
* Number of events we can buffer up if we run out
* of immediate notify ccbs.
*/
#define AHC_TMODE_EVENT_BUFFER_SIZE 8
struct ahc_tmode_event {
uint8_t initiator_id;
uint8_t event_type; /* MSG type or EVENT_TYPE_BUS_RESET */
#define EVENT_TYPE_BUS_RESET 0xFF
uint8_t event_arg;
};
/*
* Per enabled lun target mode state.
 * As this state is directly influenced by the host OS's target mode
* environment, we let the OS module define it. Forward declare the
* structure here so we can store arrays of them, etc. in OS neutral
* data structures.
*/
#ifdef AHC_TARGET_MODE
struct ahc_tmode_lstate {
struct cam_path *path;
struct ccb_hdr_slist accept_tios;
struct ccb_hdr_slist immed_notifies;
struct ahc_tmode_event event_buffer[AHC_TMODE_EVENT_BUFFER_SIZE];
uint8_t event_r_idx;
uint8_t event_w_idx;
};
#else
struct ahc_tmode_lstate;
#endif
/******************** Transfer Negotiation Datastructures *********************/
#define AHC_TRANS_CUR		0x01	/* Modify current negotiation status */
#define AHC_TRANS_ACTIVE 0x03 /* Assume this target is on the bus */
#define AHC_TRANS_GOAL 0x04 /* Modify negotiation goal */
#define AHC_TRANS_USER 0x08 /* Modify user negotiation settings */
#define AHC_WIDTH_UNKNOWN 0xFF
#define AHC_PERIOD_UNKNOWN 0xFF
#define AHC_OFFSET_UNKNOWN 0xFF
#define AHC_PPR_OPTS_UNKNOWN 0xFF
/*
* Transfer Negotiation Information.
*/
struct ahc_transinfo {
uint8_t protocol_version; /* SCSI Revision level */
uint8_t transport_version; /* SPI Revision level */
uint8_t width; /* Bus width */
uint8_t period; /* Sync rate factor */
uint8_t offset; /* Sync offset */
uint8_t ppr_options; /* Parallel Protocol Request options */
};
/*
 * Per-initiator current, goal and user transfer negotiation information.
 */
struct ahc_initiator_tinfo {
uint8_t scsirate; /* Computed value for SCSIRATE reg */
struct ahc_transinfo curr;
struct ahc_transinfo goal;
struct ahc_transinfo user;
};
/*
* Per enabled target ID state.
* Pointers to lun target state as well as sync/wide negotiation information
* for each initiator<->target mapping. For the initiator role we pretend
* that we are the target and the targets are the initiators since the
* negotiation is the same regardless of role.
*/
struct ahc_tmode_tstate {
struct ahc_tmode_lstate* enabled_luns[AHC_NUM_LUNS];
struct ahc_initiator_tinfo transinfo[AHC_NUM_TARGETS];
/*
* Per initiator state bitmasks.
*/
uint16_t auto_negotiate;/* Auto Negotiation Required */
uint16_t ultraenb; /* Using ultra sync rate */
uint16_t discenable; /* Disconnection allowed */
uint16_t tagenable; /* Tagged Queuing allowed */
};
/*
* Data structure for our table of allowed synchronous transfer rates.
*/
struct ahc_syncrate {
u_int sxfr_u2; /* Value of the SXFR parameter for Ultra2+ Chips */
u_int sxfr; /* Value of the SXFR parameter for <= Ultra Chips */
#define ULTRA_SXFR 0x100 /* Rate Requires Ultra Mode set */
#define ST_SXFR 0x010 /* Rate Single Transition Only */
#define DT_SXFR 0x040 /* Rate Double Transition Only */
uint8_t period; /* Period to send to SCSI target */
const char *rate;
};
/* Safe and valid period for async negotiations. */
#define AHC_ASYNC_XFER_PERIOD 0x45
#define AHC_ULTRA2_XFER_PERIOD 0x0a
/*
 * Indexes into our table of synchronous transfer rates.
*/
#define AHC_SYNCRATE_DT 0
#define AHC_SYNCRATE_ULTRA2 1
#define AHC_SYNCRATE_ULTRA 3
#define AHC_SYNCRATE_FAST 6
#define AHC_SYNCRATE_MAX AHC_SYNCRATE_DT
#define AHC_SYNCRATE_MIN 13
/***************************** Lookup Tables **********************************/
/*
* Phase -> name and message out response
* to parity errors in each phase table.
*/
struct ahc_phase_table_entry {
uint8_t phase;
uint8_t mesg_out; /* Message response to parity errors */
char *phasemsg;
};
/************************** Serial EEPROM Format ******************************/
struct seeprom_config {
/*
* Per SCSI ID Configuration Flags
*/
uint16_t device_flags[16]; /* words 0-15 */
#define CFXFER 0x0007 /* synchronous transfer rate */
#define CFSYNCH 0x0008 /* enable synchronous transfer */
#define CFDISC 0x0010 /* enable disconnection */
#define CFWIDEB 0x0020 /* wide bus device */
#define CFSYNCHISULTRA 0x0040 /* CFSYNCH is an ultra offset (2940AU)*/
#define CFSYNCSINGLE 0x0080 /* Single-Transition signalling */
#define CFSTART 0x0100 /* send start unit SCSI command */
#define CFINCBIOS 0x0200 /* include in BIOS scan */
#define CFRNFOUND 0x0400 /* report even if not found */
#define CFMULTILUNDEV 0x0800 /* Probe multiple luns in BIOS scan */
#define CFWBCACHEENB 0x4000 /* Enable W-Behind Cache on disks */
#define CFWBCACHENOP 0xc000 /* Don't touch W-Behind Cache */
/*
* BIOS Control Bits
*/
uint16_t bios_control; /* word 16 */
#define		CFSUPREM	0x0001	/* support all removable drives */
#define		CFSUPREMB	0x0002	/* support removable boot drives */
#define CFBIOSEN 0x0004 /* BIOS enabled */
#define CFBIOS_BUSSCAN 0x0008 /* Have the BIOS Scan the Bus */
#define CFSM2DRV 0x0010 /* support more than two drives */
#define CFSTPWLEVEL 0x0010 /* Termination level control */
#define CF284XEXTEND 0x0020 /* extended translation (284x cards) */
#define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */
#define CFTERM_MENU 0x0040 /* BIOS displays termination menu */
#define CFEXTEND 0x0080 /* extended translation enabled */
#define CFSCAMEN 0x0100 /* SCAM enable */
#define CFMSG_LEVEL 0x0600 /* BIOS Message Level */
#define CFMSG_VERBOSE 0x0000
#define CFMSG_SILENT 0x0200
#define CFMSG_DIAG 0x0400
#define CFBOOTCD 0x0800 /* Support Bootable CD-ROM */
/* UNUSED 0xff00 */
/*
* Host Adapter Control Bits
*/
uint16_t adapter_control; /* word 17 */
#define CFAUTOTERM 0x0001 /* Perform Auto termination */
#define CFULTRAEN 0x0002 /* Ultra SCSI speed enable */
#define CF284XSELTO 0x0003 /* Selection timeout (284x cards) */
#define CF284XFIFO 0x000C /* FIFO Threshold (284x cards) */
#define CFSTERM 0x0004 /* SCSI low byte termination */
#define CFWSTERM 0x0008 /* SCSI high byte termination */
#define CFSPARITY 0x0010 /* SCSI parity */
#define CF284XSTERM 0x0020 /* SCSI low byte term (284x cards) */
#define CFMULTILUN 0x0020
#define CFRESETB 0x0040 /* reset SCSI bus at boot */
#define CFCLUSTERENB 0x0080 /* Cluster Enable */
#define CFBOOTCHAN 0x0300 /* probe this channel first */
#define CFBOOTCHANSHIFT 8
#define		CFSEAUTOTERM	0x0400	/* Ultra2 Perform secondary Auto Term */
#define		CFSELOWTERM	0x0800	/* Ultra2 secondary low term */
#define		CFSEHIGHTERM	0x1000	/* Ultra2 secondary high term */
#define		CFENABLEDV	0x4000	/* Perform Domain Validation */
/*
* Bus Release Time, Host Adapter ID
*/
uint16_t brtime_id; /* word 18 */
#define CFSCSIID 0x000f /* host adapter SCSI ID */
/* UNUSED 0x00f0 */
#define CFBRTIME 0xff00 /* bus release time */
/*
* Maximum targets
*/
uint16_t max_targets; /* word 19 */
#define CFMAXTARG 0x00ff /* maximum targets */
#define CFBOOTLUN 0x0f00 /* Lun to boot from */
#define CFBOOTID 0xf000 /* Target to boot from */
uint16_t res_1[10]; /* words 20-29 */
uint16_t signature; /* Signature == 0x250 */
#define CFSIGNATURE 0x250
#define CFSIGNATURE2 0x300
uint16_t checksum; /* word 31 */
};
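/*
 * Hypothetical, standalone sketch (kept out of the build with #if 0)
 * showing how one per-target word of device_flags above could be decoded
 * with the CF* masks.  The EX_* mirrors, the example word 0x003B and the
 * helper below are illustration only; they are not part of the driver.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EX_CFXFER	0x0007	/* mirrors CFXFER  */
#define EX_CFSYNCH	0x0008	/* mirrors CFSYNCH */
#define EX_CFDISC	0x0010	/* mirrors CFDISC  */
#define EX_CFWIDEB	0x0020	/* mirrors CFWIDEB */

static void ex_decode_device_flags(uint16_t flags)
{
	printf("sync rate index : %u\n", flags & EX_CFXFER);
	printf("sync enabled    : %s\n", (flags & EX_CFSYNCH) ? "yes" : "no");
	printf("disconnect      : %s\n", (flags & EX_CFDISC)  ? "yes" : "no");
	printf("wide bus        : %s\n", (flags & EX_CFWIDEB) ? "yes" : "no");
}

int main(void)
{
	/* 0x003B: sync rate index 3, sync + disconnect + wide enabled. */
	ex_decode_device_flags(0x003B);
	return 0;
}
#endif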
/**************************** Message Buffer *********************************/
typedef enum {
MSG_TYPE_NONE = 0x00,
MSG_TYPE_INITIATOR_MSGOUT = 0x01,
MSG_TYPE_INITIATOR_MSGIN = 0x02,
MSG_TYPE_TARGET_MSGOUT = 0x03,
MSG_TYPE_TARGET_MSGIN = 0x04
} ahc_msg_type;
typedef enum {
MSGLOOP_IN_PROG,
MSGLOOP_MSGCOMPLETE,
MSGLOOP_TERMINATED
} msg_loop_stat;
/*********************** Software Configuration Structure *********************/
TAILQ_HEAD(scb_tailq, scb);
struct ahc_aic7770_softc {
/*
* Saved register state used for chip_init().
*/
uint8_t busspd;
uint8_t bustime;
};
struct ahc_pci_softc {
/*
* Saved register state used for chip_init().
*/
uint32_t devconfig;
uint16_t targcrccnt;
uint8_t command;
uint8_t csize_lattime;
uint8_t optionmode;
uint8_t crccontrol1;
uint8_t dscommand0;
uint8_t dspcistatus;
uint8_t scbbaddr;
uint8_t dff_thrsh;
};
union ahc_bus_softc {
struct ahc_aic7770_softc aic7770_softc;
struct ahc_pci_softc pci_softc;
};
typedef void (*ahc_bus_intr_t)(struct ahc_softc *);
typedef int (*ahc_bus_chip_init_t)(struct ahc_softc *);
typedef void ahc_callback_t (void *);
struct ahc_softc {
bus_space_tag_t tag;
bus_space_handle_t bsh;
struct scb_data *scb_data;
struct scb *next_queued_scb;
/*
* SCBs that have been sent to the controller
*/
BSD_LIST_HEAD(, scb) pending_scbs;
/*
* Counting lock for deferring the release of additional
* untagged transactions from the untagged_queues. When
* the lock is decremented to 0, all queues in the
* untagged_queues array are run.
*/
u_int untagged_queue_lock;
/*
* Per-target queue of untagged-transactions. The
* transaction at the head of the queue is the
* currently pending untagged transaction for the
* target. The driver only allows a single untagged
* transaction per target.
*/
struct scb_tailq untagged_queues[AHC_NUM_TARGETS];
/*
* Bus attachment specific data.
*/
union ahc_bus_softc bus_softc;
/*
* Platform specific data.
*/
struct ahc_platform_data *platform_data;
/*
* Platform specific device information.
*/
ahc_dev_softc_t dev_softc;
struct device *dev;
/*
* Bus specific device information.
*/
ahc_bus_intr_t bus_intr;
/*
* Bus specific initialization required
* after a chip reset.
*/
ahc_bus_chip_init_t bus_chip_init;
/*
* Target mode related state kept on a per enabled lun basis.
* Targets that are not enabled will have null entries.
* As an initiator, we keep one target entry for our initiator
* ID to store our sync/wide transfer settings.
*/
struct ahc_tmode_tstate *enabled_targets[AHC_NUM_TARGETS];
/*
* The black hole device responsible for handling requests for
* disabled luns on enabled targets.
*/
struct ahc_tmode_lstate *black_hole;
/*
* Device instance currently on the bus awaiting a continue TIO
	 * for a command that was not given the disconnect privilege.
*/
struct ahc_tmode_lstate *pending_device;
/*
* Card characteristics
*/
ahc_chip chip;
ahc_feature features;
ahc_bug bugs;
ahc_flag flags;
struct seeprom_config *seep_config;
/* Values to store in the SEQCTL register for pause and unpause */
uint8_t unpause;
uint8_t pause;
/* Command Queues */
uint8_t qoutfifonext;
uint8_t qinfifonext;
uint8_t *qoutfifo;
uint8_t *qinfifo;
/* Critical Section Data */
struct cs *critical_sections;
u_int num_critical_sections;
/* Channel Names ('A', 'B', etc.) */
char channel;
char channel_b;
/* Initiator Bus ID */
uint8_t our_id;
uint8_t our_id_b;
/*
* PCI error detection.
*/
int unsolicited_ints;
/*
* Target incoming command FIFO.
*/
struct target_cmd *targetcmds;
uint8_t tqinfifonext;
/*
* Cached copy of the sequencer control register.
*/
uint8_t seqctl;
/*
* Incoming and outgoing message handling.
*/
uint8_t send_msg_perror;
ahc_msg_type msg_type;
uint8_t msgout_buf[12];/* Message we are sending */
uint8_t msgin_buf[12];/* Message we are receiving */
u_int msgout_len; /* Length of message to send */
u_int msgout_index; /* Current index in msgout */
u_int msgin_index; /* Current index in msgin */
/*
* Mapping information for data structures shared
* between the sequencer and kernel.
*/
bus_dma_tag_t parent_dmat;
bus_dma_tag_t shared_data_dmat;
bus_dmamap_t shared_data_dmamap;
dma_addr_t shared_data_busaddr;
/*
* Bus address of the one byte buffer used to
	 * work around a DMA bug for chips <= aic7880
* in target mode.
*/
dma_addr_t dma_bug_buf;
	/* Number of enabled target mode devices on this card */
u_int enabled_luns;
/* Initialization level of this data structure */
u_int init_level;
/* PCI cacheline size. */
u_int pci_cachesize;
/*
* Count of parity errors we have seen as a target.
* We auto-disable parity error checking after seeing
* AHC_PCI_TARGET_PERR_THRESH number of errors.
*/
u_int pci_target_perr_count;
#define AHC_PCI_TARGET_PERR_THRESH 10
/* Maximum number of sequencer instructions supported. */
u_int instruction_ram_size;
/* Per-Unit descriptive information */
const char *description;
char *name;
int unit;
/* Selection Timer settings */
int seltime;
int seltime_b;
uint16_t user_discenable;/* Disconnection allowed */
uint16_t user_tagenable;/* Tagged Queuing allowed */
};
/************************ Active Device Information ***************************/
typedef enum {
ROLE_UNKNOWN,
ROLE_INITIATOR,
ROLE_TARGET
} role_t;
struct ahc_devinfo {
int our_scsiid;
int target_offset;
uint16_t target_mask;
u_int target;
u_int lun;
char channel;
role_t role; /*
* Only guaranteed to be correct if not
* in the busfree state.
*/
};
/****************************** PCI Structures ********************************/
typedef int (ahc_device_setup_t)(struct ahc_softc *);
struct ahc_pci_identity {
uint64_t full_id;
uint64_t id_mask;
const char *name;
ahc_device_setup_t *setup;
};
/***************************** VL/EISA Declarations ***************************/
struct aic7770_identity {
uint32_t full_id;
uint32_t id_mask;
const char *name;
ahc_device_setup_t *setup;
};
extern struct aic7770_identity aic7770_ident_table[];
#define AHC_EISA_SLOT_OFFSET 0xc00
#define AHC_EISA_IOSIZE 0x100
/*************************** Function Declarations ****************************/
/******************************************************************************/
/***************************** PCI Front End *********************************/
const struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t);
int ahc_pci_config(struct ahc_softc *,
const struct ahc_pci_identity *);
int ahc_pci_test_register_access(struct ahc_softc *);
void __maybe_unused ahc_pci_resume(struct ahc_softc *ahc);
/*************************** EISA/VL Front End ********************************/
int aic7770_config(struct ahc_softc *ahc,
struct aic7770_identity *,
u_int port);
/************************** SCB and SCB queue management **********************/
int ahc_probe_scbs(struct ahc_softc *);
void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc,
struct scb *scb);
int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb,
int target, char channel, int lun,
u_int tag, role_t role);
/****************************** Initialization ********************************/
struct ahc_softc *ahc_alloc(void *platform_arg, char *name);
int ahc_softc_init(struct ahc_softc *);
void ahc_controller_info(struct ahc_softc *ahc, char *buf);
int ahc_chip_init(struct ahc_softc *ahc);
int ahc_init(struct ahc_softc *ahc);
void ahc_intr_enable(struct ahc_softc *ahc, int enable);
void ahc_pause_and_flushwork(struct ahc_softc *ahc);
int __maybe_unused ahc_suspend(struct ahc_softc *ahc);
int __maybe_unused ahc_resume(struct ahc_softc *ahc);
void ahc_set_unit(struct ahc_softc *, int);
void ahc_set_name(struct ahc_softc *, char *);
void ahc_free(struct ahc_softc *ahc);
int ahc_reset(struct ahc_softc *ahc, int reinit);
/***************************** Error Recovery *********************************/
typedef enum {
SEARCH_COMPLETE,
SEARCH_COUNT,
SEARCH_REMOVE
} ahc_search_action;
int ahc_search_qinfifo(struct ahc_softc *ahc, int target,
char channel, int lun, u_int tag,
role_t role, uint32_t status,
ahc_search_action action);
int ahc_search_untagged_queues(struct ahc_softc *ahc,
ahc_io_ctx_t ctx,
int target, char channel,
int lun, uint32_t status,
ahc_search_action action);
int ahc_search_disc_list(struct ahc_softc *ahc, int target,
char channel, int lun, u_int tag,
int stop_on_first, int remove,
int save_state);
int ahc_reset_channel(struct ahc_softc *ahc, char channel,
int initiate_reset);
/*************************** Utility Functions ********************************/
void ahc_compile_devinfo(struct ahc_devinfo *devinfo,
u_int our_id, u_int target,
u_int lun, char channel,
role_t role);
/************************** Transfer Negotiation ******************************/
const struct ahc_syncrate* ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
u_int *ppr_options, u_int maxsync);
u_int ahc_find_period(struct ahc_softc *ahc,
u_int scsirate, u_int maxsync);
/*
* Negotiation types. These are used to qualify if we should renegotiate
* even if our goal and current transport parameters are identical.
*/
typedef enum {
AHC_NEG_TO_GOAL, /* Renegotiate only if goal and curr differ. */
AHC_NEG_IF_NON_ASYNC, /* Renegotiate so long as goal is non-async. */
	AHC_NEG_ALWAYS		/* Renegotiate even if goal is async. */
} ahc_neg_type;
int ahc_update_neg_request(struct ahc_softc*,
struct ahc_devinfo*,
struct ahc_tmode_tstate*,
struct ahc_initiator_tinfo*,
ahc_neg_type);
void ahc_set_width(struct ahc_softc *ahc,
struct ahc_devinfo *devinfo,
u_int width, u_int type, int paused);
void ahc_set_syncrate(struct ahc_softc *ahc,
struct ahc_devinfo *devinfo,
const struct ahc_syncrate *syncrate,
u_int period, u_int offset,
u_int ppr_options,
u_int type, int paused);
typedef enum {
AHC_QUEUE_NONE,
AHC_QUEUE_BASIC,
AHC_QUEUE_TAGGED
} ahc_queue_alg;
/**************************** Target Mode *************************************/
#ifdef AHC_TARGET_MODE
void ahc_send_lstate_events(struct ahc_softc *,
struct ahc_tmode_lstate *);
void ahc_handle_en_lun(struct ahc_softc *ahc,
struct cam_sim *sim, union ccb *ccb);
cam_status ahc_find_tmode_devs(struct ahc_softc *ahc,
struct cam_sim *sim, union ccb *ccb,
struct ahc_tmode_tstate **tstate,
struct ahc_tmode_lstate **lstate,
int notfound_failure);
#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif
#endif
/******************************* Debug ***************************************/
#ifdef AHC_DEBUG
extern uint32_t ahc_debug;
#define AHC_SHOW_MISC 0x0001
#define AHC_SHOW_SENSE 0x0002
#define AHC_DUMP_SEEPROM 0x0004
#define AHC_SHOW_TERMCTL 0x0008
#define AHC_SHOW_MEMORY 0x0010
#define AHC_SHOW_MESSAGES 0x0020
#define AHC_SHOW_DV 0x0040
#define AHC_SHOW_SELTO 0x0080
#define AHC_SHOW_QFULL 0x0200
#define AHC_SHOW_QUEUE 0x0400
#define AHC_SHOW_TQIN 0x0800
#define AHC_SHOW_MASKED_ERRORS 0x1000
#define AHC_DEBUG_SEQUENCER 0x2000
#endif
void ahc_print_devinfo(struct ahc_softc *ahc,
struct ahc_devinfo *dev);
void ahc_dump_card_state(struct ahc_softc *ahc);
int ahc_print_register(const ahc_reg_parse_entry_t *table,
u_int num_entries,
const char *name,
u_int address,
u_int value,
u_int *cur_column,
u_int wrap_point);
/******************************* SEEPROM *************************************/
int ahc_acquire_seeprom(struct ahc_softc *ahc,
struct seeprom_descriptor *sd);
void ahc_release_seeprom(struct seeprom_descriptor *sd);
#endif /* _AIC7XXX_H_ */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 1996-2001 Vojtech Pavlik
*/
/*
* This is just a very simple driver that can dump the data
* out of the joystick port into the syslog ...
*/
#include <linux/module.h>
#include <linux/gameport.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#define DRIVER_DESC "Gameport data dumper module"
MODULE_AUTHOR("Vojtech Pavlik <[email protected]>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
#define BUF_SIZE 256
struct joydump {
unsigned int time;
unsigned char data;
};
static int joydump_connect(struct gameport *gameport, struct gameport_driver *drv)
{
struct joydump *buf; /* all entries */
struct joydump *dump, *prev; /* one entry each */
int axes[4], buttons;
int i, j, t, timeout;
unsigned long flags;
unsigned char u;
printk(KERN_INFO "joydump: ,------------------ START ----------------.\n");
printk(KERN_INFO "joydump: | Dumping: %30s |\n", gameport->phys);
printk(KERN_INFO "joydump: | Speed: %28d kHz |\n", gameport->speed);
if (gameport_open(gameport, drv, GAMEPORT_MODE_RAW)) {
printk(KERN_INFO "joydump: | Raw mode not available - trying cooked. |\n");
if (gameport_open(gameport, drv, GAMEPORT_MODE_COOKED)) {
printk(KERN_INFO "joydump: | Cooked not available either. Failing. |\n");
printk(KERN_INFO "joydump: `------------------- END -----------------'\n");
return -ENODEV;
}
gameport_cooked_read(gameport, axes, &buttons);
for (i = 0; i < 4; i++)
printk(KERN_INFO "joydump: | Axis %d: %4d. |\n", i, axes[i]);
printk(KERN_INFO "joydump: | Buttons %02x. |\n", buttons);
printk(KERN_INFO "joydump: `------------------- END -----------------'\n");
}
timeout = gameport_time(gameport, 10000); /* 10 ms */
buf = kmalloc_array(BUF_SIZE, sizeof(struct joydump), GFP_KERNEL);
if (!buf) {
printk(KERN_INFO "joydump: no memory for testing\n");
goto jd_end;
}
dump = buf;
t = 0;
i = 1;
local_irq_save(flags);
u = gameport_read(gameport);
dump->data = u;
dump->time = t;
dump++;
gameport_trigger(gameport);
while (i < BUF_SIZE && t < timeout) {
dump->data = gameport_read(gameport);
if (dump->data ^ u) {
u = dump->data;
dump->time = t;
i++;
dump++;
}
t++;
}
local_irq_restore(flags);
/*
* Dump data.
*/
t = i;
dump = buf;
prev = dump;
printk(KERN_INFO "joydump: >------------------ DATA -----------------<\n");
printk(KERN_INFO "joydump: | index: %3d delta: %3d us data: ", 0, 0);
for (j = 7; j >= 0; j--)
printk("%d", (dump->data >> j) & 1);
printk(" |\n");
dump++;
for (i = 1; i < t; i++, dump++, prev++) {
printk(KERN_INFO "joydump: | index: %3d delta: %3d us data: ",
i, dump->time - prev->time);
for (j = 7; j >= 0; j--)
printk("%d", (dump->data >> j) & 1);
printk(" |\n");
}
kfree(buf);
jd_end:
printk(KERN_INFO "joydump: `------------------- END -----------------'\n");
return 0;
}
static void joydump_disconnect(struct gameport *gameport)
{
gameport_close(gameport);
}
static struct gameport_driver joydump_drv = {
.driver = {
.name = "joydump",
},
.description = DRIVER_DESC,
.connect = joydump_connect,
.disconnect = joydump_disconnect,
};
module_gameport_driver(joydump_drv);
|
// SPDX-License-Identifier: GPL-2.0+
/* Siemens ID Mouse driver v0.6
Copyright (C) 2004-5 by Florian 'Floe' Echtler <[email protected]>
and Andreas 'ad' Deresch <[email protected]>
Derived from the USB Skeleton driver 1.1,
Copyright (C) 2003 Greg Kroah-Hartman ([email protected])
Additional information provided by Martin Reising
<[email protected]>
*/
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
/* image constants */
#define WIDTH 225
#define HEIGHT 289
#define HEADER "P5 225 289 255 "
#define IMGSIZE ((WIDTH * HEIGHT) + sizeof(HEADER)-1)
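/*
 * Worked check of the arithmetic above (hypothetical, standalone, kept out
 * of the build with #if 0): a 225x289 8-bit image plus the 15-byte PGM
 * header "P5 225 289 255 " gives 65025 + 15 = 65040 bytes, matching the
 * "should be IMGSIZE == 65040" note in idmouse_create_image() below.
 */
#if 0
#include <stdio.h>

#define EX_WIDTH	225
#define EX_HEIGHT	289
#define EX_HEADER	"P5 225 289 255 "

int main(void)
{
	unsigned long imgsize = (EX_WIDTH * EX_HEIGHT) + sizeof(EX_HEADER) - 1;

	printf("image size = %lu bytes\n", imgsize);	/* prints 65040 */
	return 0;
}
#endif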
#define DRIVER_SHORT "idmouse"
#define DRIVER_AUTHOR "Florian 'Floe' Echtler <[email protected]>"
#define DRIVER_DESC "Siemens ID Mouse FingerTIP Sensor Driver"
/* minor number for misc USB devices */
#define USB_IDMOUSE_MINOR_BASE 132
/* vendor and device IDs */
#define ID_SIEMENS 0x0681
#define ID_IDMOUSE 0x0005
#define ID_CHERRY 0x0010
/* device ID table */
static const struct usb_device_id idmouse_table[] = {
{USB_DEVICE(ID_SIEMENS, ID_IDMOUSE)}, /* Siemens ID Mouse (Professional) */
{USB_DEVICE(ID_SIEMENS, ID_CHERRY )}, /* Cherry FingerTIP ID Board */
{} /* terminating null entry */
};
/* sensor commands */
#define FTIP_RESET 0x20
#define FTIP_ACQUIRE 0x21
#define FTIP_RELEASE 0x22
#define FTIP_BLINK 0x23 /* LSB of value = blink pulse width */
#define FTIP_SCROLL 0x24
#define ftip_command(dev, command, value, index) \
usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), command, \
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, value, index, NULL, 0, 1000)
MODULE_DEVICE_TABLE(usb, idmouse_table);
/* structure to hold all of our device specific stuff */
struct usb_idmouse {
struct usb_device *udev; /* save off the usb device pointer */
struct usb_interface *interface; /* the interface for this device */
unsigned char *bulk_in_buffer; /* the buffer to receive data */
size_t bulk_in_size; /* the maximum bulk packet size */
size_t orig_bi_size; /* same as above, but reported by the device */
__u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */
int open; /* if the port is open or not */
int present; /* if the device is not disconnected */
struct mutex lock; /* locks this structure */
};
/* local function prototypes */
static ssize_t idmouse_read(struct file *file, char __user *buffer,
size_t count, loff_t * ppos);
static int idmouse_open(struct inode *inode, struct file *file);
static int idmouse_release(struct inode *inode, struct file *file);
static int idmouse_probe(struct usb_interface *interface,
const struct usb_device_id *id);
static void idmouse_disconnect(struct usb_interface *interface);
static int idmouse_suspend(struct usb_interface *intf, pm_message_t message);
static int idmouse_resume(struct usb_interface *intf);
/* file operation pointers */
static const struct file_operations idmouse_fops = {
.owner = THIS_MODULE,
.read = idmouse_read,
.open = idmouse_open,
.release = idmouse_release,
.llseek = default_llseek,
};
/* class driver information */
static struct usb_class_driver idmouse_class = {
.name = "idmouse%d",
.fops = &idmouse_fops,
.minor_base = USB_IDMOUSE_MINOR_BASE,
};
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver idmouse_driver = {
.name = DRIVER_SHORT,
.probe = idmouse_probe,
.disconnect = idmouse_disconnect,
.suspend = idmouse_suspend,
.resume = idmouse_resume,
.reset_resume = idmouse_resume,
.id_table = idmouse_table,
.supports_autosuspend = 1,
};
static int idmouse_create_image(struct usb_idmouse *dev)
{
int bytes_read;
int bulk_read;
int result;
memcpy(dev->bulk_in_buffer, HEADER, sizeof(HEADER)-1);
bytes_read = sizeof(HEADER)-1;
/* reset the device and set a fast blink rate */
result = ftip_command(dev, FTIP_RELEASE, 0, 0);
if (result < 0)
goto reset;
result = ftip_command(dev, FTIP_BLINK, 1, 0);
if (result < 0)
goto reset;
/* initialize the sensor - sending this command twice */
/* significantly reduces the rate of failed reads */
result = ftip_command(dev, FTIP_ACQUIRE, 0, 0);
if (result < 0)
goto reset;
result = ftip_command(dev, FTIP_ACQUIRE, 0, 0);
if (result < 0)
goto reset;
/* start the readout - sending this command twice */
/* presumably enables the high dynamic range mode */
result = ftip_command(dev, FTIP_RESET, 0, 0);
if (result < 0)
goto reset;
result = ftip_command(dev, FTIP_RESET, 0, 0);
if (result < 0)
goto reset;
/* loop over a blocking bulk read to get data from the device */
while (bytes_read < IMGSIZE) {
result = usb_bulk_msg(dev->udev,
usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr),
dev->bulk_in_buffer + bytes_read,
dev->bulk_in_size, &bulk_read, 5000);
if (result < 0) {
/* Maybe this error was caused by the increased packet size? */
/* Reset to the original value and tell userspace to retry. */
if (dev->bulk_in_size != dev->orig_bi_size) {
dev->bulk_in_size = dev->orig_bi_size;
result = -EAGAIN;
}
break;
}
if (signal_pending(current)) {
result = -EINTR;
break;
}
bytes_read += bulk_read;
}
/* check for valid image */
/* right border should be black (0x00) */
for (bytes_read = sizeof(HEADER)-1 + WIDTH-1; bytes_read < IMGSIZE; bytes_read += WIDTH)
if (dev->bulk_in_buffer[bytes_read] != 0x00)
return -EAGAIN;
/* lower border should be white (0xFF) */
for (bytes_read = IMGSIZE-WIDTH; bytes_read < IMGSIZE-1; bytes_read++)
if (dev->bulk_in_buffer[bytes_read] != 0xFF)
return -EAGAIN;
/* reset the device */
reset:
ftip_command(dev, FTIP_RELEASE, 0, 0);
/* should be IMGSIZE == 65040 */
dev_dbg(&dev->interface->dev, "read %d bytes fingerprint data\n",
bytes_read);
return result;
}
/* PM operations are nops as this driver does IO only during open() */
static int idmouse_suspend(struct usb_interface *intf, pm_message_t message)
{
return 0;
}
static int idmouse_resume(struct usb_interface *intf)
{
return 0;
}
static inline void idmouse_delete(struct usb_idmouse *dev)
{
kfree(dev->bulk_in_buffer);
kfree(dev);
}
static int idmouse_open(struct inode *inode, struct file *file)
{
struct usb_idmouse *dev;
struct usb_interface *interface;
int result;
/* get the interface from minor number and driver information */
interface = usb_find_interface(&idmouse_driver, iminor(inode));
if (!interface)
return -ENODEV;
/* get the device information block from the interface */
dev = usb_get_intfdata(interface);
if (!dev)
return -ENODEV;
/* lock this device */
mutex_lock(&dev->lock);
/* check if already open */
if (dev->open) {
/* already open, so fail */
result = -EBUSY;
} else {
/* create a new image and check for success */
result = usb_autopm_get_interface(interface);
if (result)
goto error;
result = idmouse_create_image(dev);
usb_autopm_put_interface(interface);
if (result)
goto error;
/* increment our usage count for the driver */
++dev->open;
/* save our object in the file's private structure */
file->private_data = dev;
}
error:
/* unlock this device */
mutex_unlock(&dev->lock);
return result;
}
static int idmouse_release(struct inode *inode, struct file *file)
{
struct usb_idmouse *dev;
dev = file->private_data;
if (dev == NULL)
return -ENODEV;
/* lock our device */
mutex_lock(&dev->lock);
--dev->open;
if (!dev->present) {
/* the device was unplugged before the file was released */
mutex_unlock(&dev->lock);
idmouse_delete(dev);
} else {
mutex_unlock(&dev->lock);
}
return 0;
}
static ssize_t idmouse_read(struct file *file, char __user *buffer, size_t count,
loff_t * ppos)
{
struct usb_idmouse *dev = file->private_data;
int result;
/* lock this object */
mutex_lock(&dev->lock);
/* verify that the device wasn't unplugged */
if (!dev->present) {
mutex_unlock(&dev->lock);
return -ENODEV;
}
result = simple_read_from_buffer(buffer, count, ppos,
dev->bulk_in_buffer, IMGSIZE);
/* unlock the device */
mutex_unlock(&dev->lock);
return result;
}
static int idmouse_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_idmouse *dev;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
int result;
/* check if we have gotten the data or the hid interface */
iface_desc = interface->cur_altsetting;
if (iface_desc->desc.bInterfaceClass != 0x0A)
return -ENODEV;
if (iface_desc->desc.bNumEndpoints < 1)
return -ENODEV;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL)
return -ENOMEM;
mutex_init(&dev->lock);
dev->udev = udev;
dev->interface = interface;
/* set up the endpoint information - use only the first bulk-in endpoint */
result = usb_find_bulk_in_endpoint(iface_desc, &endpoint);
if (result) {
dev_err(&interface->dev, "Unable to find bulk-in endpoint.\n");
idmouse_delete(dev);
return result;
}
dev->orig_bi_size = usb_endpoint_maxp(endpoint);
dev->bulk_in_size = 0x200; /* works _much_ faster */
dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
dev->bulk_in_buffer = kmalloc(IMGSIZE + dev->bulk_in_size, GFP_KERNEL);
if (!dev->bulk_in_buffer) {
idmouse_delete(dev);
return -ENOMEM;
}
/* allow device read, write and ioctl */
dev->present = 1;
/* we can register the device now, as it is ready */
usb_set_intfdata(interface, dev);
result = usb_register_dev(interface, &idmouse_class);
if (result) {
/* something prevented us from registering this device */
dev_err(&interface->dev, "Unable to allocate minor number.\n");
idmouse_delete(dev);
return result;
}
/* be noisy */
dev_info(&interface->dev,"%s now attached\n",DRIVER_DESC);
return 0;
}
static void idmouse_disconnect(struct usb_interface *interface)
{
struct usb_idmouse *dev = usb_get_intfdata(interface);
/* give back our minor */
usb_deregister_dev(interface, &idmouse_class);
/* lock the device */
mutex_lock(&dev->lock);
/* prevent device read, write and ioctl */
dev->present = 0;
/* if the device is opened, idmouse_release will clean this up */
if (!dev->open) {
mutex_unlock(&dev->lock);
idmouse_delete(dev);
} else {
/* unlock */
mutex_unlock(&dev->lock);
}
dev_info(&interface->dev, "disconnected\n");
}
module_usb_driver(idmouse_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
saa7146.o - driver for generic saa7146-based hardware
Copyright (C) 1998-2003 Michael Hunold <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <media/drv-intf/saa7146.h>
#include <linux/module.h>
static int saa7146_num;
unsigned int saa7146_debug;
module_param(saa7146_debug, uint, 0644);
MODULE_PARM_DESC(saa7146_debug, "debug level (default: 0)");
#if 0
static void dump_registers(struct saa7146_dev* dev)
{
int i = 0;
pr_info(" @ %li jiffies:\n", jiffies);
for (i = 0; i <= 0x148; i += 4)
pr_info("0x%03x: 0x%08x\n", i, saa7146_read(dev, i));
}
#endif
/****************************************************************************
* gpio and debi helper functions
****************************************************************************/
void saa7146_setgpio(struct saa7146_dev *dev, int port, u32 data)
{
u32 value = 0;
if (WARN_ON(port > 3))
return;
value = saa7146_read(dev, GPIO_CTRL);
value &= ~(0xff << (8*port));
value |= (data << (8*port));
saa7146_write(dev, GPIO_CTRL, value);
}
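/*
 * Hypothetical, standalone sketch (kept out of the build with #if 0) of
 * the read-modify-write performed by saa7146_setgpio() above: each of the
 * four GPIO ports owns one byte of the 32-bit GPIO_CTRL word, so updating
 * port N clears bits 8*N..8*N+7 and ORs in the new value.  The register
 * access is simulated with a plain variable here.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_gpio_ctrl = 0x44332211u;	/* simulated GPIO_CTRL */

static void ex_setgpio(int port, uint32_t data)
{
	uint32_t value = fake_gpio_ctrl;

	value &= ~(0xffu << (8 * port));	/* clear the port's byte */
	value |= (data & 0xffu) << (8 * port);	/* install the new byte */
	fake_gpio_ctrl = value;
}

int main(void)
{
	ex_setgpio(2, 0xAA);	/* replaces the 0x33 byte */
	printf("GPIO_CTRL = 0x%08x\n", fake_gpio_ctrl);	/* 0x44AA2211 */
	return 0;
}
#endif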
/* This DEBI code is based on the saa7146 Stradis driver by Nathan Laredo */
static inline int saa7146_wait_for_debi_done_sleep(struct saa7146_dev *dev,
unsigned long us1, unsigned long us2)
{
unsigned long timeout;
int err;
/* wait for registers to be programmed */
timeout = jiffies + usecs_to_jiffies(us1);
while (1) {
err = time_after(jiffies, timeout);
if (saa7146_read(dev, MC2) & 2)
break;
if (err) {
pr_debug("%s: %s timed out while waiting for registers getting programmed\n",
dev->name, __func__);
return -ETIMEDOUT;
}
msleep(1);
}
/* wait for transfer to complete */
timeout = jiffies + usecs_to_jiffies(us2);
while (1) {
err = time_after(jiffies, timeout);
if (!(saa7146_read(dev, PSR) & SPCI_DEBI_S))
break;
saa7146_read(dev, MC2);
if (err) {
DEB_S("%s: %s timed out while waiting for transfer completion\n",
dev->name, __func__);
return -ETIMEDOUT;
}
msleep(1);
}
return 0;
}
static inline int saa7146_wait_for_debi_done_busyloop(struct saa7146_dev *dev,
unsigned long us1, unsigned long us2)
{
unsigned long loops;
/* wait for registers to be programmed */
loops = us1;
while (1) {
if (saa7146_read(dev, MC2) & 2)
break;
if (!loops--) {
pr_err("%s: %s timed out while waiting for registers getting programmed\n",
dev->name, __func__);
return -ETIMEDOUT;
}
udelay(1);
}
/* wait for transfer to complete */
loops = us2 / 5;
while (1) {
if (!(saa7146_read(dev, PSR) & SPCI_DEBI_S))
break;
saa7146_read(dev, MC2);
if (!loops--) {
DEB_S("%s: %s timed out while waiting for transfer completion\n",
dev->name, __func__);
return -ETIMEDOUT;
}
udelay(5);
}
return 0;
}
int saa7146_wait_for_debi_done(struct saa7146_dev *dev, int nobusyloop)
{
if (nobusyloop)
return saa7146_wait_for_debi_done_sleep(dev, 50000, 250000);
else
return saa7146_wait_for_debi_done_busyloop(dev, 50000, 250000);
}
/****************************************************************************
* general helper functions
****************************************************************************/
/* This is videobuf_vmalloc_to_sg() from videobuf-dma-sg.c.
   Make sure virt has been allocated with vmalloc_32(); otherwise this
   returns NULL on highmem machines. */
static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages)
{
struct scatterlist *sglist;
struct page *pg;
int i;
sglist = kmalloc_array(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
if (NULL == sglist)
return NULL;
sg_init_table(sglist, nr_pages);
for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
pg = vmalloc_to_page(virt);
if (NULL == pg)
goto err;
if (WARN_ON(PageHighMem(pg)))
goto err;
sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
}
return sglist;
err:
kfree(sglist);
return NULL;
}
/********************************************************************************/
/* common page table functions */
void *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt)
{
int pages = (length+PAGE_SIZE-1)/PAGE_SIZE;
void *mem = vmalloc_32(length);
int slen = 0;
if (NULL == mem)
goto err_null;
if (!(pt->slist = vmalloc_to_sg(mem, pages)))
goto err_free_mem;
if (saa7146_pgtable_alloc(pci, pt))
goto err_free_slist;
pt->nents = pages;
slen = dma_map_sg(&pci->dev, pt->slist, pt->nents, DMA_FROM_DEVICE);
if (0 == slen)
goto err_free_pgtable;
if (0 != saa7146_pgtable_build_single(pci, pt, pt->slist, slen))
goto err_unmap_sg;
return mem;
err_unmap_sg:
dma_unmap_sg(&pci->dev, pt->slist, pt->nents, DMA_FROM_DEVICE);
err_free_pgtable:
saa7146_pgtable_free(pci, pt);
err_free_slist:
kfree(pt->slist);
pt->slist = NULL;
err_free_mem:
vfree(mem);
err_null:
return NULL;
}
void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, void *mem, struct saa7146_pgtable *pt)
{
dma_unmap_sg(&pci->dev, pt->slist, pt->nents, DMA_FROM_DEVICE);
saa7146_pgtable_free(pci, pt);
kfree(pt->slist);
pt->slist = NULL;
vfree(mem);
}
void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt)
{
if (NULL == pt->cpu)
return;
dma_free_coherent(&pci->dev, pt->size, pt->cpu, pt->dma);
pt->cpu = NULL;
}
int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt)
{
__le32 *cpu;
dma_addr_t dma_addr = 0;
cpu = dma_alloc_coherent(&pci->dev, PAGE_SIZE, &dma_addr, GFP_KERNEL);
if (NULL == cpu) {
return -ENOMEM;
}
pt->size = PAGE_SIZE;
pt->cpu = cpu;
pt->dma = dma_addr;
return 0;
}
int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt,
struct scatterlist *list, int sglen)
{
struct sg_dma_page_iter dma_iter;
__le32 *ptr, fill;
int nr_pages = 0;
int i;
if (WARN_ON(!sglen) ||
WARN_ON(list->offset > PAGE_SIZE))
return -EIO;
/* if we have a user buffer, the first page may not be
aligned to a page boundary. */
pt->offset = list->offset;
ptr = pt->cpu;
for_each_sg_dma_page(list, &dma_iter, sglen, 0) {
*ptr++ = cpu_to_le32(sg_page_iter_dma_address(&dma_iter));
nr_pages++;
}
/* safety; fill the page table up with the last valid page */
fill = *(ptr-1);
for (i = nr_pages; i < 1024; i++)
*ptr++ = fill;
return 0;
}
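/*
 * Hypothetical, standalone sketch (kept out of the build with #if 0) of
 * the capacity implied by the code above: saa7146_pgtable_alloc() hands
 * out one page of 32-bit entries and the fill loop pads to 1024 of them,
 * i.e. PAGE_SIZE / sizeof(__le32) on 4 KiB pages, so a single table can
 * describe up to 1024 * 4 KiB = 4 MiB of buffer.  The 4096-byte page size
 * is assumed here.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE	4096u	/* assumed 4 KiB pages */

int main(void)
{
	unsigned int entries = EX_PAGE_SIZE / sizeof(uint32_t);	/* 1024 */
	unsigned long max_bytes = (unsigned long)entries * EX_PAGE_SIZE;

	printf("%u entries per table, up to %lu KiB mappable\n",
	       entries, max_bytes / 1024);	/* 1024 entries, 4096 KiB */
	return 0;
}
#endif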
/********************************************************************************/
/* interrupt handler */
static irqreturn_t interrupt_hw(int irq, void *dev_id)
{
struct saa7146_dev *dev = dev_id;
u32 isr;
u32 ack_isr;
/* read out the interrupt status register */
ack_isr = isr = saa7146_read(dev, ISR);
/* is this our interrupt? */
if ( 0 == isr ) {
/* nope, some other device */
return IRQ_NONE;
}
if (dev->ext) {
if (dev->ext->irq_mask & isr) {
if (dev->ext->irq_func)
dev->ext->irq_func(dev, &isr);
isr &= ~dev->ext->irq_mask;
}
}
if (0 != (isr & (MASK_27))) {
DEB_INT("irq: RPS0 (0x%08x)\n", isr);
if (dev->vv_data && dev->vv_callback)
dev->vv_callback(dev,isr);
isr &= ~MASK_27;
}
if (0 != (isr & (MASK_28))) {
if (dev->vv_data && dev->vv_callback)
dev->vv_callback(dev,isr);
isr &= ~MASK_28;
}
if (0 != (isr & (MASK_16|MASK_17))) {
SAA7146_IER_DISABLE(dev, MASK_16|MASK_17);
/* only wake up if we expect something */
if (0 != dev->i2c_op) {
dev->i2c_op = 0;
wake_up(&dev->i2c_wq);
} else {
u32 psr = saa7146_read(dev, PSR);
u32 ssr = saa7146_read(dev, SSR);
pr_warn("%s: unexpected i2c irq: isr %08x psr %08x ssr %08x\n",
dev->name, isr, psr, ssr);
}
isr &= ~(MASK_16|MASK_17);
}
if( 0 != isr ) {
ERR("warning: interrupt enabled, but not handled properly.(0x%08x)\n",
isr);
ERR("disabling interrupt source(s)!\n");
SAA7146_IER_DISABLE(dev,isr);
}
saa7146_write(dev, ISR, ack_isr);
return IRQ_HANDLED;
}
/*********************************************************************************/
/* configuration-functions */
static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent)
{
struct saa7146_pci_extension_data *pci_ext = (struct saa7146_pci_extension_data *)ent->driver_data;
struct saa7146_extension *ext = pci_ext->ext;
struct saa7146_dev *dev;
int err = -ENOMEM;
/* clear out mem for sure */
dev = kzalloc(sizeof(struct saa7146_dev), GFP_KERNEL);
if (!dev) {
ERR("out of memory\n");
goto out;
}
/* create a nice device name */
sprintf(dev->name, "saa7146 (%d)", saa7146_num);
DEB_EE("pci:%p\n", pci);
err = pci_enable_device(pci);
if (err < 0) {
ERR("pci_enable_device() failed\n");
goto err_free;
}
/* enable bus-mastering */
pci_set_master(pci);
dev->pci = pci;
/* get chip-revision; this is needed to enable bug-fixes */
dev->revision = pci->revision;
	/* map the chip's register window (BAR 0) into kernel virtual address space */
err = pci_request_region(pci, 0, "saa7146");
if (err < 0)
goto err_disable;
dev->mem = ioremap(pci_resource_start(pci, 0),
pci_resource_len(pci, 0));
if (!dev->mem) {
ERR("ioremap() failed\n");
err = -ENODEV;
goto err_release;
}
	/* we don't do a master reset here anymore; it screws up
	   some boards that don't have an i2c-eeprom for configuration
	   values */
/*
saa7146_write(dev, MC1, MASK_31);
*/
/* disable all irqs */
saa7146_write(dev, IER, 0);
/* shut down all dma transfers and rps tasks */
saa7146_write(dev, MC1, 0x30ff0000);
/* clear out any rps-signals pending */
saa7146_write(dev, MC2, 0xf8000000);
/* request an interrupt for the saa7146 */
err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED,
dev->name, dev);
if (err < 0) {
ERR("request_irq() failed\n");
goto err_unmap;
}
err = -ENOMEM;
/* get memory for various stuff */
dev->d_rps0.cpu_addr = dma_alloc_coherent(&pci->dev, SAA7146_RPS_MEM,
&dev->d_rps0.dma_handle,
GFP_KERNEL);
if (!dev->d_rps0.cpu_addr)
goto err_free_irq;
dev->d_rps1.cpu_addr = dma_alloc_coherent(&pci->dev, SAA7146_RPS_MEM,
&dev->d_rps1.dma_handle,
GFP_KERNEL);
if (!dev->d_rps1.cpu_addr)
goto err_free_rps0;
dev->d_i2c.cpu_addr = dma_alloc_coherent(&pci->dev, SAA7146_RPS_MEM,
&dev->d_i2c.dma_handle, GFP_KERNEL);
if (!dev->d_i2c.cpu_addr)
goto err_free_rps1;
/* the rest + print status message */
pr_info("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x)\n",
dev->mem, dev->revision, pci->irq,
pci->subsystem_vendor, pci->subsystem_device);
dev->ext = ext;
mutex_init(&dev->v4l2_lock);
spin_lock_init(&dev->int_slock);
spin_lock_init(&dev->slock);
mutex_init(&dev->i2c_lock);
dev->module = THIS_MODULE;
init_waitqueue_head(&dev->i2c_wq);
	/* set some sane pci arbitration values */
saa7146_write(dev, PCI_BT_V1, 0x1c00101f);
/* TODO: use the status code of the callback */
err = -ENODEV;
if (ext->probe && ext->probe(dev)) {
DEB_D("ext->probe() failed for %p. skipping device.\n", dev);
goto err_free_i2c;
}
if (ext->attach(dev, pci_ext)) {
DEB_D("ext->attach() failed for %p. skipping device.\n", dev);
goto err_free_i2c;
}
/* V4L extensions will set the pci drvdata to the v4l2_device in the
attach() above. So for those cards that do not use V4L we have to
set it explicitly. */
pci_set_drvdata(pci, &dev->v4l2_dev);
saa7146_num++;
err = 0;
out:
return err;
err_free_i2c:
dma_free_coherent(&pci->dev, SAA7146_RPS_MEM, dev->d_i2c.cpu_addr,
dev->d_i2c.dma_handle);
err_free_rps1:
dma_free_coherent(&pci->dev, SAA7146_RPS_MEM, dev->d_rps1.cpu_addr,
dev->d_rps1.dma_handle);
err_free_rps0:
dma_free_coherent(&pci->dev, SAA7146_RPS_MEM, dev->d_rps0.cpu_addr,
dev->d_rps0.dma_handle);
err_free_irq:
free_irq(pci->irq, (void *)dev);
err_unmap:
iounmap(dev->mem);
err_release:
pci_release_region(pci, 0);
err_disable:
pci_disable_device(pci);
err_free:
kfree(dev);
goto out;
}
static void saa7146_remove_one(struct pci_dev *pdev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
struct saa7146_dev *dev = to_saa7146_dev(v4l2_dev);
struct {
void *addr;
dma_addr_t dma;
} dev_map[] = {
{ dev->d_i2c.cpu_addr, dev->d_i2c.dma_handle },
{ dev->d_rps1.cpu_addr, dev->d_rps1.dma_handle },
{ dev->d_rps0.cpu_addr, dev->d_rps0.dma_handle },
{ NULL, 0 }
}, *p;
DEB_EE("dev:%p\n", dev);
dev->ext->detach(dev);
/* shut down all video dma transfers */
saa7146_write(dev, MC1, 0x00ff0000);
/* disable all irqs, release irq-routine */
saa7146_write(dev, IER, 0);
free_irq(pdev->irq, dev);
for (p = dev_map; p->addr; p++)
dma_free_coherent(&pdev->dev, SAA7146_RPS_MEM, p->addr,
p->dma);
iounmap(dev->mem);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
kfree(dev);
saa7146_num--;
}
/*********************************************************************************/
/* extension handling functions */
int saa7146_register_extension(struct saa7146_extension* ext)
{
DEB_EE("ext:%p\n", ext);
ext->driver.name = ext->name;
ext->driver.id_table = ext->pci_tbl;
ext->driver.probe = saa7146_init_one;
ext->driver.remove = saa7146_remove_one;
pr_info("register extension '%s'\n", ext->name);
return pci_register_driver(&ext->driver);
}
int saa7146_unregister_extension(struct saa7146_extension* ext)
{
DEB_EE("ext:%p\n", ext);
pr_info("unregister extension '%s'\n", ext->name);
pci_unregister_driver(&ext->driver);
return 0;
}
EXPORT_SYMBOL_GPL(saa7146_register_extension);
EXPORT_SYMBOL_GPL(saa7146_unregister_extension);
/* misc functions used by extension modules */
EXPORT_SYMBOL_GPL(saa7146_pgtable_alloc);
EXPORT_SYMBOL_GPL(saa7146_pgtable_free);
EXPORT_SYMBOL_GPL(saa7146_pgtable_build_single);
EXPORT_SYMBOL_GPL(saa7146_vmalloc_build_pgtable);
EXPORT_SYMBOL_GPL(saa7146_vfree_destroy_pgtable);
EXPORT_SYMBOL_GPL(saa7146_wait_for_debi_done);
EXPORT_SYMBOL_GPL(saa7146_setgpio);
EXPORT_SYMBOL_GPL(saa7146_i2c_adapter_prepare);
EXPORT_SYMBOL_GPL(saa7146_debug);
MODULE_AUTHOR("Michael Hunold <[email protected]>");
MODULE_DESCRIPTION("driver for generic saa7146-based hardware");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell Armada 375 pinctrl driver based on mvebu pinctrl core
*
* Copyright (C) 2012 Marvell
*
* Thomas Petazzoni <[email protected]>
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-mvebu.h"
static struct mvebu_mpp_mode mv88f6720_mpp_modes[] = {
MPP_MODE(0,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "dev", "ad2"),
MPP_FUNCTION(0x2, "spi0", "cs1"),
MPP_FUNCTION(0x3, "spi1", "cs1"),
MPP_FUNCTION(0x5, "nand", "io2")),
MPP_MODE(1,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "dev", "ad3"),
MPP_FUNCTION(0x2, "spi0", "mosi"),
MPP_FUNCTION(0x3, "spi1", "mosi"),
MPP_FUNCTION(0x5, "nand", "io3")),
MPP_MODE(2,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "dev", "ad4"),
MPP_FUNCTION(0x2, "ptp", "evreq"),
MPP_FUNCTION(0x3, "led", "c0"),
MPP_FUNCTION(0x4, "audio", "sdi"),
MPP_FUNCTION(0x5, "nand", "io4"),
MPP_FUNCTION(0x6, "spi1", "mosi")),
MPP_MODE(3,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "dev", "ad5"),
MPP_FUNCTION(0x2, "ptp", "trig"),
MPP_FUNCTION(0x3, "led", "p3"),
MPP_FUNCTION(0x4, "audio", "mclk"),
MPP_FUNCTION(0x5, "nand", "io5"),
MPP_FUNCTION(0x6, "spi1", "miso")),
MPP_MODE(4,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "dev", "ad6"),
MPP_FUNCTION(0x2, "spi0", "miso"),
MPP_FUNCTION(0x3, "spi1", "miso"),
MPP_FUNCTION(0x5, "nand", "io6")),
MPP_MODE(5,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "dev", "ad7"),
MPP_FUNCTION(0x2, "spi0", "cs2"),
MPP_FUNCTION(0x3, "spi1", "cs2"),
MPP_FUNCTION(0x5, "nand", "io7"),
MPP_FUNCTION(0x6, "spi1", "miso")),
MPP_MODE(6,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "dev", "ad0"),
MPP_FUNCTION(0x3, "led", "p1"),
MPP_FUNCTION(0x4, "audio", "lrclk"),
MPP_FUNCTION(0x5, "nand", "io0")),
MPP_MODE(7,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "dev", "ad1"),
MPP_FUNCTION(0x2, "ptp", "clk"),
MPP_FUNCTION(0x3, "led", "p2"),
MPP_FUNCTION(0x4, "audio", "extclk"),
MPP_FUNCTION(0x5, "nand", "io1")),
MPP_MODE(8,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "dev", "bootcs"),
MPP_FUNCTION(0x2, "spi0", "cs0"),
MPP_FUNCTION(0x3, "spi1", "cs0"),
MPP_FUNCTION(0x5, "nand", "ce")),
MPP_MODE(9,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "spi0", "sck"),
MPP_FUNCTION(0x3, "spi1", "sck"),
MPP_FUNCTION(0x5, "nand", "we")),
MPP_MODE(10,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "dram", "vttctrl"),
MPP_FUNCTION(0x3, "led", "c1"),
MPP_FUNCTION(0x5, "nand", "re"),
MPP_FUNCTION(0x6, "spi1", "sck")),
MPP_MODE(11,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "dev", "a0"),
MPP_FUNCTION(0x3, "led", "c2"),
MPP_FUNCTION(0x4, "audio", "sdo"),
MPP_FUNCTION(0x5, "nand", "cle")),
MPP_MODE(12,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "dev", "a1"),
MPP_FUNCTION(0x4, "audio", "bclk"),
MPP_FUNCTION(0x5, "nand", "ale")),
MPP_MODE(13,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "dev", "ready"),
MPP_FUNCTION(0x2, "pcie0", "rstout"),
MPP_FUNCTION(0x3, "pcie1", "rstout"),
MPP_FUNCTION(0x5, "nand", "rb"),
MPP_FUNCTION(0x6, "spi1", "mosi")),
MPP_MODE(14,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "i2c0", "sda"),
MPP_FUNCTION(0x3, "uart1", "txd")),
MPP_MODE(15,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "i2c0", "sck"),
MPP_FUNCTION(0x3, "uart1", "rxd")),
MPP_MODE(16,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "uart0", "txd")),
MPP_MODE(17,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "uart0", "rxd")),
MPP_MODE(18,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "tdm", "int")),
MPP_MODE(19,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "tdm", "rst")),
MPP_MODE(20,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "tdm", "pclk")),
MPP_MODE(21,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "tdm", "fsync")),
MPP_MODE(22,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "tdm", "drx")),
MPP_MODE(23,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "tdm", "dtx")),
MPP_MODE(24,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "led", "p0"),
MPP_FUNCTION(0x2, "ge1", "rxd0"),
MPP_FUNCTION(0x3, "sd", "cmd"),
MPP_FUNCTION(0x4, "uart0", "rts"),
MPP_FUNCTION(0x5, "spi0", "cs0"),
MPP_FUNCTION(0x6, "dev", "cs1")),
MPP_MODE(25,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "led", "p2"),
MPP_FUNCTION(0x2, "ge1", "rxd1"),
MPP_FUNCTION(0x3, "sd", "d0"),
MPP_FUNCTION(0x4, "uart0", "cts"),
MPP_FUNCTION(0x5, "spi0", "mosi"),
MPP_FUNCTION(0x6, "dev", "cs2")),
MPP_MODE(26,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "pcie0", "clkreq"),
MPP_FUNCTION(0x2, "ge1", "rxd2"),
MPP_FUNCTION(0x3, "sd", "d2"),
MPP_FUNCTION(0x4, "uart1", "rts"),
MPP_FUNCTION(0x5, "spi0", "cs1"),
MPP_FUNCTION(0x6, "led", "c1")),
MPP_MODE(27,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "pcie1", "clkreq"),
MPP_FUNCTION(0x2, "ge1", "rxd3"),
MPP_FUNCTION(0x3, "sd", "d1"),
MPP_FUNCTION(0x4, "uart1", "cts"),
MPP_FUNCTION(0x5, "spi0", "miso"),
MPP_FUNCTION(0x6, "led", "c2")),
MPP_MODE(28,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "led", "p3"),
MPP_FUNCTION(0x2, "ge1", "txctl"),
MPP_FUNCTION(0x3, "sd", "clk"),
MPP_FUNCTION(0x5, "dram", "vttctrl")),
MPP_MODE(29,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "pcie1", "clkreq"),
MPP_FUNCTION(0x2, "ge1", "rxclk"),
MPP_FUNCTION(0x3, "sd", "d3"),
MPP_FUNCTION(0x5, "spi0", "sck"),
MPP_FUNCTION(0x6, "pcie0", "rstout")),
MPP_MODE(30,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "ge1", "txd0"),
MPP_FUNCTION(0x3, "spi1", "cs0"),
MPP_FUNCTION(0x5, "led", "p3"),
MPP_FUNCTION(0x6, "ptp", "evreq")),
MPP_MODE(31,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "ge1", "txd1"),
MPP_FUNCTION(0x3, "spi1", "mosi"),
MPP_FUNCTION(0x5, "led", "p0")),
MPP_MODE(32,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "ge1", "txd2"),
MPP_FUNCTION(0x3, "spi1", "sck"),
MPP_FUNCTION(0x4, "ptp", "trig"),
MPP_FUNCTION(0x5, "led", "c0")),
MPP_MODE(33,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "ge1", "txd3"),
MPP_FUNCTION(0x3, "spi1", "miso"),
MPP_FUNCTION(0x5, "led", "p2")),
MPP_MODE(34,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "ge1", "txclkout"),
MPP_FUNCTION(0x3, "spi1", "sck"),
MPP_FUNCTION(0x5, "led", "c1")),
MPP_MODE(35,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "ge1", "rxctl"),
MPP_FUNCTION(0x3, "spi1", "cs1"),
MPP_FUNCTION(0x4, "spi0", "cs2"),
MPP_FUNCTION(0x5, "led", "p1")),
MPP_MODE(36,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "pcie0", "clkreq"),
MPP_FUNCTION(0x5, "led", "c2")),
MPP_MODE(37,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "pcie0", "clkreq"),
MPP_FUNCTION(0x2, "tdm", "int"),
MPP_FUNCTION(0x4, "ge", "mdc")),
MPP_MODE(38,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "pcie1", "clkreq"),
MPP_FUNCTION(0x4, "ge", "mdio")),
MPP_MODE(39,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x4, "ref", "clkout"),
MPP_FUNCTION(0x5, "led", "p3")),
MPP_MODE(40,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x4, "uart1", "txd"),
MPP_FUNCTION(0x5, "led", "p0")),
MPP_MODE(41,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x4, "uart1", "rxd"),
MPP_FUNCTION(0x5, "led", "p1")),
MPP_MODE(42,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x3, "spi1", "cs2"),
MPP_FUNCTION(0x4, "led", "c0"),
MPP_FUNCTION(0x6, "ptp", "clk")),
MPP_MODE(43,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "sata0", "prsnt"),
MPP_FUNCTION(0x4, "dram", "vttctrl"),
MPP_FUNCTION(0x5, "led", "c1")),
MPP_MODE(44,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x4, "sata0", "prsnt")),
MPP_MODE(45,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "spi0", "cs2"),
MPP_FUNCTION(0x4, "pcie0", "rstout"),
MPP_FUNCTION(0x5, "led", "c2"),
MPP_FUNCTION(0x6, "spi1", "cs2")),
MPP_MODE(46,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "led", "p0"),
MPP_FUNCTION(0x2, "ge0", "txd0"),
MPP_FUNCTION(0x3, "ge1", "txd0"),
MPP_FUNCTION(0x6, "dev", "we1")),
MPP_MODE(47,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "led", "p1"),
MPP_FUNCTION(0x2, "ge0", "txd1"),
MPP_FUNCTION(0x3, "ge1", "txd1"),
MPP_FUNCTION(0x5, "ptp", "trig"),
MPP_FUNCTION(0x6, "dev", "ale0")),
MPP_MODE(48,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "led", "p2"),
MPP_FUNCTION(0x2, "ge0", "txd2"),
MPP_FUNCTION(0x3, "ge1", "txd2"),
MPP_FUNCTION(0x6, "dev", "ale1")),
MPP_MODE(49,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "led", "p3"),
MPP_FUNCTION(0x2, "ge0", "txd3"),
MPP_FUNCTION(0x3, "ge1", "txd3"),
MPP_FUNCTION(0x6, "dev", "a2")),
MPP_MODE(50,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "led", "c0"),
MPP_FUNCTION(0x2, "ge0", "rxd0"),
MPP_FUNCTION(0x3, "ge1", "rxd0"),
MPP_FUNCTION(0x5, "ptp", "evreq"),
MPP_FUNCTION(0x6, "dev", "ad12")),
MPP_MODE(51,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "led", "c1"),
MPP_FUNCTION(0x2, "ge0", "rxd1"),
MPP_FUNCTION(0x3, "ge1", "rxd1"),
MPP_FUNCTION(0x6, "dev", "ad8")),
MPP_MODE(52,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "led", "c2"),
MPP_FUNCTION(0x2, "ge0", "rxd2"),
MPP_FUNCTION(0x3, "ge1", "rxd2"),
MPP_FUNCTION(0x5, "i2c0", "sda"),
MPP_FUNCTION(0x6, "dev", "ad9")),
MPP_MODE(53,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "pcie1", "rstout"),
MPP_FUNCTION(0x2, "ge0", "rxd3"),
MPP_FUNCTION(0x3, "ge1", "rxd3"),
MPP_FUNCTION(0x5, "i2c0", "sck"),
MPP_FUNCTION(0x6, "dev", "ad10")),
MPP_MODE(54,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "pcie0", "rstout"),
MPP_FUNCTION(0x2, "ge0", "rxctl"),
MPP_FUNCTION(0x3, "ge1", "rxctl"),
MPP_FUNCTION(0x6, "dev", "ad11")),
MPP_MODE(55,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "ge0", "rxclk"),
MPP_FUNCTION(0x3, "ge1", "rxclk"),
MPP_FUNCTION(0x6, "dev", "cs0")),
MPP_MODE(56,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "ge0", "txclkout"),
MPP_FUNCTION(0x3, "ge1", "txclkout"),
MPP_FUNCTION(0x6, "dev", "oe")),
MPP_MODE(57,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "ge0", "txctl"),
MPP_FUNCTION(0x3, "ge1", "txctl"),
MPP_FUNCTION(0x6, "dev", "we0")),
MPP_MODE(58,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x4, "led", "c0")),
MPP_MODE(59,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x4, "led", "c1")),
MPP_MODE(60,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "uart1", "txd"),
MPP_FUNCTION(0x4, "led", "c2"),
MPP_FUNCTION(0x6, "dev", "ad13")),
MPP_MODE(61,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "i2c1", "sda"),
MPP_FUNCTION(0x2, "uart1", "rxd"),
MPP_FUNCTION(0x3, "spi1", "cs2"),
MPP_FUNCTION(0x4, "led", "p0"),
MPP_FUNCTION(0x6, "dev", "ad14")),
MPP_MODE(62,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "i2c1", "sck"),
MPP_FUNCTION(0x4, "led", "p1"),
MPP_FUNCTION(0x6, "dev", "ad15")),
MPP_MODE(63,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "ptp", "trig"),
MPP_FUNCTION(0x4, "led", "p2"),
MPP_FUNCTION(0x6, "dev", "burst/last")),
MPP_MODE(64,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "dram", "vttctrl"),
MPP_FUNCTION(0x4, "led", "p3")),
MPP_MODE(65,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "sata1", "prsnt")),
MPP_MODE(66,
MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x2, "ptp", "evreq"),
MPP_FUNCTION(0x4, "spi1", "cs3"),
MPP_FUNCTION(0x5, "pcie0", "rstout"),
MPP_FUNCTION(0x6, "dev", "cs3")),
};
static struct mvebu_pinctrl_soc_info armada_375_pinctrl_info;
static const struct of_device_id armada_375_pinctrl_of_match[] = {
{ .compatible = "marvell,mv88f6720-pinctrl" },
{ },
};
static const struct mvebu_mpp_ctrl mv88f6720_mpp_controls[] = {
MPP_FUNC_CTRL(0, 69, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f6720_mpp_gpio_ranges[] = {
MPP_GPIO_RANGE(0, 0, 0, 32),
MPP_GPIO_RANGE(1, 32, 32, 32),
MPP_GPIO_RANGE(2, 64, 64, 3),
};
static int armada_375_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_375_pinctrl_info;
soc->variant = 0; /* no variants for Armada 375 */
soc->controls = mv88f6720_mpp_controls;
soc->ncontrols = ARRAY_SIZE(mv88f6720_mpp_controls);
soc->modes = mv88f6720_mpp_modes;
soc->nmodes = ARRAY_SIZE(mv88f6720_mpp_modes);
soc->gpioranges = mv88f6720_mpp_gpio_ranges;
soc->ngpioranges = ARRAY_SIZE(mv88f6720_mpp_gpio_ranges);
pdev->dev.platform_data = soc;
return mvebu_pinctrl_simple_mmio_probe(pdev);
}
static struct platform_driver armada_375_pinctrl_driver = {
.driver = {
.name = "armada-375-pinctrl",
.of_match_table = of_match_ptr(armada_375_pinctrl_of_match),
},
.probe = armada_375_pinctrl_probe,
};
builtin_platform_driver(armada_375_pinctrl_driver);
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021-2022 Digiteq Automotive
* author: Martin Tuma <[email protected]>
*/
#include <linux/ioport.h>
#include "mgb4_regs.h"
int mgb4_regs_map(struct resource *res, struct mgb4_regs *regs)
{
regs->mapbase = res->start;
regs->mapsize = resource_size(res);
if (!request_mem_region(regs->mapbase, regs->mapsize, res->name))
return -EINVAL;
regs->membase = ioremap(regs->mapbase, regs->mapsize);
if (!regs->membase) {
release_mem_region(regs->mapbase, regs->mapsize);
return -EINVAL;
}
return 0;
}
void mgb4_regs_free(struct mgb4_regs *regs)
{
iounmap(regs->membase);
release_mem_region(regs->mapbase, regs->mapsize);
}
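/*
 * Minimal usage sketch (illustrative only, not part of this driver): given a
 * struct resource describing the register window, a caller maps it, accesses
 * it through regs.membase and releases it again, roughly:
 *
 *	struct mgb4_regs regs;
 *
 *	if (mgb4_regs_map(res, &regs))
 *		return -EINVAL;
 *	// ... ioread32()/iowrite32() on regs.membase ...
 *	mgb4_regs_free(&regs);
 */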
|
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
void test_skb_ctx(void)
{
struct __sk_buff skb = {
.cb[0] = 1,
.cb[1] = 2,
.cb[2] = 3,
.cb[3] = 4,
.cb[4] = 5,
.priority = 6,
.ingress_ifindex = 11,
.ifindex = 1,
.tstamp = 7,
.wire_len = 100,
.gso_segs = 8,
.mark = 9,
.gso_size = 10,
.hwtstamp = 11,
};
LIBBPF_OPTS(bpf_test_run_opts, tattr,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.ctx_in = &skb,
.ctx_size_in = sizeof(skb),
.ctx_out = &skb,
.ctx_size_out = sizeof(skb),
);
struct bpf_object *obj;
int err, prog_fd, i;
err = bpf_prog_test_load("./test_skb_ctx.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &prog_fd);
if (!ASSERT_OK(err, "load"))
return;
/* ctx_in != NULL, ctx_size_in == 0 */
tattr.ctx_size_in = 0;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_NEQ(err, 0, "ctx_size_in");
tattr.ctx_size_in = sizeof(skb);
/* ctx_out != NULL, ctx_size_out == 0 */
tattr.ctx_size_out = 0;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_NEQ(err, 0, "ctx_size_out");
tattr.ctx_size_out = sizeof(skb);
/* non-zero [len, tc_index] fields should be rejected */
skb.len = 1;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_NEQ(err, 0, "len");
skb.len = 0;
skb.tc_index = 1;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_NEQ(err, 0, "tc_index");
skb.tc_index = 0;
/* non-zero [hash, sk] fields should be rejected */
skb.hash = 1;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_NEQ(err, 0, "hash");
skb.hash = 0;
skb.sk = (struct bpf_sock *)1;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_NEQ(err, 0, "sk");
skb.sk = 0;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_OK(err, "test_run");
ASSERT_OK(tattr.retval, "test_run retval");
ASSERT_EQ(tattr.ctx_size_out, sizeof(skb), "ctx_size_out");
for (i = 0; i < 5; i++)
ASSERT_EQ(skb.cb[i], i + 2, "ctx_out_cb");
ASSERT_EQ(skb.priority, 7, "ctx_out_priority");
ASSERT_EQ(skb.ifindex, 1, "ctx_out_ifindex");
ASSERT_EQ(skb.ingress_ifindex, 11, "ctx_out_ingress_ifindex");
ASSERT_EQ(skb.tstamp, 8, "ctx_out_tstamp");
ASSERT_EQ(skb.mark, 10, "ctx_out_mark");
bpf_object__close(obj);
}
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _IAVF_REGISTER_H_
#define _IAVF_REGISTER_H_
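/*
 * Note: IAVF_MASK() is not defined in this header; it comes from the shared
 * iAVF type definitions (iavf_type.h) and is assumed here to expand to the
 * mask shifted into position, e.g. IAVF_MASK(0x3FF, IAVF_VF_ARQH1_ARQH_SHIFT)
 * yields 0x3FF << 0 = 0x3FF.
 */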
#define IAVF_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
#define IAVF_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
#define IAVF_VF_ARQH1 0x00007400 /* Reset: EMPR */
#define IAVF_VF_ARQH1_ARQH_SHIFT 0
#define IAVF_VF_ARQH1_ARQH_MASK IAVF_MASK(0x3FF, IAVF_VF_ARQH1_ARQH_SHIFT)
#define IAVF_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
#define IAVF_VF_ARQLEN1_ARQVFE_SHIFT 28
#define IAVF_VF_ARQLEN1_ARQVFE_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQVFE_SHIFT)
#define IAVF_VF_ARQLEN1_ARQOVFL_SHIFT 29
#define IAVF_VF_ARQLEN1_ARQOVFL_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQOVFL_SHIFT)
#define IAVF_VF_ARQLEN1_ARQCRIT_SHIFT 30
#define IAVF_VF_ARQLEN1_ARQCRIT_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQCRIT_SHIFT)
#define IAVF_VF_ARQLEN1_ARQENABLE_SHIFT 31
#define IAVF_VF_ARQLEN1_ARQENABLE_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQENABLE_SHIFT)
#define IAVF_VF_ARQT1 0x00007000 /* Reset: EMPR */
#define IAVF_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
#define IAVF_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
#define IAVF_VF_ATQH1 0x00006400 /* Reset: EMPR */
#define IAVF_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
#define IAVF_VF_ATQLEN1_ATQVFE_SHIFT 28
#define IAVF_VF_ATQLEN1_ATQVFE_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQVFE_SHIFT)
#define IAVF_VF_ATQLEN1_ATQOVFL_SHIFT 29
#define IAVF_VF_ATQLEN1_ATQOVFL_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQOVFL_SHIFT)
#define IAVF_VF_ATQLEN1_ATQCRIT_SHIFT 30
#define IAVF_VF_ATQLEN1_ATQCRIT_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQCRIT_SHIFT)
#define IAVF_VF_ATQLEN1_ATQENABLE_SHIFT 31
#define IAVF_VF_ATQLEN1_ATQENABLE_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQENABLE_SHIFT)
#define IAVF_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define IAVF_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
#define IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT 0
#define IAVF_VFGEN_RSTAT_VFR_STATE_MASK IAVF_MASK(0x3, IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT)
#define IAVF_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
#define IAVF_VFINT_DYN_CTL01_INTENA_SHIFT 0
#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...63 */ /* Reset: VFR */
#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
#define IAVF_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
#define IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
#define IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK IAVF_MASK(0x1, IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
#define IAVF_VFINT_ICR0_ENA1_RSVD_SHIFT 31
#define IAVF_VFINT_ICR01 0x00004800 /* Reset: CORER */
#define IAVF_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
#define IAVF_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define IAVF_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
#define IAVF_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
#define IAVF_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
#define IAVF_VFQF_HKEY_MAX_INDEX 12
#define IAVF_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define IAVF_VFQF_HLUT_MAX_INDEX 15
#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
#endif /* _IAVF_REGISTER_H_ */
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/drivers/char/hpilo.h
*
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* David Altobelli <[email protected]>
*/
#ifndef __HPILO_H
#define __HPILO_H
#define ILO_NAME "hpilo"
/* iLO ASIC PCI revision id */
#define PCI_REV_ID_NECHES 7
/* max number of open channel control blocks per device, hw limited to 32 */
#define MAX_CCB 24
/* min number of open channel control blocks per device, hw limited to 32 */
#define MIN_CCB 8
/* max number of supported devices */
#define MAX_ILO_DEV 1
/* max number of files */
#define MAX_OPEN (MAX_CCB * MAX_ILO_DEV)
/* total wait time in usec */
#define MAX_WAIT_TIME 10000
/* per spin wait time in usec */
#define WAIT_TIME 10
/* spin counter for open/close delay */
#define MAX_WAIT (MAX_WAIT_TIME / WAIT_TIME)
/*
* Per device, used to track global memory allocations.
*/
struct ilo_hwinfo {
/* mmio registers on device */
char __iomem *mmio_vaddr;
/* doorbell registers on device */
char __iomem *db_vaddr;
/* shared memory on device used for channel control blocks */
char __iomem *ram_vaddr;
/* files corresponding to this device */
struct ccb_data *ccb_alloc[MAX_CCB];
struct pci_dev *ilo_dev;
/*
* open_lock serializes ccb_cnt during open and close
* [ irq disabled ]
* -> alloc_lock used when adding/removing/searching ccb_alloc,
* which represents all ccbs open on the device
* --> fifo_lock controls access to fifo queues shared with hw
*
* Locks must be taken in this order, but open_lock and alloc_lock
* are optional; they do not need to be held in order to take a
* lower-level lock.
*/
spinlock_t open_lock;
spinlock_t alloc_lock;
spinlock_t fifo_lock;
struct cdev cdev;
};
/* offset from mmio_vaddr for enabling doorbell interrupts */
#define DB_IRQ 0xB2
/* offset from mmio_vaddr for outbound communications */
#define DB_OUT 0xD4
/* DB_OUT reset bit */
#define DB_RESET 26
/*
* Channel control block. Used to manage hardware queues.
* The format must match hw's version. The hw ccb is 128 bytes,
* but the context area shouldn't be touched by the driver.
*/
#define ILOSW_CCB_SZ 64
#define ILOHW_CCB_SZ 128
struct ccb {
union {
char *send_fifobar;
u64 send_fifobar_pa;
} ccb_u1;
union {
char *send_desc;
u64 send_desc_pa;
} ccb_u2;
u64 send_ctrl;
union {
char *recv_fifobar;
u64 recv_fifobar_pa;
} ccb_u3;
union {
char *recv_desc;
u64 recv_desc_pa;
} ccb_u4;
u64 recv_ctrl;
union {
char __iomem *db_base;
u64 padding5;
} ccb_u5;
u64 channel;
/* unused context area (64 bytes) */
};
/* ccb queue parameters */
#define SENDQ 1
#define RECVQ 2
#define NR_QENTRY 4
#define L2_QENTRY_SZ 12
/* ccb ctrl bitfields */
#define CTRL_BITPOS_L2SZ 0
#define CTRL_BITPOS_FIFOINDEXMASK 4
#define CTRL_BITPOS_DESCLIMIT 18
#define CTRL_BITPOS_A 30
#define CTRL_BITPOS_G 31
/* ccb doorbell macros */
#define L2_DB_SIZE 14
#define ONE_DB_SIZE (1 << L2_DB_SIZE)
/*
* Per fd structure used to track the ccb allocated to that dev file.
*/
struct ccb_data {
/* software version of ccb, using virtual addrs */
struct ccb driver_ccb;
/* hardware version of ccb, using physical addrs */
struct ccb ilo_ccb;
/* hardware ccb is written to this shared mapped device memory */
struct ccb __iomem *mapped_ccb;
/* dma'able memory used for send/recv queues */
void *dma_va;
dma_addr_t dma_pa;
size_t dma_size;
/* pointer to hardware device info */
struct ilo_hwinfo *ilo_hw;
/* queue for this ccb to wait for recv data */
wait_queue_head_t ccb_waitq;
/* usage count, to allow for shared ccbs */
int ccb_cnt;
/* open wanted exclusive access to this ccb */
int ccb_excl;
};
/*
* FIFO queue structure, shared with hw.
*/
#define ILO_START_ALIGN 4096
#define ILO_CACHE_SZ 128
struct fifo {
u64 nrents; /* user requested number of fifo entries */
u64 imask; /* mask to extract valid fifo index */
u64 merge; /* O/C bits to merge in during enqueue operation */
u64 reset; /* set to non-zero when the target device resets */
u8 pad_0[ILO_CACHE_SZ - (sizeof(u64) * 4)];
u64 head;
u8 pad_1[ILO_CACHE_SZ - (sizeof(u64))];
u64 tail;
u8 pad_2[ILO_CACHE_SZ - (sizeof(u64))];
u64 fifobar[];
};
/* convert between struct fifo, and the fifobar, which is saved in the ccb */
#define FIFOHANDLESIZE (sizeof(struct fifo))
#define FIFOBARTOHANDLE(_fifo) \
((struct fifo *)(((char *)(_fifo)) - FIFOHANDLESIZE))
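/*
 * Illustrative note (an assumption about how the driver uses this): the ccb
 * stores a pointer to fifo->fifobar[], which starts FIFOHANDLESIZE bytes past
 * the beginning of the allocation, so FIFOBARTOHANDLE() simply walks back to
 * the owning struct fifo, e.g.
 *
 *	struct fifo *q = FIFOBARTOHANDLE(driver_ccb->ccb_u1.send_fifobar);
 */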
/* the number of qwords to consume from the entry descriptor */
#define ENTRY_BITPOS_QWORDS 0
/* descriptor index number (within a specified queue) */
#define ENTRY_BITPOS_DESCRIPTOR 10
/* state bit, fifo entry consumed by consumer */
#define ENTRY_BITPOS_C 22
/* state bit, fifo entry is occupied */
#define ENTRY_BITPOS_O 23
#define ENTRY_BITS_QWORDS 10
#define ENTRY_BITS_DESCRIPTOR 12
#define ENTRY_BITS_C 1
#define ENTRY_BITS_O 1
#define ENTRY_BITS_TOTAL \
(ENTRY_BITS_C + ENTRY_BITS_O + \
ENTRY_BITS_QWORDS + ENTRY_BITS_DESCRIPTOR)
/* extract various entry fields */
#define ENTRY_MASK ((1 << ENTRY_BITS_TOTAL) - 1)
#define ENTRY_MASK_C (((1 << ENTRY_BITS_C) - 1) << ENTRY_BITPOS_C)
#define ENTRY_MASK_O (((1 << ENTRY_BITS_O) - 1) << ENTRY_BITPOS_O)
#define ENTRY_MASK_QWORDS \
(((1 << ENTRY_BITS_QWORDS) - 1) << ENTRY_BITPOS_QWORDS)
#define ENTRY_MASK_DESCRIPTOR \
(((1 << ENTRY_BITS_DESCRIPTOR) - 1) << ENTRY_BITPOS_DESCRIPTOR)
#define ENTRY_MASK_NOSTATE (ENTRY_MASK >> (ENTRY_BITS_C + ENTRY_BITS_O))
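/*
 * Worked example (illustrative only): an occupied entry consuming 2 qwords
 * at descriptor index 5 would be encoded as
 *
 *	(2 << ENTRY_BITPOS_QWORDS) | (5 << ENTRY_BITPOS_DESCRIPTOR) | ENTRY_MASK_O
 *
 * and masking it with ENTRY_MASK_NOSTATE strips the C/O state bits again.
 */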
#endif /* __HPILO_H */
|
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
* Copyright 2019 Toradex
*/
#include <dt-bindings/input/linux-event-codes.h>
/ {
aliases {
rtc0 = &rtc_i2c;
rtc1 = &rtc;
};
/* fixed crystal dedicated to mcp25xx */
clk16m: clock-16mhz {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <16000000>;
};
};
/* Colibri Analogue Inputs */
&adc0 {
status = "okay";
};
/* Colibri PWM_A */
&adma_pwm {
status = "okay";
};
&colibri_gpio_keys {
status = "okay";
};
&extcon_usbc_det {
status = "okay";
};
&i2c1 {
status = "okay";
/* M41T0M6 real time clock on carrier board */
rtc_i2c: rtc@68 {
compatible = "st,m41t0";
reg = <0x68>;
};
};
&iomuxc {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ext_io0>, <&pinctrl_hog0>, <&pinctrl_hog1>,
<&pinctrl_lpspi2_cs2>;
};
/* Colibri SPI */
&lpspi2 {
status = "okay";
mcp2515: can@0 {
compatible = "microchip,mcp2515";
reg = <0>;
interrupt-parent = <&lsio_gpio3>;
interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
pinctrl-0 = <&pinctrl_can_int>;
pinctrl-names = "default";
clocks = <&clk16m>;
spi-max-frequency = <10000000>;
};
};
/* Colibri UART_B */
&lpuart0 {
status = "okay";
};
/* Colibri UART_C */
&lpuart2 {
status = "okay";
};
/* Colibri PWM_B */
&lsio_pwm0 {
status = "okay";
};
/* Colibri PWM_C */
&lsio_pwm1 {
status = "okay";
};
/* Colibri PWM_D */
&lsio_pwm2 {
status = "okay";
};
/* Colibri UART_A */
&lpuart3 {
status = "okay";
};
/* Colibri FastEthernet */
&fec1 {
status = "okay";
};
/* USB PHY for usbotg3 */
&usb3_phy {
status = "okay";
};
&usbotg1 {
status = "okay";
};
&usbotg3 {
status = "okay";
};
&usbotg3_cdns3 {
status = "okay";
};
/* USB PHY for usbotg1 */
&usbphy1 {
status = "okay";
};
/* Colibri SD/MMC Card */
&usdhc2 {
status = "okay";
};
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* ePAPR para-virtualization support.
*
* Copyright (C) 2012 Freescale Semiconductor, Inc.
*/
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/epapr_hcalls.h>
#include <asm/cacheflush.h>
#include <asm/text-patching.h>
#include <asm/machdep.h>
#include <asm/inst.h>
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
extern void epapr_ev_idle(void);
extern u32 epapr_ev_idle_start[];
#endif
bool epapr_paravirt_enabled;
static bool __maybe_unused epapr_has_idle;
static int __init early_init_dt_scan_epapr(unsigned long node,
const char *uname,
int depth, void *data)
{
const u32 *insts;
int len;
int i;
insts = of_get_flat_dt_prop(node, "hcall-instructions", &len);
if (!insts)
return 0;
if (len % 4 || len > (4 * 4))
return -1;
for (i = 0; i < (len / 4); i++) {
ppc_inst_t inst = ppc_inst(be32_to_cpu(insts[i]));
patch_instruction(epapr_hypercall_start + i, inst);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
patch_instruction(epapr_ev_idle_start + i, inst);
#endif
}
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
if (of_get_flat_dt_prop(node, "has-idle", NULL))
epapr_has_idle = true;
#endif
epapr_paravirt_enabled = true;
return 1;
}
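/*
 * Illustrative device tree fragment (instruction values are placeholders;
 * the real ones are supplied by the hypervisor) showing the node this
 * scanner consumes:
 *
 *	hypervisor {
 *		compatible = "epapr,hypervisor-1";
 *		hcall-instructions = <0x44000022 0x4e800020
 *				      0x60000000 0x60000000>;
 *		has-idle;
 *	};
 */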
int __init epapr_paravirt_early_init(void)
{
of_scan_flat_dt(early_init_dt_scan_epapr, NULL);
return 0;
}
static int __init epapr_idle_init(void)
{
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
if (epapr_has_idle)
ppc_md.power_save = epapr_ev_idle;
#endif
return 0;
}
postcore_initcall(epapr_idle_init);
|
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2022/23 Siemens Mobility GmbH */
#ifndef _LINUX_GSMMUX_H
#define _LINUX_GSMMUX_H
#include <linux/const.h>
#include <linux/if.h>
#include <linux/ioctl.h>
#include <linux/types.h>
/*
* flags definition for n_gsm
*
* Used by:
* struct gsm_config_ext.flags
* struct gsm_dlci_config.flags
*/
/* Forces a DLCI reset if set. Otherwise, a DLCI reset is only done if
* incompatible settings were provided. Always cleared on retrieval.
*/
#define GSM_FL_RESTART _BITUL(0)
/**
* struct gsm_config - n_gsm basic configuration parameters
*
* This structure is used in combination with GSMIOC_GETCONF and GSMIOC_SETCONF
* to retrieve and set the basic parameters of an n_gsm ldisc.
* struct gsm_config_ext can be used to configure extended ldisc parameters.
*
* All timers are in units of 1/100th of a second.
*
* @adaption: Convergence layer type
* @encapsulation: Framing (0 = basic option, 1 = advanced option)
* @initiator: Initiator or responder
* @t1: Acknowledgment timer
* @t2: Response timer for multiplexer control channel
* @t3: Response timer for wake-up procedure
* @n2: Maximum number of retransmissions
* @mru: Maximum incoming frame payload size
* @mtu: Maximum outgoing frame payload size
* @k: Window size
* @i: Frame type (1 = UIH, 2 = UI)
* @unused: Can not be used
*/
struct gsm_config
{
unsigned int adaption;
unsigned int encapsulation;
unsigned int initiator;
unsigned int t1;
unsigned int t2;
unsigned int t3;
unsigned int n2;
unsigned int mru;
unsigned int mtu;
unsigned int k;
unsigned int i;
unsigned int unused[8];
};
#define GSMIOC_GETCONF _IOR('G', 0, struct gsm_config)
#define GSMIOC_SETCONF _IOW('G', 1, struct gsm_config)
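/*
 * Minimal usage sketch (illustrative only, not part of the UAPI): with the
 * n_gsm line discipline attached to the tty, user space typically performs
 * a read-modify-write cycle on the basic configuration:
 *
 *	struct gsm_config c;
 *
 *	if (ioctl(fd, GSMIOC_GETCONF, &c) == 0) {
 *		c.initiator = 1;
 *		c.encapsulation = 0;	// basic framing
 *		ioctl(fd, GSMIOC_SETCONF, &c);
 *	}
 */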
/**
* struct gsm_netconfig - n_gsm network configuration parameters
*
* This structure is used in combination with GSMIOC_ENABLE_NET and
* GSMIOC_DISABLE_NET to enable or disable a network data connection
* over a mux virtual tty channel. This is for modems that support
* data connections with raw IP frames instead of PPP.
*
* @adaption: Adaption to use in network mode.
* @protocol: Protocol to use - only ETH_P_IP supported.
* @unused2: Can not be used.
* @if_name: Interface name format string.
* @unused: Can not be used.
*/
struct gsm_netconfig {
unsigned int adaption;
unsigned short protocol;
unsigned short unused2;
char if_name[IFNAMSIZ];
__u8 unused[28];
};
#define GSMIOC_ENABLE_NET _IOW('G', 2, struct gsm_netconfig)
#define GSMIOC_DISABLE_NET _IO('G', 3)
/* get the base tty number for a configured gsmmux tty */
#define GSMIOC_GETFIRST _IOR('G', 4, __u32)
/**
* struct gsm_config_ext - n_gsm extended configuration parameters
*
* This structure is used in combination with GSMIOC_GETCONF_EXT and
* GSMIOC_SETCONF_EXT to retrieve and set the extended parameters of an
* n_gsm ldisc.
*
* All timers are in units of 1/100th of a second.
*
* @keep_alive: Control channel keep-alive in 1/100th of a second (0 to disable).
* @wait_config: Wait for DLCI config before opening virtual link?
* @flags: Mux specific flags.
* @reserved: For future use, must be initialized to zero.
*/
struct gsm_config_ext {
__u32 keep_alive;
__u32 wait_config;
__u32 flags;
__u32 reserved[5];
};
#define GSMIOC_GETCONF_EXT _IOR('G', 5, struct gsm_config_ext)
#define GSMIOC_SETCONF_EXT _IOW('G', 6, struct gsm_config_ext)
/**
* struct gsm_dlci_config - n_gsm channel configuration parameters
*
* This structure is used in combination with GSMIOC_GETCONF_DLCI and
* GSMIOC_SETCONF_DLCI to retrieve and set the channel specific parameters
* of an n_gsm ldisc.
*
* Set the channel accordingly before calling GSMIOC_GETCONF_DLCI.
*
* @channel: DLCI (0 for the associated DLCI).
* @adaption: Convergence layer type.
* @mtu: Maximum transfer unit.
* @priority: Priority (0 for default value).
* @i: Frame type (1 = UIH, 2 = UI).
* @k: Window size (0 for default value).
* @flags: DLCI specific flags.
* @reserved: For future use, must be initialized to zero.
*/
struct gsm_dlci_config {
__u32 channel;
__u32 adaption;
__u32 mtu;
__u32 priority;
__u32 i;
__u32 k;
__u32 flags;
__u32 reserved[7];
};
#define GSMIOC_GETCONF_DLCI _IOWR('G', 7, struct gsm_dlci_config)
#define GSMIOC_SETCONF_DLCI _IOW('G', 8, struct gsm_dlci_config)
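/*
 * Minimal usage sketch (illustrative only): select the channel first, then
 * read, adjust and write back the per-DLCI parameters:
 *
 *	struct gsm_dlci_config dc = { .channel = 1 };
 *
 *	if (ioctl(fd, GSMIOC_GETCONF_DLCI, &dc) == 0) {
 *		dc.mtu = 1400;
 *		ioctl(fd, GSMIOC_SETCONF_DLCI, &dc);
 *	}
 */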
#endif
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* boot.c - Architecture-Specific Low-Level ACPI Boot Support
*
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2001 Jun Nakajima <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/acpi_pmtmr.h>
#include <linux/efi.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dmi.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/efi-bgrt.h>
#include <linux/serial_core.h>
#include <linux/pgtable.h>
#include <asm/e820/api.h>
#include <asm/irqdomain.h>
#include <asm/pci_x86.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/io.h>
#include <asm/mpspec.h>
#include <asm/smp.h>
#include <asm/i8259.h>
#include <asm/setup.h>
#include "sleep.h" /* To include x86_acpi_suspend_lowlevel */
static int __initdata acpi_force = 0;
int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
#ifdef CONFIG_X86_64
# include <asm/proto.h>
#endif /* CONFIG_X86_64 */
int acpi_noirq; /* skip ACPI IRQ initialization */
static int acpi_nobgrt; /* skip ACPI BGRT */
int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_lapic;
int acpi_ioapic;
int acpi_strict;
int acpi_disable_cmcff;
bool acpi_int_src_ovr[NR_IRQS_LEGACY];
/* ACPI SCI override configuration */
u8 acpi_sci_flags __initdata;
u32 acpi_sci_override_gsi __initdata = INVALID_ACPI_IRQ;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
int acpi_fix_pin2_polarity __initdata;
#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
static bool has_lapic_cpus __initdata;
static bool acpi_support_online_capable;
#endif
#ifdef CONFIG_X86_IO_APIC
/*
* Locks related to IOAPIC hotplug
* Hotplug side:
* ->device_hotplug_lock
* ->acpi_ioapic_lock
* ->ioapic_lock
* Interrupt mapping side:
* ->acpi_ioapic_lock
* ->ioapic_mutex
* ->ioapic_lock
*/
static DEFINE_MUTEX(acpi_ioapic_lock);
#endif
/* --------------------------------------------------------------------------
Boot-time Configuration
-------------------------------------------------------------------------- */
/*
* The default interrupt routing model is PIC (8259). This gets
* overridden if IOAPICs are enumerated (below).
*/
enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
/*
* ISA irqs by default are the first 16 gsis but can be
* any gsi as specified by an interrupt source override.
*/
static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};
/*
* This is just a simple wrapper around early_memremap(),
* with sanity checks for phys == 0 and size == 0.
*/
void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
if (!phys || !size)
return NULL;
return early_memremap(phys, size);
}
void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
if (!map || !size)
return;
early_memunmap(map, size);
}
#ifdef CONFIG_X86_LOCAL_APIC
static int __init acpi_parse_madt(struct acpi_table_header *table)
{
struct acpi_table_madt *madt = NULL;
if (!boot_cpu_has(X86_FEATURE_APIC))
return -EINVAL;
madt = (struct acpi_table_madt *)table;
if (!madt) {
pr_warn("Unable to map MADT\n");
return -ENODEV;
}
if (madt->address) {
acpi_lapic_addr = (u64) madt->address;
pr_debug("Local APIC address 0x%08x\n", madt->address);
}
if (madt->flags & ACPI_MADT_PCAT_COMPAT)
legacy_pic_pcat_compat();
/* ACPI 6.3 and newer support the online capable bit. */
if (acpi_gbl_FADT.header.revision > 6 ||
(acpi_gbl_FADT.header.revision == 6 &&
acpi_gbl_FADT.minor_revision >= 3))
acpi_support_online_capable = true;
default_acpi_madt_oem_check(madt->header.oem_id,
madt->header.oem_table_id);
return 0;
}
static bool __init acpi_is_processor_usable(u32 lapic_flags)
{
if (lapic_flags & ACPI_MADT_ENABLED)
return true;
if (!acpi_support_online_capable ||
(lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
return true;
return false;
}
static int __init
acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_madt_local_x2apic *processor = NULL;
#ifdef CONFIG_X86_X2APIC
u32 apic_id;
u8 enabled;
#endif
processor = (struct acpi_madt_local_x2apic *)header;
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_X86_X2APIC
apic_id = processor->local_apic_id;
enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
/* Ignore invalid ID */
if (apic_id == 0xffffffff)
return 0;
/* don't register processors that cannot be onlined */
if (!acpi_is_processor_usable(processor->lapic_flags))
return 0;
/*
* According to https://uefi.org/specs/ACPI/6.5/05_ACPI_Software_Programming_Model.html#processor-local-x2apic-structure
* when MADT provides both valid LAPIC and x2APIC entries, the APIC ID
* in x2APIC must be equal or greater than 0xff.
*/
if (has_lapic_cpus && apic_id < 0xff)
return 0;
/*
* We need to register disabled CPUs as well so that they are
* counted. This allows us to size cpus_possible_map more
* accurately and to avoid preallocating memory for all NR_CPUS
* when we use CPU hotplug.
*/
if (!apic_id_valid(apic_id)) {
if (enabled)
pr_warn("x2apic entry ignored\n");
return 0;
}
topology_register_apic(apic_id, processor->uid, enabled);
#else
pr_warn("x2apic entry ignored\n");
#endif
return 0;
}
static int __init
acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
{
struct acpi_madt_local_apic *processor = NULL;
processor = (struct acpi_madt_local_apic *)header;
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(&header->common);
/* Ignore invalid ID */
if (processor->id == 0xff)
return 0;
/* don't register processors that cannot be onlined */
if (!acpi_is_processor_usable(processor->lapic_flags))
return 0;
/*
* We need to register disabled CPUs as well so that they are
* counted. This allows us to size cpus_possible_map more
* accurately and to avoid preallocating memory for all NR_CPUS
* when we use CPU hotplug.
*/
topology_register_apic(processor->id, /* APIC ID */
processor->processor_id, /* ACPI ID */
processor->lapic_flags & ACPI_MADT_ENABLED);
has_lapic_cpus = true;
return 0;
}
static int __init
acpi_parse_sapic(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_madt_local_sapic *processor = NULL;
processor = (struct acpi_madt_local_sapic *)header;
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(&header->common);
topology_register_apic((processor->id << 8) | processor->eid,/* APIC ID */
processor->processor_id, /* ACPI ID */
processor->lapic_flags & ACPI_MADT_ENABLED);
return 0;
}
static int __init
acpi_parse_lapic_addr_ovr(union acpi_subtable_headers * header,
const unsigned long end)
{
struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
return -EINVAL;
acpi_table_print_madt_entry(&header->common);
acpi_lapic_addr = lapic_addr_ovr->address;
return 0;
}
static int __init
acpi_parse_x2apic_nmi(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL;
x2apic_nmi = (struct acpi_madt_local_x2apic_nmi *)header;
if (BAD_MADT_ENTRY(x2apic_nmi, end))
return -EINVAL;
acpi_table_print_madt_entry(&header->common);
if (x2apic_nmi->lint != 1)
pr_warn("NMI not connected to LINT 1!\n");
return 0;
}
static int __init
acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long end)
{
struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
if (BAD_MADT_ENTRY(lapic_nmi, end))
return -EINVAL;
acpi_table_print_madt_entry(&header->common);
if (lapic_nmi->lint != 1)
pr_warn("NMI not connected to LINT 1!\n");
return 0;
}
#endif /* CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_IO_APIC
#define MP_ISA_BUS 0
static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
u8 trigger, u32 gsi);
static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
u32 gsi)
{
/*
* Check bus_irq boundary.
*/
if (bus_irq >= NR_IRQS_LEGACY) {
pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
return;
}
/*
* TBD: This check is for faulty timer entries, where the override
* erroneously sets the trigger to level, resulting in a HUGE
* increase of timer interrupts!
*/
if ((bus_irq == 0) && (trigger == 3))
trigger = 1;
if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0)
return;
/*
* Reset the default identity mapping if gsi is also a legacy IRQ;
* otherwise there will be more than one entry with the same GSI
* and acpi_isa_irq_to_gsi() may give a wrong result.
*/
if (gsi < nr_legacy_irqs() && isa_irq_to_gsi[gsi] == gsi)
isa_irq_to_gsi[gsi] = INVALID_ACPI_IRQ;
isa_irq_to_gsi[bus_irq] = gsi;
}
static void mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
int polarity)
{
#ifdef CONFIG_X86_MPPARSE
struct mpc_intsrc mp_irq;
struct pci_dev *pdev;
unsigned char number;
unsigned int devfn;
int ioapic;
u8 pin;
if (!acpi_ioapic)
return;
if (!dev || !dev_is_pci(dev))
return;
pdev = to_pci_dev(dev);
number = pdev->bus->number;
devfn = pdev->devfn;
pin = pdev->pin;
/* the entry saved here should appear identically in the mptable */
mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
(polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
mp_irq.srcbus = number;
mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
ioapic = mp_find_ioapic(gsi);
mp_irq.dstapic = mpc_ioapic_id(ioapic);
mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
mp_save_irq(&mp_irq);
#endif
}
static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
u8 trigger, u32 gsi)
{
struct mpc_intsrc mp_irq;
int ioapic, pin;
/* Convert 'gsi' to 'ioapic.pin'(INTIN#) */
ioapic = mp_find_ioapic(gsi);
if (ioapic < 0) {
pr_warn("Failed to find ioapic for gsi : %u\n", gsi);
return ioapic;
}
pin = mp_find_ioapic_pin(ioapic, gsi);
mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
mp_irq.irqflag = (trigger << 2) | polarity;
mp_irq.srcbus = MP_ISA_BUS;
mp_irq.srcbusirq = bus_irq;
mp_irq.dstapic = mpc_ioapic_id(ioapic);
mp_irq.dstirq = pin;
mp_save_irq(&mp_irq);
return 0;
}
static int __init
acpi_parse_ioapic(union acpi_subtable_headers * header, const unsigned long end)
{
struct acpi_madt_io_apic *ioapic = NULL;
struct ioapic_domain_cfg cfg = {
.type = IOAPIC_DOMAIN_DYNAMIC,
.ops = &mp_ioapic_irqdomain_ops,
};
ioapic = (struct acpi_madt_io_apic *)header;
if (BAD_MADT_ENTRY(ioapic, end))
return -EINVAL;
acpi_table_print_madt_entry(&header->common);
/* Statically assign IRQ numbers for IOAPICs hosting legacy IRQs */
if (ioapic->global_irq_base < nr_legacy_irqs())
cfg.type = IOAPIC_DOMAIN_LEGACY;
mp_register_ioapic(ioapic->id, ioapic->address, ioapic->global_irq_base,
&cfg);
return 0;
}
/*
* Parse Interrupt Source Override for the ACPI SCI
*/
static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, u32 gsi)
{
if (trigger == 0) /* compatible SCI trigger is level */
trigger = 3;
if (polarity == 0) /* compatible SCI polarity is low */
polarity = 3;
/* Command-line override via acpi_sci= */
if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
if (bus_irq < NR_IRQS_LEGACY)
mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
else
mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi);
acpi_penalize_sci_irq(bus_irq, trigger, polarity);
/*
* Stash the override to indicate we've been here and for a
* later update of acpi_gbl_FADT.
*/
acpi_sci_override_gsi = gsi;
}
static int __init
acpi_parse_int_src_ovr(union acpi_subtable_headers * header,
const unsigned long end)
{
struct acpi_madt_interrupt_override *intsrc = NULL;
intsrc = (struct acpi_madt_interrupt_override *)header;
if (BAD_MADT_ENTRY(intsrc, end))
return -EINVAL;
acpi_table_print_madt_entry(&header->common);
if (intsrc->source_irq < NR_IRQS_LEGACY)
acpi_int_src_ovr[intsrc->source_irq] = true;
if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
acpi_sci_ioapic_setup(intsrc->source_irq,
intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
(intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
intsrc->global_irq);
return 0;
}
if (intsrc->source_irq == 0) {
if (acpi_skip_timer_override) {
pr_warn("BIOS IRQ0 override ignored.\n");
return 0;
}
if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
&& (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
pr_warn("BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
}
}
mp_override_legacy_irq(intsrc->source_irq,
intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
(intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
intsrc->global_irq);
return 0;
}
static int __init
acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned long end)
{
struct acpi_madt_nmi_source *nmi_src = NULL;
nmi_src = (struct acpi_madt_nmi_source *)header;
if (BAD_MADT_ENTRY(nmi_src, end))
return -EINVAL;
acpi_table_print_madt_entry(&header->common);
/* TBD: Support nmi_src entries? */
return 0;
}
#endif /* CONFIG_X86_IO_APIC */
/*
* acpi_pic_sci_set_trigger()
*
* use ELCR to set PIC-mode trigger type for SCI
*
* If a PIC-mode SCI is not recognized or gives spurious IRQ7s,
* it may require edge triggering -- use "acpi_sci=edge"
*
* Port 0x4d0-4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
* for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
* ELCR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
* ELCR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
*/
void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
{
unsigned int mask = 1 << irq;
unsigned int old, new;
/* Real old ELCR mask */
old = inb(PIC_ELCR1) | (inb(PIC_ELCR2) << 8);
/*
* If we use ACPI to set PCI IRQs, then we should clear ELCR
* since we will set it correctly as we enable the PCI irq
* routing.
*/
new = acpi_noirq ? old : 0;
/*
* Update SCI information in the ELCR; it isn't in the PCI
* routing tables.
*/
switch (trigger) {
case 1: /* Edge - clear */
new &= ~mask;
break;
case 3: /* Level - set */
new |= mask;
break;
}
if (old == new)
return;
pr_warn("setting ELCR to %04x (from %04x)\n", new, old);
outb(new, PIC_ELCR1);
outb(new >> 8, PIC_ELCR2);
}
int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
{
int rc, irq, trigger, polarity;
if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
*irqp = gsi;
return 0;
}
rc = acpi_get_override_irq(gsi, &trigger, &polarity);
if (rc)
return rc;
trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
if (irq < 0)
return irq;
*irqp = irq;
return 0;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
{
if (isa_irq < nr_legacy_irqs() &&
isa_irq_to_gsi[isa_irq] != INVALID_ACPI_IRQ) {
*gsi = isa_irq_to_gsi[isa_irq];
return 0;
}
return -1;
}
static int acpi_register_gsi_pic(struct device *dev, u32 gsi,
int trigger, int polarity)
{
#ifdef CONFIG_PCI
/*
* Make sure all (legacy) PCI IRQs are set as level-triggered.
*/
if (trigger == ACPI_LEVEL_SENSITIVE)
elcr_set_level_irq(gsi);
#endif
return gsi;
}
#ifdef CONFIG_X86_LOCAL_APIC
static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
int trigger, int polarity)
{
int irq = gsi;
#ifdef CONFIG_X86_IO_APIC
int node;
struct irq_alloc_info info;
node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
ioapic_set_alloc_attr(&info, node, trigger, polarity);
mutex_lock(&acpi_ioapic_lock);
irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
/* Don't set up the ACPI SCI because it's already set up */
if (irq >= 0 && enable_update_mptable && gsi != acpi_gbl_FADT.sci_interrupt)
mp_config_acpi_gsi(dev, gsi, trigger, polarity);
mutex_unlock(&acpi_ioapic_lock);
#endif
return irq;
}
static void acpi_unregister_gsi_ioapic(u32 gsi)
{
#ifdef CONFIG_X86_IO_APIC
int irq;
mutex_lock(&acpi_ioapic_lock);
irq = mp_map_gsi_to_irq(gsi, 0, NULL);
if (irq > 0)
mp_unmap_irq(irq);
mutex_unlock(&acpi_ioapic_lock);
#endif
}
#endif
int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
int trigger, int polarity) = acpi_register_gsi_pic;
void (*__acpi_unregister_gsi)(u32 gsi) = NULL;
#ifdef CONFIG_ACPI_SLEEP
int (*acpi_suspend_lowlevel)(void) = x86_acpi_suspend_lowlevel;
#else
int (*acpi_suspend_lowlevel)(void);
#endif
/*
* success: return IRQ number (>=0)
* failure: return < 0
*/
int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
{
return __acpi_register_gsi(dev, gsi, trigger, polarity);
}
EXPORT_SYMBOL_GPL(acpi_register_gsi);
void acpi_unregister_gsi(u32 gsi)
{
if (__acpi_unregister_gsi)
__acpi_unregister_gsi(gsi);
}
EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
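/*
 * Illustrative caller sketch (hypothetical, not kernel code): a driver that
 * extracted a GSI plus trigger/polarity from an ACPI resource would map it
 * to a Linux IRQ roughly like
 *
 *	int irq = acpi_register_gsi(dev, gsi, ACPI_LEVEL_SENSITIVE,
 *				    ACPI_ACTIVE_LOW);
 *	if (irq >= 0)
 *		err = request_irq(irq, my_handler, IRQF_SHARED, "mydev", priv);
 *
 * and undo the mapping with acpi_unregister_gsi(gsi); my_handler, "mydev"
 * and priv are placeholders.
 */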
#ifdef CONFIG_X86_LOCAL_APIC
static void __init acpi_set_irq_model_ioapic(void)
{
acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
__acpi_register_gsi = acpi_register_gsi_ioapic;
__acpi_unregister_gsi = acpi_unregister_gsi_ioapic;
acpi_ioapic = 1;
}
#endif
/*
* ACPI based hotplug support for CPU
*/
#ifdef CONFIG_ACPI_HOTPLUG_CPU
#include <acpi/processor.h>
static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
int nid;
nid = acpi_get_node(handle);
if (nid != NUMA_NO_NODE) {
set_apicid_to_node(physid, nid);
numa_set_node(cpu, nid);
}
#endif
return 0;
}
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
int cpu = topology_hotplug_apic(physid, acpi_id);
if (cpu < 0) {
pr_info("Unable to map lapic to logical cpu number\n");
return cpu;
}
acpi_processor_set_pdc(handle);
acpi_map_cpu2node(handle, cpu, physid);
*pcpu = cpu;
return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);
int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
#endif
topology_hotunplug_apic(cpu);
return 0;
}
EXPORT_SYMBOL(acpi_unmap_cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
{
int ret = -ENOSYS;
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int ioapic_id;
u64 addr;
struct ioapic_domain_cfg cfg = {
.type = IOAPIC_DOMAIN_DYNAMIC,
.ops = &mp_ioapic_irqdomain_ops,
};
ioapic_id = acpi_get_ioapic_id(handle, gsi_base, &addr);
if (ioapic_id < 0) {
unsigned long long uid;
acpi_status status;
status = acpi_evaluate_integer(handle, METHOD_NAME__UID,
NULL, &uid);
if (ACPI_FAILURE(status)) {
acpi_handle_warn(handle, "failed to get IOAPIC ID.\n");
return -EINVAL;
}
ioapic_id = (int)uid;
}
mutex_lock(&acpi_ioapic_lock);
ret = mp_register_ioapic(ioapic_id, phys_addr, gsi_base, &cfg);
mutex_unlock(&acpi_ioapic_lock);
#endif
return ret;
}
EXPORT_SYMBOL(acpi_register_ioapic);
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
{
int ret = -ENOSYS;
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
mutex_lock(&acpi_ioapic_lock);
ret = mp_unregister_ioapic(gsi_base);
mutex_unlock(&acpi_ioapic_lock);
#endif
return ret;
}
EXPORT_SYMBOL(acpi_unregister_ioapic);
/**
* acpi_ioapic_registered - Check whether IOAPIC associated with @gsi_base
* has been registered
* @handle: ACPI handle of the IOAPIC device
* @gsi_base: GSI base associated with the IOAPIC
*
* Assume caller holds some type of lock to serialize acpi_ioapic_registered()
* with acpi_register_ioapic()/acpi_unregister_ioapic().
*/
int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base)
{
int ret = 0;
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
mutex_lock(&acpi_ioapic_lock);
ret = mp_ioapic_registered(gsi_base);
mutex_unlock(&acpi_ioapic_lock);
#endif
return ret;
}
static int __init acpi_parse_sbf(struct acpi_table_header *table)
{
struct acpi_table_boot *sb = (struct acpi_table_boot *)table;
sbf_port = sb->cmos_index; /* Save CMOS port */
return 0;
}
#ifdef CONFIG_HPET_TIMER
#include <asm/hpet.h>
static struct resource *hpet_res __initdata;
static int __init acpi_parse_hpet(struct acpi_table_header *table)
{
struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
pr_warn("HPET timers must be located in memory.\n");
return -1;
}
hpet_address = hpet_tbl->address.address;
hpet_blockid = hpet_tbl->sequence;
/*
* Some broken BIOSes advertise HPET at 0x0. We really do not
* want to allocate a resource there.
*/
if (!hpet_address) {
pr_warn("HPET id: %#x base: %#lx is invalid\n", hpet_tbl->id, hpet_address);
return 0;
}
#ifdef CONFIG_X86_64
/*
* Some even more broken BIOSes advertise HPET at
* 0xfed0000000000000 instead of 0xfed00000. Fix it up and add
* some noise:
*/
if (hpet_address == 0xfed0000000000000UL) {
if (!hpet_force_user) {
pr_warn("HPET id: %#x base: 0xfed0000000000000 is bogus, try hpet=force on the kernel command line to fix it up to 0xfed00000.\n",
hpet_tbl->id);
hpet_address = 0;
return 0;
}
pr_warn("HPET id: %#x base: 0xfed0000000000000 fixed up to 0xfed00000.\n",
hpet_tbl->id);
hpet_address >>= 32;
}
#endif
pr_info("HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet_address);
/*
* Allocate and initialize the HPET firmware resource for adding into
* the resource tree during the lateinit timeframe.
*/
#define HPET_RESOURCE_NAME_SIZE 9
hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE,
SMP_CACHE_BYTES);
if (!hpet_res)
panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
hpet_res->name = (void *)&hpet_res[1];
hpet_res->flags = IORESOURCE_MEM;
snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
hpet_tbl->sequence);
hpet_res->start = hpet_address;
hpet_res->end = hpet_address + (1 * 1024) - 1;
return 0;
}
/*
* hpet_insert_resource inserts the HPET resources used into the resource
* tree.
*/
static __init int hpet_insert_resource(void)
{
if (!hpet_res)
return 1;
return insert_resource(&iomem_resource, hpet_res);
}
late_initcall(hpet_insert_resource);
#else
#define acpi_parse_hpet NULL
#endif
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
if (!(acpi_gbl_FADT.boot_flags & ACPI_FADT_LEGACY_DEVICES)) {
pr_debug("no legacy devices present\n");
x86_platform.legacy.devices.pnpbios = 0;
}
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
!(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) &&
x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) {
pr_debug("i8042 controller is absent\n");
x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT;
}
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
pr_debug("not registering RTC platform device\n");
x86_platform.legacy.rtc = 0;
}
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_VGA) {
pr_debug("probing for VGA not safe\n");
x86_platform.legacy.no_vga = 1;
}
#ifdef CONFIG_X86_PM_TIMER
/* detect the location of the ACPI PM Timer */
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
/* FADT rev. 2 */
if (acpi_gbl_FADT.xpm_timer_block.space_id !=
ACPI_ADR_SPACE_SYSTEM_IO)
return 0;
pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
/*
* "X" fields are optional extensions to the original V1.0
* fields, so we must selectively expand V1.0 fields if the
* corresponding X field is zero.
*/
if (!pmtmr_ioport)
pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
} else {
/* FADT rev. 1 */
pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
}
if (pmtmr_ioport)
pr_info("PM-Timer IO Port: %#x\n", pmtmr_ioport);
#endif
return 0;
}
#ifdef CONFIG_X86_LOCAL_APIC
/*
* Parse LAPIC entries in MADT
* returns 0 on success, < 0 on error
*/
static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
{
int count;
if (!boot_cpu_has(X86_FEATURE_APIC))
return -ENODEV;
/*
* Note that the LAPIC address is obtained from the MADT (32-bit value)
* and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
*/
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
acpi_parse_lapic_addr_ovr, 0);
if (count < 0) {
pr_err("Error parsing LAPIC address override entry\n");
return count;
}
register_lapic_address(acpi_lapic_addr);
return count;
}
static int __init acpi_parse_madt_lapic_entries(void)
{
int count, x2count = 0;
if (!boot_cpu_has(X86_FEATURE_APIC))
return -ENODEV;
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
acpi_parse_sapic, MAX_LOCAL_APIC);
if (!count) {
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
acpi_parse_lapic, MAX_LOCAL_APIC);
x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
acpi_parse_x2apic, MAX_LOCAL_APIC);
}
if (!count && !x2count) {
pr_err("No LAPIC entries present\n");
/* TBD: Cleanup to allow fallback to MPS */
return -ENODEV;
} else if (count < 0 || x2count < 0) {
pr_err("Error parsing LAPIC entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
acpi_parse_x2apic_nmi, 0);
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
acpi_parse_lapic_nmi, 0);
if (count < 0 || x2count < 0) {
pr_err("Error parsing LAPIC NMI entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
return 0;
}
#endif /* CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_IO_APIC
static void __init mp_config_acpi_legacy_irqs(void)
{
int i;
struct mpc_intsrc mp_irq;
#ifdef CONFIG_EISA
/*
* Fabricate the legacy ISA bus (MP_ISA_BUS).
*/
mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
#endif
set_bit(MP_ISA_BUS, mp_bus_not_pci);
pr_debug("Bus #%d is ISA (nIRQs: %d)\n", MP_ISA_BUS, nr_legacy_irqs());
/*
* Use the default configuration for IRQs 0-15 unless it is
* overridden by (MADT) interrupt source override entries.
*/
for (i = 0; i < nr_legacy_irqs(); i++) {
int ioapic, pin;
unsigned int dstapic;
int idx;
u32 gsi;
/* Locate the gsi that irq i maps to. */
if (acpi_isa_irq_to_gsi(i, &gsi))
continue;
/*
* Locate the IOAPIC that manages the ISA IRQ.
*/
ioapic = mp_find_ioapic(gsi);
if (ioapic < 0)
continue;
pin = mp_find_ioapic_pin(ioapic, gsi);
dstapic = mpc_ioapic_id(ioapic);
for (idx = 0; idx < mp_irq_entries; idx++) {
struct mpc_intsrc *irq = mp_irqs + idx;
/* Do we already have a mapping for this ISA IRQ? */
if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
break;
/* Do we already have a mapping for this IOAPIC pin? */
if (irq->dstapic == dstapic && irq->dstirq == pin)
break;
}
if (idx != mp_irq_entries) {
pr_debug("ACPI: IRQ%d used by override.\n", i);
continue; /* IRQ already used */
}
mp_irq.type = MP_INTSRC;
mp_irq.irqflag = 0; /* Conforming */
mp_irq.srcbus = MP_ISA_BUS;
mp_irq.dstapic = dstapic;
mp_irq.irqtype = mp_INT;
mp_irq.srcbusirq = i; /* Identity mapped */
mp_irq.dstirq = pin;
mp_save_irq(&mp_irq);
}
}
/*
* Parse IOAPIC related entries in MADT
* returns 0 on success, < 0 on error
*/
static int __init acpi_parse_madt_ioapic_entries(void)
{
int count;
/*
* The ACPI interpreter is required to complete interrupt setup,
* so if it is off, don't enumerate the IO-APICs with ACPI.
* If MPS is present, it will handle them; otherwise the system
* will stay in PIC mode.
*/
if (acpi_disabled || acpi_noirq)
return -ENODEV;
if (!boot_cpu_has(X86_FEATURE_APIC))
return -ENODEV;
/*
* if "noapic" boot option, don't look for IO-APICs
*/
if (ioapic_is_disabled) {
pr_info("Skipping IOAPIC probe due to 'noapic' option.\n");
return -ENODEV;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
MAX_IO_APICS);
if (!count) {
pr_err("No IOAPIC entries present\n");
return -ENODEV;
} else if (count < 0) {
pr_err("Error parsing IOAPIC entry\n");
return count;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
acpi_parse_int_src_ovr,
irq_get_nr_irqs());
if (count < 0) {
pr_err("Error parsing interrupt source overrides entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
/*
* If the BIOS did not supply an INT_SRC_OVR for the SCI,
* pretend we got one so we can set the SCI flags.
* But ignore setting up the SCI on hardware-reduced platforms.
*/
if (acpi_sci_override_gsi == INVALID_ACPI_IRQ && !acpi_gbl_reduced_hardware)
acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0,
acpi_gbl_FADT.sci_interrupt);
/* Fill in identity legacy mappings where no override */
mp_config_acpi_legacy_irqs();
count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
acpi_parse_nmi_src,
irq_get_nr_irqs());
if (count < 0) {
pr_err("Error parsing NMI SRC entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
return 0;
}
#else
static inline int acpi_parse_madt_ioapic_entries(void)
{
return -1;
}
#endif /* !CONFIG_X86_IO_APIC */
static void __init early_acpi_process_madt(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
int error;
if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
/*
* Parse MADT LAPIC entries
*/
error = early_acpi_parse_madt_lapic_addr_ovr();
if (!error) {
acpi_lapic = 1;
smp_found_config = 1;
}
if (error == -EINVAL) {
/*
* Dell Precision Workstation 410, 610 come here.
*/
pr_err("Invalid BIOS MADT, disabling ACPI\n");
disable_acpi();
}
}
#endif
}
static void __init acpi_process_madt(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
int error;
if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
/*
* Parse MADT LAPIC entries
*/
error = acpi_parse_madt_lapic_entries();
if (!error) {
acpi_lapic = 1;
/*
* Parse MADT IO-APIC entries
*/
mutex_lock(&acpi_ioapic_lock);
error = acpi_parse_madt_ioapic_entries();
mutex_unlock(&acpi_ioapic_lock);
if (!error) {
acpi_set_irq_model_ioapic();
smp_found_config = 1;
}
#ifdef CONFIG_ACPI_MADT_WAKEUP
/*
* Parse MADT MP Wake entry.
*/
acpi_table_parse_madt(ACPI_MADT_TYPE_MULTIPROC_WAKEUP,
acpi_parse_mp_wake, 1);
#endif
}
if (error == -EINVAL) {
/*
* Dell Precision Workstation 410, 610 come here.
*/
pr_err("Invalid BIOS MADT, disabling ACPI\n");
disable_acpi();
}
} else {
/*
* ACPI found no MADT, and so ACPI wants UP PIC mode.
* In the event an MPS table was found, forget it.
* Boot with "acpi=off" to use MPS on such a system.
*/
if (smp_found_config) {
pr_warn("No APIC-table, disabling MPS\n");
smp_found_config = 0;
}
}
/*
* ACPI supports both logical (e.g. Hyper-Threading) and physical
* processors, whereas MPS only supports physical processors.
*/
if (acpi_lapic && acpi_ioapic)
pr_info("Using ACPI (MADT) for SMP configuration information\n");
else if (acpi_lapic)
pr_info("Using ACPI for processor (LAPIC) configuration information\n");
#endif
}
static int __init disable_acpi_irq(const struct dmi_system_id *d)
{
if (!acpi_force) {
pr_notice("%s detected: force use of acpi=noirq\n", d->ident);
acpi_noirq_set();
}
return 0;
}
static int __init disable_acpi_pci(const struct dmi_system_id *d)
{
if (!acpi_force) {
pr_notice("%s detected: force use of pci=noacpi\n", d->ident);
acpi_disable_pci();
}
return 0;
}
static int __init disable_acpi_xsdt(const struct dmi_system_id *d)
{
if (!acpi_force) {
pr_notice("%s detected: force use of acpi=rsdt\n", d->ident);
acpi_gbl_do_not_use_xsdt = TRUE;
} else {
pr_notice("Warning: DMI blacklist says broken, but acpi XSDT forced\n");
}
return 0;
}
static int __init dmi_disable_acpi(const struct dmi_system_id *d)
{
if (!acpi_force) {
pr_notice("%s detected: acpi off\n", d->ident);
disable_acpi();
} else {
pr_notice("Warning: DMI blacklist says broken, but acpi forced\n");
}
return 0;
}
/*
* Force ignoring BIOS IRQ0 override
*/
static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
{
if (!acpi_skip_timer_override) {
pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
d->ident);
acpi_skip_timer_override = 1;
}
return 0;
}
/*
* ACPI offers an alternative platform interface model that removes
* ACPI hardware requirements for platforms that do not implement
* the PC Architecture.
*
* We initialize the Hardware-reduced ACPI model here:
*/
void __init acpi_generic_reduced_hw_init(void)
{
/*
* Override x86_init functions and bypass legacy PIC in
* hardware reduced ACPI mode.
*/
x86_init.timers.timer_init = x86_init_noop;
x86_init.irqs.pre_vector_init = x86_init_noop;
legacy_pic = &null_legacy_pic;
}
static void __init acpi_reduced_hw_init(void)
{
if (acpi_gbl_reduced_hardware)
x86_init.acpi.reduced_hw_early_init();
}
/*
* If your system is blacklisted here, but you find that acpi=force
* works for you, please contact [email protected]
*/
static const struct dmi_system_id acpi_dmi_table[] __initconst = {
/*
* Boxes that need ACPI disabled
*/
{
.callback = dmi_disable_acpi,
.ident = "IBM Thinkpad",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
},
},
/*
* Boxes that need ACPI PCI IRQ routing disabled
*/
{
.callback = disable_acpi_irq,
.ident = "ASUS A7V",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
/* newer BIOS, Revision 1011, does work */
DMI_MATCH(DMI_BIOS_VERSION,
"ASUS A7V ACPI BIOS Revision 1007"),
},
},
{
/*
* Latest BIOS for IBM 600E (1.16) has bad pcinum
* for LPC bridge, which is needed for the PCI
* interrupt links to work. DSDT fix is in bug 5966.
* 2645, 2646 model numbers are shared with 600/600E/600X
*/
.callback = disable_acpi_irq,
.ident = "IBM Thinkpad 600 Series 2645",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
DMI_MATCH(DMI_BOARD_NAME, "2645"),
},
},
{
.callback = disable_acpi_irq,
.ident = "IBM Thinkpad 600 Series 2646",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
DMI_MATCH(DMI_BOARD_NAME, "2646"),
},
},
/*
* Boxes that need ACPI PCI IRQ routing and PCI scan disabled
*/
{ /* _BBN 0 bug */
.callback = disable_acpi_pci,
.ident = "ASUS PR-DLS",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
DMI_MATCH(DMI_BIOS_VERSION,
"ASUS PR-DLS ACPI BIOS Revision 1010"),
DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
},
},
{
.callback = disable_acpi_pci,
.ident = "Acer TravelMate 36x Laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
},
},
/*
* Boxes that need ACPI XSDT use disabled due to corrupted tables
*/
{
.callback = disable_acpi_xsdt,
.ident = "Advantech DAC-BJ01",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
DMI_MATCH(DMI_PRODUCT_NAME, "Bearlake CRB Board"),
DMI_MATCH(DMI_BIOS_VERSION, "V1.12"),
DMI_MATCH(DMI_BIOS_DATE, "02/01/2011"),
},
},
{}
};
/* second table for DMI checks that should run after early-quirks */
static const struct dmi_system_id acpi_dmi_table_late[] __initconst = {
/*
* HP laptops which use a DSDT reporting as HP/SB400/10000,
* which includes some code which overrides all temperature
* trip points to 16C if the INTIN2 input of the I/O APIC
* is enabled. This input is incorrectly designated the
* ISA IRQ 0 via an interrupt source override even though
* it is wired to the output of the master 8259A and INTIN0
* is not connected at all. Force ignoring BIOS IRQ0
 * override in such cases.
*/
{
.callback = dmi_ignore_irq0_timer_override,
.ident = "HP nx6115 laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"),
},
},
{
.callback = dmi_ignore_irq0_timer_override,
.ident = "HP NX6125 laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6125"),
},
},
{
.callback = dmi_ignore_irq0_timer_override,
.ident = "HP NX6325 laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
},
},
{
.callback = dmi_ignore_irq0_timer_override,
.ident = "HP 6715b laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
},
},
{
.callback = dmi_ignore_irq0_timer_override,
.ident = "FUJITSU SIEMENS",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
},
},
{}
};
/*
* acpi_boot_table_init() and acpi_boot_init()
* called from setup_arch(), always.
* 1. checksums all tables
* 2. enumerates lapics
* 3. enumerates io-apics
*
* acpi_table_init() is separate to allow reading SRAT without
* other side effects.
*
* side effects of acpi_boot_init:
* acpi_lapic = 1 if LAPIC found
* acpi_ioapic = 1 if IOAPIC found
* if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
* if acpi_blacklisted() acpi_disabled = 1;
* acpi_irq_model=...
* ...
*/
void __init acpi_boot_table_init(void)
{
dmi_check_system(acpi_dmi_table);
/*
* If acpi_disabled, bail out
*/
if (acpi_disabled)
return;
/*
* Initialize the ACPI boot-time table parser.
*/
if (acpi_locate_initial_tables())
disable_acpi();
else
acpi_reserve_initial_tables();
}
int __init early_acpi_boot_init(void)
{
if (acpi_disabled)
return 1;
acpi_table_init_complete();
acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
/*
* blacklist may disable ACPI entirely
*/
if (acpi_blacklisted()) {
if (acpi_force) {
pr_warn("acpi=force override\n");
} else {
pr_warn("Disabling ACPI support\n");
disable_acpi();
return 1;
}
}
/*
* Process the Multiple APIC Description Table (MADT), if present
*/
early_acpi_process_madt();
/*
* Hardware-reduced ACPI mode initialization:
*/
acpi_reduced_hw_init();
return 0;
}
int __init acpi_boot_init(void)
{
/* those are executed after early-quirks are executed */
dmi_check_system(acpi_dmi_table_late);
/*
* If acpi_disabled, bail out
*/
if (acpi_disabled)
return 1;
acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
/*
* set sci_int and PM timer address
*/
acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
/*
* Process the Multiple APIC Description Table (MADT), if present
*/
acpi_process_madt();
acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
if (IS_ENABLED(CONFIG_ACPI_BGRT) && !acpi_nobgrt)
acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
if (!acpi_noirq)
x86_init.pci.init = pci_acpi_init;
/* Do not enable ACPI SPCR console by default */
acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
return 0;
}
static int __init parse_acpi(char *arg)
{
if (!arg)
return -EINVAL;
/* "acpi=off" disables both ACPI table parsing and interpreter */
if (strcmp(arg, "off") == 0) {
disable_acpi();
}
/* acpi=force to over-ride black-list */
else if (strcmp(arg, "force") == 0) {
acpi_force = 1;
acpi_disabled = 0;
}
/* acpi=strict disables out-of-spec workarounds */
else if (strcmp(arg, "strict") == 0) {
acpi_strict = 1;
}
/* acpi=rsdt use RSDT instead of XSDT */
else if (strcmp(arg, "rsdt") == 0) {
acpi_gbl_do_not_use_xsdt = TRUE;
}
/* "acpi=noirq" disables ACPI interrupt routing */
else if (strcmp(arg, "noirq") == 0) {
acpi_noirq_set();
}
/* "acpi=copy_dsdt" copies DSDT */
else if (strcmp(arg, "copy_dsdt") == 0) {
acpi_gbl_copy_dsdt_locally = 1;
}
/* "acpi=nocmcff" disables FF mode for corrected errors */
else if (strcmp(arg, "nocmcff") == 0) {
acpi_disable_cmcff = 1;
} else {
/* Core will printk when we return error. */
return -EINVAL;
}
return 0;
}
early_param("acpi", parse_acpi);
static int __init parse_acpi_bgrt(char *arg)
{
acpi_nobgrt = true;
return 0;
}
early_param("bgrt_disable", parse_acpi_bgrt);
/* FIXME: Using pci= for an ACPI parameter is a travesty. */
static int __init parse_pci(char *arg)
{
if (arg && strcmp(arg, "noacpi") == 0)
acpi_disable_pci();
return 0;
}
early_param("pci", parse_pci);
int __init acpi_mps_check(void)
{
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
	/* mptable code is not built-in */
	if (acpi_disabled || acpi_noirq) {
		pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may cause problems\n");
return 1;
}
#endif
return 0;
}
#ifdef CONFIG_X86_IO_APIC
static int __init parse_acpi_skip_timer_override(char *arg)
{
acpi_skip_timer_override = 1;
return 0;
}
early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
static int __init parse_acpi_use_timer_override(char *arg)
{
acpi_use_timer_override = 1;
return 0;
}
early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
#endif /* CONFIG_X86_IO_APIC */
static int __init setup_acpi_sci(char *s)
{
if (!s)
return -EINVAL;
if (!strcmp(s, "edge"))
acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
else if (!strcmp(s, "level"))
acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
else if (!strcmp(s, "high"))
acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
else if (!strcmp(s, "low"))
acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
else
return -EINVAL;
return 0;
}
early_param("acpi_sci", setup_acpi_sci);
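/*
 * ACPI Global Lock accessors, used through the ACPI_ACQUIRE_GLOBAL_LOCK
 * and ACPI_RELEASE_GLOBAL_LOCK macros. Per the ACPI spec the lock is a
 * dword in the FACS: bit 0 is the "pending" flag, bit 1 the "owned" flag.
 *
 * Acquire atomically sets "owned"; if the lock was already owned it also
 * sets "pending" and returns 0, so the caller has to wait for the global
 * lock release notification. Otherwise it returns -1 (non-zero: acquired).
 *
 * Release clears both flags and returns the old "pending" bit, telling
 * the caller whether waiters must be signalled via GBL_RLS.
 */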
int __acpi_acquire_global_lock(unsigned int *lock)
{
unsigned int old, new, val;
old = READ_ONCE(*lock);
do {
val = (old >> 1) & 0x1;
new = (old & ~0x3) + 2 + val;
} while (!try_cmpxchg(lock, &old, new));
if (val)
return 0;
return -1;
}
int __acpi_release_global_lock(unsigned int *lock)
{
unsigned int old, new;
old = READ_ONCE(*lock);
do {
new = old & ~0x3;
} while (!try_cmpxchg(lock, &old, new));
return old & 0x1;
}
void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
e820__range_add(addr, size, E820_TYPE_NVS);
e820__update_table_print();
}
void x86_default_set_root_pointer(u64 addr)
{
boot_params.acpi_rsdp_addr = addr;
}
u64 x86_default_get_root_pointer(void)
{
return boot_params.acpi_rsdp_addr;
}
#ifdef CONFIG_XEN_PV
void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
return ioremap_cache(phys, size);
}
void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, acpi_size size) =
x86_acpi_os_ioremap;
EXPORT_SYMBOL_GPL(acpi_os_ioremap);
#endif
|
// SPDX-License-Identifier: GPL-2.0
/*
* Lontium LT9211 bridge driver
*
* LT9211 is capable of converting:
* 2xDSI/2xLVDS/1xDPI -> 2xDSI/2xLVDS/1xDPI
* Currently supported is:
* 1xDSI -> 1xLVDS
*
 * Copyright (C) 2022 Marek Vasut <marex@denx.de>
*/
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#define REG_PAGE_CONTROL 0xff
#define REG_CHIPID0 0x8100
#define REG_CHIPID0_VALUE 0x18
#define REG_CHIPID1 0x8101
#define REG_CHIPID1_VALUE 0x01
#define REG_CHIPID2 0x8102
#define REG_CHIPID2_VALUE 0xe3
#define REG_DSI_LANE 0xd000
/* DSI lane count - 0 means 4 lanes ; 1, 2, 3 means 1, 2, 3 lanes. */
#define REG_DSI_LANE_COUNT(n) ((n) & 3)
struct lt9211 {
struct drm_bridge bridge;
struct device *dev;
struct regmap *regmap;
struct mipi_dsi_device *dsi;
struct drm_bridge *panel_bridge;
struct gpio_desc *reset_gpio;
struct regulator *vccio;
bool lvds_dual_link;
bool lvds_dual_link_even_odd_swap;
};
static const struct regmap_range lt9211_rw_ranges[] = {
regmap_reg_range(0xff, 0xff),
regmap_reg_range(0x8100, 0x816b),
regmap_reg_range(0x8200, 0x82aa),
regmap_reg_range(0x8500, 0x85ff),
regmap_reg_range(0x8600, 0x86a0),
regmap_reg_range(0x8700, 0x8746),
regmap_reg_range(0xd000, 0xd0a7),
regmap_reg_range(0xd400, 0xd42c),
regmap_reg_range(0xd800, 0xd838),
regmap_reg_range(0xd9c0, 0xd9d5),
};
static const struct regmap_access_table lt9211_rw_table = {
.yes_ranges = lt9211_rw_ranges,
.n_yes_ranges = ARRAY_SIZE(lt9211_rw_ranges),
};
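/*
 * The LT9211 register map is paged: an 8-bit page is selected through the
 * page control register at 0xff. The regmap range below lets the rest of
 * the driver use flat 16-bit addresses (e.g. 0x82xx, 0xd0xx); regmap
 * writes the high byte to REG_PAGE_CONTROL as the page selector and uses
 * the low byte as the offset into the 256-byte window of that page.
 */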
static const struct regmap_range_cfg lt9211_range = {
.name = "lt9211",
.range_min = 0x0000,
.range_max = 0xda00,
.selector_reg = REG_PAGE_CONTROL,
.selector_mask = 0xff,
.selector_shift = 0,
.window_start = 0,
.window_len = 0x100,
};
static const struct regmap_config lt9211_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
	.rd_table = &lt9211_rw_table,
	.wr_table = &lt9211_rw_table,
	.volatile_table = &lt9211_rw_table,
	.ranges = &lt9211_range,
.num_ranges = 1,
.cache_type = REGCACHE_MAPLE,
.max_register = 0xda00,
};
static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge)
{
return container_of(bridge, struct lt9211, bridge);
}
static int lt9211_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
&ctx->bridge, flags);
}
static int lt9211_read_chipid(struct lt9211 *ctx)
{
u8 chipid[3];
int ret;
/* Read Chip ID registers and verify the chip can communicate. */
ret = regmap_bulk_read(ctx->regmap, REG_CHIPID0, chipid, 3);
if (ret < 0) {
dev_err(ctx->dev, "Failed to read Chip ID: %d\n", ret);
return ret;
}
/* Test for known Chip ID. */
if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE ||
chipid[2] != REG_CHIPID2_VALUE) {
dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n",
chipid[0], chipid[1], chipid[2]);
return -EINVAL;
}
return 0;
}
static int lt9211_system_init(struct lt9211 *ctx)
{
const struct reg_sequence lt9211_system_init_seq[] = {
{ 0x8201, 0x18 },
{ 0x8606, 0x61 },
{ 0x8607, 0xa8 },
{ 0x8714, 0x08 },
{ 0x8715, 0x00 },
{ 0x8718, 0x0f },
{ 0x8722, 0x08 },
{ 0x8723, 0x00 },
{ 0x8726, 0x0f },
{ 0x810b, 0xfe },
};
return regmap_multi_reg_write(ctx->regmap, lt9211_system_init_seq,
ARRAY_SIZE(lt9211_system_init_seq));
}
static int lt9211_configure_rx(struct lt9211 *ctx)
{
const struct reg_sequence lt9211_rx_phy_seq[] = {
{ 0x8202, 0x44 },
{ 0x8204, 0xa0 },
{ 0x8205, 0x22 },
{ 0x8207, 0x9f },
{ 0x8208, 0xfc },
/* ORR with 0xf8 here to enable DSI DN/DP swap. */
{ 0x8209, 0x01 },
{ 0x8217, 0x0c },
{ 0x8633, 0x1b },
};
const struct reg_sequence lt9211_rx_cal_reset_seq[] = {
{ 0x8120, 0x7f },
{ 0x8120, 0xff },
};
const struct reg_sequence lt9211_rx_dig_seq[] = {
{ 0x8630, 0x85 },
/* 0x8588: BIT 6 set = MIPI-RX, BIT 4 unset = LVDS-TX */
{ 0x8588, 0x40 },
{ 0x85ff, 0xd0 },
{ REG_DSI_LANE, REG_DSI_LANE_COUNT(ctx->dsi->lanes) },
{ 0xd002, 0x05 },
};
const struct reg_sequence lt9211_rx_div_reset_seq[] = {
{ 0x810a, 0xc0 },
{ 0x8120, 0xbf },
};
const struct reg_sequence lt9211_rx_div_clear_seq[] = {
{ 0x810a, 0xc1 },
{ 0x8120, 0xff },
};
int ret;
ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_phy_seq,
ARRAY_SIZE(lt9211_rx_phy_seq));
if (ret)
return ret;
ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_cal_reset_seq,
ARRAY_SIZE(lt9211_rx_cal_reset_seq));
if (ret)
return ret;
ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_dig_seq,
ARRAY_SIZE(lt9211_rx_dig_seq));
if (ret)
return ret;
ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_div_reset_seq,
ARRAY_SIZE(lt9211_rx_div_reset_seq));
if (ret)
return ret;
usleep_range(10000, 15000);
return regmap_multi_reg_write(ctx->regmap, lt9211_rx_div_clear_seq,
ARRAY_SIZE(lt9211_rx_div_clear_seq));
}
static int lt9211_autodetect_rx(struct lt9211 *ctx,
const struct drm_display_mode *mode)
{
u16 width, height;
u32 byteclk;
u8 buf[5];
u8 format;
u8 bc[3];
int ret;
/* Measure ByteClock frequency. */
ret = regmap_write(ctx->regmap, 0x8600, 0x01);
if (ret)
return ret;
/* Give the chip time to lock onto RX stream. */
msleep(100);
/* Read the ByteClock frequency from the chip. */
ret = regmap_bulk_read(ctx->regmap, 0x8608, bc, sizeof(bc));
if (ret)
return ret;
/* RX ByteClock in kHz */
byteclk = ((bc[0] & 0xf) << 16) | (bc[1] << 8) | bc[2];
/* Width/Height/Format Auto-detection */
ret = regmap_bulk_read(ctx->regmap, 0xd082, buf, sizeof(buf));
if (ret)
return ret;
width = (buf[0] << 8) | buf[1];
height = (buf[3] << 8) | buf[4];
format = buf[2] & 0xf;
if (format == 0x3) { /* YUV422 16bit */
width /= 2;
} else if (format == 0xa) { /* RGB888 24bit */
width /= 3;
} else {
dev_err(ctx->dev, "Unsupported DSI pixel format 0x%01x\n",
format);
return -EINVAL;
}
if (width != mode->hdisplay) {
dev_err(ctx->dev,
"RX: Detected DSI width (%d) does not match mode hdisplay (%d)\n",
width, mode->hdisplay);
return -EINVAL;
}
if (height != mode->vdisplay) {
dev_err(ctx->dev,
"RX: Detected DSI height (%d) does not match mode vdisplay (%d)\n",
height, mode->vdisplay);
return -EINVAL;
}
dev_dbg(ctx->dev, "RX: %dx%d format=0x%01x byteclock=%d kHz\n",
width, height, format, byteclk);
return 0;
}
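/*
 * Program the RX timing registers from the DRM mode: vertical and
 * horizontal totals, active sizes and front porches (sync_start -
 * display) as high/low byte pairs, plus single-byte vertical and
 * horizontal sync widths (sync_end - sync_start).
 */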
static int lt9211_configure_timing(struct lt9211 *ctx,
const struct drm_display_mode *mode)
{
const struct reg_sequence lt9211_timing[] = {
{ 0xd00d, (mode->vtotal >> 8) & 0xff },
{ 0xd00e, mode->vtotal & 0xff },
{ 0xd00f, (mode->vdisplay >> 8) & 0xff },
{ 0xd010, mode->vdisplay & 0xff },
{ 0xd011, (mode->htotal >> 8) & 0xff },
{ 0xd012, mode->htotal & 0xff },
{ 0xd013, (mode->hdisplay >> 8) & 0xff },
{ 0xd014, mode->hdisplay & 0xff },
{ 0xd015, (mode->vsync_end - mode->vsync_start) & 0xff },
{ 0xd016, (mode->hsync_end - mode->hsync_start) & 0xff },
{ 0xd017, ((mode->vsync_start - mode->vdisplay) >> 8) & 0xff },
{ 0xd018, (mode->vsync_start - mode->vdisplay) & 0xff },
{ 0xd019, ((mode->hsync_start - mode->hdisplay) >> 8) & 0xff },
{ 0xd01a, (mode->hsync_start - mode->hdisplay) & 0xff },
};
return regmap_multi_reg_write(ctx->regmap, lt9211_timing,
ARRAY_SIZE(lt9211_timing));
}
static int lt9211_configure_plls(struct lt9211 *ctx,
const struct drm_display_mode *mode)
{
const struct reg_sequence lt9211_pcr_seq[] = {
{ 0xd026, 0x17 },
{ 0xd027, 0xc3 },
{ 0xd02d, 0x30 },
{ 0xd031, 0x10 },
{ 0xd023, 0x20 },
{ 0xd038, 0x02 },
{ 0xd039, 0x10 },
{ 0xd03a, 0x20 },
{ 0xd03b, 0x60 },
{ 0xd03f, 0x04 },
{ 0xd040, 0x08 },
{ 0xd041, 0x10 },
{ 0x810b, 0xee },
{ 0x810b, 0xfe },
};
unsigned int pval;
int ret;
/* DeSSC PLL reference clock is 25 MHz XTal. */
ret = regmap_write(ctx->regmap, 0x822d, 0x48);
if (ret)
return ret;
if (mode->clock < 44000) {
ret = regmap_write(ctx->regmap, 0x8235, 0x83);
} else if (mode->clock < 88000) {
ret = regmap_write(ctx->regmap, 0x8235, 0x82);
} else if (mode->clock < 176000) {
ret = regmap_write(ctx->regmap, 0x8235, 0x81);
} else {
dev_err(ctx->dev,
"Unsupported mode clock (%d kHz) above 176 MHz.\n",
mode->clock);
return -EINVAL;
}
if (ret)
return ret;
/* Wait for the DeSSC PLL to stabilize. */
msleep(100);
ret = regmap_multi_reg_write(ctx->regmap, lt9211_pcr_seq,
ARRAY_SIZE(lt9211_pcr_seq));
if (ret)
return ret;
/* PCR stability test takes seconds. */
ret = regmap_read_poll_timeout(ctx->regmap, 0xd087, pval, pval & 0x8,
20000, 10000000);
if (ret)
dev_err(ctx->dev, "PCR unstable, ret=%i\n", ret);
return ret;
}
static int lt9211_configure_tx(struct lt9211 *ctx, bool jeida,
bool bpp24, bool de)
{
const struct reg_sequence system_lt9211_tx_phy_seq[] = {
/* DPI output disable */
{ 0x8262, 0x00 },
/* BIT(7) is LVDS dual-port */
{ 0x823b, 0x38 | (ctx->lvds_dual_link ? BIT(7) : 0) },
{ 0x823e, 0x92 },
{ 0x823f, 0x48 },
{ 0x8240, 0x31 },
{ 0x8243, 0x80 },
{ 0x8244, 0x00 },
{ 0x8245, 0x00 },
{ 0x8249, 0x00 },
{ 0x824a, 0x01 },
{ 0x824e, 0x00 },
{ 0x824f, 0x00 },
{ 0x8250, 0x00 },
{ 0x8253, 0x00 },
{ 0x8254, 0x01 },
/* LVDS channel order, Odd:Even 0x10..A:B, 0x40..B:A */
{ 0x8646, ctx->lvds_dual_link_even_odd_swap ? 0x40 : 0x10 },
{ 0x8120, 0x7b },
{ 0x816b, 0xff },
};
const struct reg_sequence system_lt9211_tx_dig_seq[] = {
{ 0x8559, 0x40 | (jeida ? BIT(7) : 0) |
(de ? BIT(5) : 0) | (bpp24 ? BIT(4) : 0) },
{ 0x855a, 0xaa },
{ 0x855b, 0xaa },
{ 0x855c, ctx->lvds_dual_link ? BIT(0) : 0 },
{ 0x85a1, 0x77 },
{ 0x8640, 0x40 },
{ 0x8641, 0x34 },
{ 0x8642, 0x10 },
{ 0x8643, 0x23 },
{ 0x8644, 0x41 },
{ 0x8645, 0x02 },
};
const struct reg_sequence system_lt9211_tx_pll_seq[] = {
/* TX PLL power down */
{ 0x8236, 0x01 },
{ 0x8237, ctx->lvds_dual_link ? 0x2a : 0x29 },
{ 0x8238, 0x06 },
{ 0x8239, 0x30 },
{ 0x823a, 0x8e },
{ 0x8737, 0x14 },
{ 0x8713, 0x00 },
{ 0x8713, 0x80 },
};
unsigned int pval;
int ret;
ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_phy_seq,
ARRAY_SIZE(system_lt9211_tx_phy_seq));
if (ret)
return ret;
ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_dig_seq,
ARRAY_SIZE(system_lt9211_tx_dig_seq));
if (ret)
return ret;
ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_pll_seq,
ARRAY_SIZE(system_lt9211_tx_pll_seq));
if (ret)
return ret;
ret = regmap_read_poll_timeout(ctx->regmap, 0x871f, pval, pval & 0x80,
10000, 1000000);
if (ret) {
dev_err(ctx->dev, "TX PLL unstable, ret=%i\n", ret);
return ret;
}
ret = regmap_read_poll_timeout(ctx->regmap, 0x8720, pval, pval & 0x80,
10000, 1000000);
if (ret) {
dev_err(ctx->dev, "TX PLL unstable, ret=%i\n", ret);
return ret;
}
return 0;
}
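/*
 * Bridge enable path: power up vccio, release the reset line, derive the
 * LVDS output format from the new bridge state, look up the CRTC adjusted
 * mode, then step the chip through chip-ID verification, system init,
 * DSI RX setup, RX auto-detection, timing and PLL programming and finally
 * LVDS TX configuration.
 */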
static void lt9211_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_bridge_state *bridge_state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *mode;
struct drm_connector *connector;
struct drm_crtc *crtc;
bool lvds_format_24bpp;
bool lvds_format_jeida;
u32 bus_flags;
int ret;
ret = regulator_enable(ctx->vccio);
if (ret) {
dev_err(ctx->dev, "Failed to enable vccio: %d\n", ret);
return;
}
/* Deassert reset */
gpiod_set_value(ctx->reset_gpio, 1);
usleep_range(20000, 21000); /* Very long post-reset delay. */
/* Get the LVDS format from the bridge state. */
bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
bus_flags = bridge_state->output_bus_cfg.flags;
switch (bridge_state->output_bus_cfg.format) {
case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
lvds_format_24bpp = false;
lvds_format_jeida = true;
break;
case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
lvds_format_24bpp = true;
lvds_format_jeida = true;
break;
case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
lvds_format_24bpp = true;
lvds_format_jeida = false;
break;
default:
/*
* Some bridges still don't set the correct
* LVDS bus pixel format, use SPWG24 default
* format until those are fixed.
*/
lvds_format_24bpp = true;
lvds_format_jeida = false;
dev_warn(ctx->dev,
"Unsupported LVDS bus format 0x%04x, please check output bridge driver. Falling back to SPWG24.\n",
bridge_state->output_bus_cfg.format);
break;
}
/*
* Retrieve the CRTC adjusted mode. This requires a little dance to go
* from the bridge to the encoder, to the connector and to the CRTC.
*/
connector = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
mode = &crtc_state->adjusted_mode;
ret = lt9211_read_chipid(ctx);
if (ret)
return;
ret = lt9211_system_init(ctx);
if (ret)
return;
ret = lt9211_configure_rx(ctx);
if (ret)
return;
ret = lt9211_autodetect_rx(ctx, mode);
if (ret)
return;
ret = lt9211_configure_timing(ctx, mode);
if (ret)
return;
ret = lt9211_configure_plls(ctx, mode);
if (ret)
return;
ret = lt9211_configure_tx(ctx, lvds_format_jeida, lvds_format_24bpp,
bus_flags & DRM_BUS_FLAG_DE_HIGH);
if (ret)
return;
dev_dbg(ctx->dev, "LT9211 enabled.\n");
}
static void lt9211_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
int ret;
/*
* Put the chip in reset, pull nRST line low,
* and assure lengthy 10ms reset low timing.
*/
gpiod_set_value(ctx->reset_gpio, 0);
usleep_range(10000, 11000); /* Very long reset duration. */
ret = regulator_disable(ctx->vccio);
if (ret)
dev_err(ctx->dev, "Failed to disable vccio: %d\n", ret);
regcache_mark_dirty(ctx->regmap);
}
static enum drm_mode_status
lt9211_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
/* LVDS output clock range 25..176 MHz */
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
if (mode->clock > 176000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
#define MAX_INPUT_SEL_FORMATS 1
static u32 *
lt9211_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
u32 output_fmt,
unsigned int *num_input_fmts)
{
u32 *input_fmts;
*num_input_fmts = 0;
input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
GFP_KERNEL);
if (!input_fmts)
return NULL;
/* This is the DSI-end bus format */
input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
*num_input_fmts = 1;
return input_fmts;
}
static const struct drm_bridge_funcs lt9211_funcs = {
.attach = lt9211_attach,
.mode_valid = lt9211_mode_valid,
.atomic_enable = lt9211_atomic_enable,
.atomic_disable = lt9211_atomic_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_get_input_bus_fmts = lt9211_atomic_get_input_bus_fmts,
.atomic_reset = drm_atomic_helper_bridge_reset,
};
static int lt9211_parse_dt(struct lt9211 *ctx)
{
struct device_node *port2, *port3;
struct drm_bridge *panel_bridge;
struct device *dev = ctx->dev;
struct drm_panel *panel;
int dual_link;
int ret;
ctx->vccio = devm_regulator_get(dev, "vccio");
if (IS_ERR(ctx->vccio))
return dev_err_probe(dev, PTR_ERR(ctx->vccio),
"Failed to get supply 'vccio'\n");
ctx->lvds_dual_link = false;
ctx->lvds_dual_link_even_odd_swap = false;
port2 = of_graph_get_port_by_id(dev->of_node, 2);
port3 = of_graph_get_port_by_id(dev->of_node, 3);
dual_link = drm_of_lvds_get_dual_link_pixel_order(port2, port3);
of_node_put(port2);
of_node_put(port3);
if (dual_link == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS) {
ctx->lvds_dual_link = true;
/* Odd pixels to LVDS Channel A, even pixels to B */
ctx->lvds_dual_link_even_odd_swap = false;
} else if (dual_link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) {
ctx->lvds_dual_link = true;
/* Even pixels to LVDS Channel A, odd pixels to B */
ctx->lvds_dual_link_even_odd_swap = true;
}
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge);
if (ret < 0)
return ret;
if (panel) {
panel_bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
}
ctx->panel_bridge = panel_bridge;
return 0;
}
static int lt9211_host_attach(struct lt9211 *ctx)
{
const struct mipi_dsi_device_info info = {
.type = "lt9211",
.channel = 0,
.node = NULL,
};
struct device *dev = ctx->dev;
struct device_node *host_node;
struct device_node *endpoint;
struct mipi_dsi_device *dsi;
struct mipi_dsi_host *host;
int dsi_lanes;
int ret;
endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
dsi_lanes = drm_of_get_data_lanes_count(endpoint, 1, 4);
host_node = of_graph_get_remote_port_parent(endpoint);
host = of_find_mipi_dsi_host_by_node(host_node);
of_node_put(host_node);
of_node_put(endpoint);
if (!host)
return -EPROBE_DEFER;
if (dsi_lanes < 0)
return dsi_lanes;
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi))
return dev_err_probe(dev, PTR_ERR(dsi),
"failed to create dsi device\n");
ctx->dsi = dsi;
dsi->lanes = dsi_lanes;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO_NO_HSA |
MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
MIPI_DSI_MODE_NO_EOT_PACKET;
ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0) {
dev_err(dev, "failed to attach dsi to host: %d\n", ret);
return ret;
}
return 0;
}
static int lt9211_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct lt9211 *ctx;
int ret;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->dev = dev;
/*
* Put the chip in reset, pull nRST line low,
* and assure lengthy 10ms reset low timing.
*/
ctx->reset_gpio = devm_gpiod_get_optional(ctx->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio))
return PTR_ERR(ctx->reset_gpio);
usleep_range(10000, 11000); /* Very long reset duration. */
ret = lt9211_parse_dt(ctx);
if (ret)
return ret;
	ctx->regmap = devm_regmap_init_i2c(client, &lt9211_regmap_config);
if (IS_ERR(ctx->regmap))
return PTR_ERR(ctx->regmap);
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
	ctx->bridge.funcs = &lt9211_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
ret = lt9211_host_attach(ctx);
if (ret)
drm_bridge_remove(&ctx->bridge);
return ret;
}
static void lt9211_remove(struct i2c_client *client)
{
struct lt9211 *ctx = i2c_get_clientdata(client);
drm_bridge_remove(&ctx->bridge);
}
static struct i2c_device_id lt9211_id[] = {
{ "lontium,lt9211" },
{},
};
MODULE_DEVICE_TABLE(i2c, lt9211_id);
static const struct of_device_id lt9211_match_table[] = {
{ .compatible = "lontium,lt9211" },
{},
};
MODULE_DEVICE_TABLE(of, lt9211_match_table);
static struct i2c_driver lt9211_driver = {
.probe = lt9211_probe,
.remove = lt9211_remove,
.id_table = lt9211_id,
.driver = {
.name = "lt9211",
.of_match_table = lt9211_match_table,
},
};
module_i2c_driver(lt9211_driver);
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Lontium LT9211 DSI/LVDS/DPI bridge driver");
MODULE_LICENSE("GPL");
|
/*
* P1020 UTM-PC Device Tree Source stub (no addresses or top-level ranges)
*
* Copyright 2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
&lbc {
nor@0,0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0x0 0x0 0x2000000>;
bank-width = <2>;
device-width = <1>;
partition@0 {
/* 256KB for DTB Image */
reg = <0x0 0x00040000>;
label = "NOR DTB Image";
};
partition@40000 {
/* 3.75 MB for Linux Kernel Image */
reg = <0x00040000 0x003c0000>;
label = "NOR Linux Kernel Image";
};
partition@400000 {
/* 27MB for Root file System */
reg = <0x00400000 0x01b00000>;
label = "NOR Root File System";
};
partition@1f00000 {
/* This location must not be altered */
/* 512KB for u-boot Bootloader Image */
/* 512KB for u-boot Environment Variables */
reg = <0x01f00000 0x00100000>;
label = "NOR U-Boot Image";
read-only;
};
};
};
&soc {
i2c@3000 {
rtc@68 {
compatible = "dallas,ds1339";
reg = <0x68>;
};
};
mdio@24000 {
phy0: ethernet-phy@0 {
interrupts = <3 1 0 0>;
reg = <0x0>;
};
phy1: ethernet-phy@1 {
interrupts = <2 1 0 0>;
reg = <0x1>;
};
phy2: ethernet-phy@2 {
interrupts = <1 1 0 0>;
reg = <0x2>;
};
};
mdio@25000 {
tbi1: tbi-phy@11 {
reg = <0x11>;
device_type = "tbi-phy";
};
};
mdio@26000 {
tbi2: tbi-phy@11 {
reg = <0x11>;
device_type = "tbi-phy";
};
};
enet0: ethernet@b0000 {
phy-handle = <&phy2>;
phy-connection-type = "rgmii-id";
};
enet1: ethernet@b1000 {
phy-handle = <&phy0>;
tbi-handle = <&tbi1>;
phy-connection-type = "sgmii";
};
enet2: ethernet@b2000 {
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
};
usb@22000 {
phy_type = "ulpi";
};
/* USB2 is shared with localbus, so it must be disabled
by default. We can't put 'status = "disabled";' here
since U-Boot doesn't clear the status property when
it enables USB2. OTOH, U-Boot does create a new node
when there isn't any. So, just comment it out.
	usb@23000 {
		phy_type = "ulpi";
	};
	*/
};
|
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#ifndef __USDT_BPF_H__
#define __USDT_BPF_H__
#include <linux/errno.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
/* Below types and maps are internal implementation details of libbpf's USDT
 * support and are subject to change. Also, bpf_usdt_xxx() API helpers should
* be considered an unstable API as well and might be adjusted based on user
* feedback from using libbpf's USDT support in production.
*/
/* User can override BPF_USDT_MAX_SPEC_CNT to change default size of internal
* map that keeps track of USDT argument specifications. This might be
* necessary if there are a lot of USDT attachments.
*/
#ifndef BPF_USDT_MAX_SPEC_CNT
#define BPF_USDT_MAX_SPEC_CNT 256
#endif
/* User can override BPF_USDT_MAX_IP_CNT to change default size of internal
* map that keeps track of IP (memory address) mapping to USDT argument
* specification.
* Note, if kernel supports BPF cookies, this map is not used and could be
* resized all the way to 1 to save a bit of memory.
*/
#ifndef BPF_USDT_MAX_IP_CNT
#define BPF_USDT_MAX_IP_CNT (4 * BPF_USDT_MAX_SPEC_CNT)
#endif
enum __bpf_usdt_arg_type {
BPF_USDT_ARG_CONST,
BPF_USDT_ARG_REG,
BPF_USDT_ARG_REG_DEREF,
};
struct __bpf_usdt_arg_spec {
/* u64 scalar interpreted depending on arg_type, see below */
__u64 val_off;
/* arg location case, see bpf_usdt_arg() for details */
enum __bpf_usdt_arg_type arg_type;
/* offset of referenced register within struct pt_regs */
short reg_off;
/* whether arg should be interpreted as signed value */
bool arg_signed;
/* number of bits that need to be cleared and, optionally,
* sign-extended to cast arguments that are 1, 2, or 4 bytes
* long into final 8-byte u64/s64 value returned to user
*/
char arg_bitshift;
};
/* should match USDT_MAX_ARG_CNT in usdt.c exactly */
#define BPF_USDT_MAX_ARG_CNT 12
struct __bpf_usdt_spec {
struct __bpf_usdt_arg_spec args[BPF_USDT_MAX_ARG_CNT];
__u64 usdt_cookie;
short arg_cnt;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, BPF_USDT_MAX_SPEC_CNT);
__type(key, int);
__type(value, struct __bpf_usdt_spec);
} __bpf_usdt_specs SEC(".maps") __weak;
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, BPF_USDT_MAX_IP_CNT);
__type(key, long);
__type(value, __u32);
} __bpf_usdt_ip_to_spec_id SEC(".maps") __weak;
extern const _Bool LINUX_HAS_BPF_COOKIE __kconfig;
static __always_inline
int __bpf_usdt_spec_id(struct pt_regs *ctx)
{
if (!LINUX_HAS_BPF_COOKIE) {
long ip = PT_REGS_IP(ctx);
int *spec_id_ptr;
spec_id_ptr = bpf_map_lookup_elem(&__bpf_usdt_ip_to_spec_id, &ip);
return spec_id_ptr ? *spec_id_ptr : -ESRCH;
}
return bpf_get_attach_cookie(ctx);
}
/* Return number of USDT arguments defined for currently traced USDT. */
__weak __hidden
int bpf_usdt_arg_cnt(struct pt_regs *ctx)
{
struct __bpf_usdt_spec *spec;
int spec_id;
spec_id = __bpf_usdt_spec_id(ctx);
if (spec_id < 0)
return -ESRCH;
spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
if (!spec)
return -ESRCH;
return spec->arg_cnt;
}
/* Fetch USDT argument #*arg_num* (zero-indexed) and put its value into *res.
* Returns 0 on success; negative error, otherwise.
* On error *res is guaranteed to be set to zero.
*/
__weak __hidden
int bpf_usdt_arg(struct pt_regs *ctx, __u64 arg_num, long *res)
{
struct __bpf_usdt_spec *spec;
struct __bpf_usdt_arg_spec *arg_spec;
unsigned long val;
int err, spec_id;
*res = 0;
spec_id = __bpf_usdt_spec_id(ctx);
if (spec_id < 0)
return -ESRCH;
spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
if (!spec)
return -ESRCH;
if (arg_num >= BPF_USDT_MAX_ARG_CNT)
return -ENOENT;
barrier_var(arg_num);
if (arg_num >= spec->arg_cnt)
return -ENOENT;
arg_spec = &spec->args[arg_num];
switch (arg_spec->arg_type) {
case BPF_USDT_ARG_CONST:
/* Arg is just a constant ("-4@$-9" in USDT arg spec).
* value is recorded in arg_spec->val_off directly.
*/
val = arg_spec->val_off;
break;
case BPF_USDT_ARG_REG:
		/* Arg is in a register (e.g., "8@%rax" in USDT arg spec),
* so we read the contents of that register directly from
* struct pt_regs. To keep things simple user-space parts
* record offsetof(struct pt_regs, <regname>) in arg_spec->reg_off.
*/
err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off);
if (err)
return err;
break;
case BPF_USDT_ARG_REG_DEREF:
/* Arg is in memory addressed by register, plus some offset
* (e.g., "-4@-1204(%rbp)" in USDT arg spec). Register is
* identified like with BPF_USDT_ARG_REG case, and the offset
* is in arg_spec->val_off. We first fetch register contents
* from pt_regs, then do another user-space probe read to
* fetch argument value itself.
*/
err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off);
if (err)
return err;
err = bpf_probe_read_user(&val, sizeof(val), (void *)val + arg_spec->val_off);
if (err)
return err;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
val >>= arg_spec->arg_bitshift;
#endif
break;
default:
return -EINVAL;
}
/* cast arg from 1, 2, or 4 bytes to final 8 byte size clearing
* necessary upper arg_bitshift bits, with sign extension if argument
* is signed
*/
val <<= arg_spec->arg_bitshift;
if (arg_spec->arg_signed)
val = ((long)val) >> arg_spec->arg_bitshift;
else
val = val >> arg_spec->arg_bitshift;
*res = val;
return 0;
}
/* Retrieve user-specified cookie value provided during attach as
* bpf_usdt_opts.usdt_cookie. This serves the same purpose as BPF cookie
* returned by bpf_get_attach_cookie(). Libbpf's support for USDT is itself
* utilizing BPF cookies internally, so user can't use BPF cookie directly
* for USDT programs and has to use bpf_usdt_cookie() API instead.
*/
__weak __hidden
long bpf_usdt_cookie(struct pt_regs *ctx)
{
struct __bpf_usdt_spec *spec;
int spec_id;
spec_id = __bpf_usdt_spec_id(ctx);
if (spec_id < 0)
return 0;
spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
if (!spec)
return 0;
return spec->usdt_cookie;
}
/* we rely on ___bpf_apply() and ___bpf_narg() macros already defined in bpf_tracing.h */
#define ___bpf_usdt_args0() ctx
#define ___bpf_usdt_args1(x) ___bpf_usdt_args0(), ({ long _x; bpf_usdt_arg(ctx, 0, &_x); _x; })
#define ___bpf_usdt_args2(x, args...) ___bpf_usdt_args1(args), ({ long _x; bpf_usdt_arg(ctx, 1, &_x); _x; })
#define ___bpf_usdt_args3(x, args...) ___bpf_usdt_args2(args), ({ long _x; bpf_usdt_arg(ctx, 2, &_x); _x; })
#define ___bpf_usdt_args4(x, args...) ___bpf_usdt_args3(args), ({ long _x; bpf_usdt_arg(ctx, 3, &_x); _x; })
#define ___bpf_usdt_args5(x, args...) ___bpf_usdt_args4(args), ({ long _x; bpf_usdt_arg(ctx, 4, &_x); _x; })
#define ___bpf_usdt_args6(x, args...) ___bpf_usdt_args5(args), ({ long _x; bpf_usdt_arg(ctx, 5, &_x); _x; })
#define ___bpf_usdt_args7(x, args...) ___bpf_usdt_args6(args), ({ long _x; bpf_usdt_arg(ctx, 6, &_x); _x; })
#define ___bpf_usdt_args8(x, args...) ___bpf_usdt_args7(args), ({ long _x; bpf_usdt_arg(ctx, 7, &_x); _x; })
#define ___bpf_usdt_args9(x, args...) ___bpf_usdt_args8(args), ({ long _x; bpf_usdt_arg(ctx, 8, &_x); _x; })
#define ___bpf_usdt_args10(x, args...) ___bpf_usdt_args9(args), ({ long _x; bpf_usdt_arg(ctx, 9, &_x); _x; })
#define ___bpf_usdt_args11(x, args...) ___bpf_usdt_args10(args), ({ long _x; bpf_usdt_arg(ctx, 10, &_x); _x; })
#define ___bpf_usdt_args12(x, args...) ___bpf_usdt_args11(args), ({ long _x; bpf_usdt_arg(ctx, 11, &_x); _x; })
#define ___bpf_usdt_args(args...) ___bpf_apply(___bpf_usdt_args, ___bpf_narg(args))(args)
/*
* BPF_USDT serves the same purpose for USDT handlers as BPF_PROG for
* tp_btf/fentry/fexit BPF programs and BPF_KPROBE for kprobes.
* Original struct pt_regs * context is preserved as 'ctx' argument.
*/
#define BPF_USDT(name, args...) \
name(struct pt_regs *ctx); \
static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \
{ \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
return ____##name(___bpf_usdt_args(args)); \
_Pragma("GCC diagnostic pop") \
} \
static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args)
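/*
 * Illustrative usage sketch (not part of this header): a minimal BPF
 * program using the BPF_USDT() wrapper for a hypothetical "myapp:query"
 * USDT probe with two integer arguments. The handler and probe names
 * below are made up; the program would be attached from user space with
 * bpf_program__attach_usdt().
 *
 *	SEC("usdt")
 *	int BPF_USDT(handle_query, long req_id, long latency_ns)
 *	{
 *		bpf_printk("req %ld took %ld ns", req_id, latency_ns);
 *		return 0;
 *	}
 */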
#endif /* __USDT_BPF_H__ */
|
/*
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef MLX5_FPGA_SDK_H
#define MLX5_FPGA_SDK_H
#include <linux/types.h>
#include <linux/dma-direction.h>
/**
* DOC: Innova SDK
* This header defines the in-kernel API for Innova FPGA client drivers.
*/
#define SBU_QP_QUEUE_SIZE 8
#define MLX5_FPGA_CMD_TIMEOUT_MSEC (60 * 1000)
/**
 * enum mlx5_fpga_access_type - Enumerates the different methods possible for
* accessing the device memory address space
*
* @MLX5_FPGA_ACCESS_TYPE_I2C: Use the slow CX-FPGA I2C bus
* @MLX5_FPGA_ACCESS_TYPE_DONTCARE: Use the fastest available method
*/
enum mlx5_fpga_access_type {
MLX5_FPGA_ACCESS_TYPE_I2C = 0x0,
MLX5_FPGA_ACCESS_TYPE_DONTCARE = 0x0,
};
struct mlx5_fpga_conn;
struct mlx5_fpga_device;
/**
* struct mlx5_fpga_dma_entry - A scatter-gather DMA entry
*/
struct mlx5_fpga_dma_entry {
/** @data: Virtual address pointer to the data */
void *data;
/** @size: Size in bytes of the data */
unsigned int size;
/** @dma_addr: Private member. Physical DMA-mapped address of the data */
dma_addr_t dma_addr;
};
/**
* struct mlx5_fpga_dma_buf - A packet buffer
* May contain up to 2 scatter-gather data entries
*/
struct mlx5_fpga_dma_buf {
/** @dma_dir: DMA direction */
enum dma_data_direction dma_dir;
/** @sg: Scatter-gather entries pointing to the data in memory */
struct mlx5_fpga_dma_entry sg[2];
/** @list: Item in SQ backlog, for TX packets */
struct list_head list;
/**
* @complete: Completion routine, for TX packets
* @conn: FPGA Connection this packet was sent to
* @fdev: FPGA device this packet was sent to
* @buf: The packet buffer
* @status: 0 if successful, or an error code otherwise
*/
void (*complete)(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_device *fdev,
struct mlx5_fpga_dma_buf *buf, u8 status);
};
/**
* struct mlx5_fpga_conn_attr - FPGA connection attributes
* Describes the attributes of a connection
*/
struct mlx5_fpga_conn_attr {
/** @tx_size: Size of connection TX queue, in packets */
unsigned int tx_size;
/** @rx_size: Size of connection RX queue, in packets */
unsigned int rx_size;
/**
* @recv_cb: Callback function which is called for received packets
* @cb_arg: The value provided in mlx5_fpga_conn_attr.cb_arg
* @buf: A buffer containing a received packet
*
* buf is guaranteed to only contain a single scatter-gather entry.
* The size of the actual packet received is specified in buf.sg[0].size
* When this callback returns, the packet buffer may be re-used for
* subsequent receives.
*/
void (*recv_cb)(void *cb_arg, struct mlx5_fpga_dma_buf *buf);
/** @cb_arg: A context to be passed to recv_cb callback */
void *cb_arg;
};
/**
* mlx5_fpga_sbu_conn_create() - Initialize a new FPGA SBU connection
* @fdev: The FPGA device
* @attr: Attributes of the new connection
*
* Sets up a new FPGA SBU connection with the specified attributes.
* The receive callback function may be called for incoming messages even
* before this function returns.
*
* The caller must eventually destroy the connection by calling
* mlx5_fpga_sbu_conn_destroy.
*
* Return: A new connection, or ERR_PTR() error value otherwise.
*/
struct mlx5_fpga_conn *
mlx5_fpga_sbu_conn_create(struct mlx5_fpga_device *fdev,
struct mlx5_fpga_conn_attr *attr);
/**
* mlx5_fpga_sbu_conn_destroy() - Destroy an FPGA SBU connection
* @conn: The FPGA SBU connection to destroy
*
* Cleans up an FPGA SBU connection which was previously created with
* mlx5_fpga_sbu_conn_create.
*/
void mlx5_fpga_sbu_conn_destroy(struct mlx5_fpga_conn *conn);
/**
* mlx5_fpga_sbu_conn_sendmsg() - Queue the transmission of a packet
* @conn: An FPGA SBU connection
* @buf: The packet buffer
*
* Queues a packet for transmission over an FPGA SBU connection.
* The buffer should not be modified or freed until completion.
* Upon completion, the buf's complete() callback is invoked, indicating the
* success or error status of the transmission.
*
* Return: 0 if successful, or an error value otherwise.
*/
int mlx5_fpga_sbu_conn_sendmsg(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_dma_buf *buf);
/**
* mlx5_fpga_mem_read() - Read from FPGA memory address space
* @fdev: The FPGA device
* @size: Size of chunk to read, in bytes
* @addr: Starting address to read from, in FPGA address space
* @buf: Buffer to read into
* @access_type: Method for reading
*
* Reads from the specified address into the specified buffer.
* The address may point to configuration space or to DDR.
* Large reads may be performed internally as several non-atomic operations.
* This function may sleep, so should not be called from atomic contexts.
*
* Return: 0 if successful, or an error value otherwise.
*/
int mlx5_fpga_mem_read(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
void *buf, enum mlx5_fpga_access_type access_type);
/**
* mlx5_fpga_mem_write() - Write to FPGA memory address space
* @fdev: The FPGA device
* @size: Size of chunk to write, in bytes
* @addr: Starting address to write to, in FPGA address space
* @buf: Buffer which contains data to write
* @access_type: Method for writing
*
* Writes the specified buffer data to FPGA memory at the specified address.
* The address may point to configuration space or to DDR.
* Large writes may be performed internally as several non-atomic operations.
* This function may sleep, so should not be called from atomic contexts.
*
* Return: 0 if successful, or an error value otherwise.
*/
int mlx5_fpga_mem_write(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
void *buf, enum mlx5_fpga_access_type access_type);
/**
* mlx5_fpga_get_sbu_caps() - Read the SBU capabilities
* @fdev: The FPGA device
* @size: Size of the buffer to read into
* @buf: Buffer to read the capabilities into
*
* Reads the FPGA SBU capabilities into the specified buffer.
* The format of the capabilities buffer is SBU-dependent.
*
* Return: 0 if successful
* -EINVAL if the buffer is not large enough to contain SBU caps
* or any other error value otherwise.
*/
int mlx5_fpga_get_sbu_caps(struct mlx5_fpga_device *fdev, int size, void *buf);
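/*
 * Illustrative usage sketch, based only on the declarations above; the
 * FPGA device pointer "fdev" and the handler name are made up. The recv_cb
 * handler receives each packet in buf->sg[0].data / buf->sg[0].size:
 *
 *	static void my_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf);
 *
 *	struct mlx5_fpga_conn_attr attr = {
 *		.tx_size = SBU_QP_QUEUE_SIZE,
 *		.rx_size = SBU_QP_QUEUE_SIZE,
 *		.recv_cb = my_recv,
 *		.cb_arg = NULL,
 *	};
 *	struct mlx5_fpga_conn *conn = mlx5_fpga_sbu_conn_create(fdev, &attr);
 *
 * To transmit, fill a struct mlx5_fpga_dma_buf (dma_dir, sg[0] and an
 * optional complete() callback) and pass it to mlx5_fpga_sbu_conn_sendmsg();
 * tear the connection down with mlx5_fpga_sbu_conn_destroy() when done.
 */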
#endif /* MLX5_FPGA_SDK_H */
|
// SPDX-License-Identifier: GPL-2.0-only
/* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*/
#include "ieee754sp.h"
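/*
 * With the IEEE 754-2008 ABS/NEG behaviour selected (FCSR ABS2008 bit),
 * neg and abs are pure sign-bit operations that never signal on NaN
 * inputs. In the legacy mode they are treated as arithmetic: the result
 * is computed as 0 - x (or 0 + x) so that the usual exceptions are raised
 * for signalling NaN operands, with rounding temporarily forced to
 * round-down so the sign of a zero result is correct (neg(+0) gives -0).
 */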
union ieee754sp ieee754sp_neg(union ieee754sp x)
{
union ieee754sp y;
if (ieee754_csr.abs2008) {
y = x;
SPSIGN(y) = !SPSIGN(x);
} else {
unsigned int oldrm;
oldrm = ieee754_csr.rm;
ieee754_csr.rm = FPU_CSR_RD;
y = ieee754sp_sub(ieee754sp_zero(0), x);
ieee754_csr.rm = oldrm;
}
return y;
}
union ieee754sp ieee754sp_abs(union ieee754sp x)
{
union ieee754sp y;
if (ieee754_csr.abs2008) {
y = x;
SPSIGN(y) = 0;
} else {
unsigned int oldrm;
oldrm = ieee754_csr.rm;
ieee754_csr.rm = FPU_CSR_RD;
if (SPSIGN(x))
y = ieee754sp_sub(ieee754sp_zero(0), x);
else
y = ieee754sp_add(ieee754sp_zero(0), x);
ieee754_csr.rm = oldrm;
}
return y;
}
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* MC33880 high-side/low-side switch GPIO driver
* Copyright (c) 2009 Intel Corporation
*/
/* Supports:
* Freescale MC33880 high-side/low-side switch
*/
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>
#include <linux/spi/mc33880.h>
#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/module.h>
#define DRIVER_NAME "mc33880"
/*
* Pin configurations, see MAX7301 datasheet page 6
*/
#define PIN_CONFIG_MASK 0x03
#define PIN_CONFIG_IN_PULLUP 0x03
#define PIN_CONFIG_IN_WO_PULLUP 0x02
#define PIN_CONFIG_OUT 0x01
#define PIN_NUMBER 8
/*
* Some registers must be read back to modify.
* To save time we cache them here in memory
*/
struct mc33880 {
struct mutex lock; /* protect from simultaneous accesses */
u8 port_config;
struct gpio_chip chip;
struct spi_device *spi;
};
static int mc33880_write_config(struct mc33880 *mc)
{
return spi_write(mc->spi, &mc->port_config, sizeof(mc->port_config));
}
static int __mc33880_set(struct mc33880 *mc, unsigned offset, int value)
{
if (value)
mc->port_config |= 1 << offset;
else
mc->port_config &= ~(1 << offset);
return mc33880_write_config(mc);
}
static void mc33880_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct mc33880 *mc = gpiochip_get_data(chip);
mutex_lock(&mc->lock);
__mc33880_set(mc, offset, value);
mutex_unlock(&mc->lock);
}
static int mc33880_probe(struct spi_device *spi)
{
struct mc33880 *mc;
struct mc33880_platform_data *pdata;
int ret;
pdata = dev_get_platdata(&spi->dev);
if (!pdata || !pdata->base) {
dev_dbg(&spi->dev, "incorrect or missing platform data\n");
return -EINVAL;
}
/*
* bits_per_word cannot be configured in platform data
*/
spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret < 0)
return ret;
mc = devm_kzalloc(&spi->dev, sizeof(struct mc33880), GFP_KERNEL);
if (!mc)
return -ENOMEM;
mutex_init(&mc->lock);
spi_set_drvdata(spi, mc);
mc->spi = spi;
mc->chip.label = DRIVER_NAME;
mc->chip.set = mc33880_set;
mc->chip.base = pdata->base;
mc->chip.ngpio = PIN_NUMBER;
mc->chip.can_sleep = true;
mc->chip.parent = &spi->dev;
mc->chip.owner = THIS_MODULE;
mc->port_config = 0x00;
/* write twice, because during initialisation the first setting
* is just for testing SPI communication, and the second is the
* "real" configuration
*/
ret = mc33880_write_config(mc);
mc->port_config = 0x00;
if (!ret)
ret = mc33880_write_config(mc);
if (ret) {
dev_err(&spi->dev, "Failed writing to " DRIVER_NAME ": %d\n",
ret);
goto exit_destroy;
}
ret = gpiochip_add_data(&mc->chip, mc);
if (ret)
goto exit_destroy;
return ret;
exit_destroy:
mutex_destroy(&mc->lock);
return ret;
}
static void mc33880_remove(struct spi_device *spi)
{
struct mc33880 *mc;
mc = spi_get_drvdata(spi);
gpiochip_remove(&mc->chip);
mutex_destroy(&mc->lock);
}
static struct spi_driver mc33880_driver = {
.driver = {
.name = DRIVER_NAME,
},
.probe = mc33880_probe,
.remove = mc33880_remove,
};
static int __init mc33880_init(void)
{
return spi_register_driver(&mc33880_driver);
}
/* register after spi postcore initcall and before
* subsys initcalls that may rely on these GPIOs
*/
subsys_initcall(mc33880_init);
static void __exit mc33880_exit(void)
{
spi_unregister_driver(&mc33880_driver);
}
module_exit(mc33880_exit);
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_DESCRIPTION("MC33880 high-side/low-side switch GPIO driver");
MODULE_LICENSE("GPL v2");
|
/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
*
 * Author : Liu Junliang <liujunliang_ljl@163.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef _SR9800_H
#define _SR9800_H
/* SR9800 spec. command table on Linux Platform */
/* command : Software Station Management Control Reg */
#define SR_CMD_SET_SW_MII 0x06
/* command : PHY Read Reg */
#define SR_CMD_READ_MII_REG 0x07
/* command : PHY Write Reg */
#define SR_CMD_WRITE_MII_REG 0x08
/* command : Hardware Station Management Control Reg */
#define SR_CMD_SET_HW_MII 0x0a
/* command : SROM Read Reg */
#define SR_CMD_READ_EEPROM 0x0b
/* command : SROM Write Reg */
#define SR_CMD_WRITE_EEPROM 0x0c
/* command : SROM Write Enable Reg */
#define SR_CMD_WRITE_ENABLE 0x0d
/* command : SROM Write Disable Reg */
#define SR_CMD_WRITE_DISABLE 0x0e
/* command : RX Control Read Reg */
#define SR_CMD_READ_RX_CTL 0x0f
#define SR_RX_CTL_PRO (1 << 0)
#define SR_RX_CTL_AMALL (1 << 1)
#define SR_RX_CTL_SEP (1 << 2)
#define SR_RX_CTL_AB (1 << 3)
#define SR_RX_CTL_AM (1 << 4)
#define SR_RX_CTL_AP (1 << 5)
#define SR_RX_CTL_ARP (1 << 6)
#define SR_RX_CTL_SO (1 << 7)
#define SR_RX_CTL_RH1M (1 << 8)
#define SR_RX_CTL_RH2M (1 << 9)
#define SR_RX_CTL_RH3M (1 << 10)
/* command : RX Control Write Reg */
#define SR_CMD_WRITE_RX_CTL 0x10
/* command : IPG0/IPG1/IPG2 Control Read Reg */
#define SR_CMD_READ_IPG012 0x11
/* command : IPG0/IPG1/IPG2 Control Write Reg */
#define SR_CMD_WRITE_IPG012 0x12
/* command : Node ID Read Reg */
#define SR_CMD_READ_NODE_ID 0x13
/* command : Node ID Write Reg */
#define SR_CMD_WRITE_NODE_ID 0x14
/* command : Multicast Filter Array Read Reg */
#define SR_CMD_READ_MULTI_FILTER 0x15
/* command : Multicast Filter Array Write Reg */
#define SR_CMD_WRITE_MULTI_FILTER 0x16
/* command : Eth/HomePNA PHY Address Reg */
#define SR_CMD_READ_PHY_ID 0x19
/* command : Medium Status Read Reg */
#define SR_CMD_READ_MEDIUM_STATUS 0x1a
#define SR_MONITOR_LINK (1 << 1)
#define SR_MONITOR_MAGIC (1 << 2)
#define SR_MONITOR_HSFS (1 << 4)
/* command : Medium Status Write Reg */
#define SR_CMD_WRITE_MEDIUM_MODE 0x1b
#define SR_MEDIUM_GM (1 << 0)
#define SR_MEDIUM_FD (1 << 1)
#define SR_MEDIUM_AC (1 << 2)
#define SR_MEDIUM_ENCK (1 << 3)
#define SR_MEDIUM_RFC (1 << 4)
#define SR_MEDIUM_TFC (1 << 5)
#define SR_MEDIUM_JFE (1 << 6)
#define SR_MEDIUM_PF (1 << 7)
#define SR_MEDIUM_RE (1 << 8)
#define SR_MEDIUM_PS (1 << 9)
#define SR_MEDIUM_RSV (1 << 10)
#define SR_MEDIUM_SBP (1 << 11)
#define SR_MEDIUM_SM (1 << 12)
/* command : Monitor Mode Status Read Reg */
#define SR_CMD_READ_MONITOR_MODE 0x1c
/* command : Monitor Mode Status Write Reg */
#define SR_CMD_WRITE_MONITOR_MODE 0x1d
/* command : GPIO Status Read Reg */
#define SR_CMD_READ_GPIOS 0x1e
#define SR_GPIO_GPO0EN (1 << 0) /* GPIO0 Output enable */
#define SR_GPIO_GPO_0 (1 << 1) /* GPIO0 Output value */
#define SR_GPIO_GPO1EN (1 << 2) /* GPIO1 Output enable */
#define SR_GPIO_GPO_1 (1 << 3) /* GPIO1 Output value */
#define SR_GPIO_GPO2EN (1 << 4) /* GPIO2 Output enable */
#define SR_GPIO_GPO_2 (1 << 5) /* GPIO2 Output value */
#define SR_GPIO_RESERVED (1 << 6) /* Reserved */
#define SR_GPIO_RSE (1 << 7) /* Reload serial EEPROM */
/* command : GPIO Status Write Reg */
#define SR_CMD_WRITE_GPIOS 0x1f
/* command : Eth PHY Power and Reset Control Reg */
#define SR_CMD_SW_RESET 0x20
#define SR_SWRESET_CLEAR 0x00
#define SR_SWRESET_RR (1 << 0)
#define SR_SWRESET_RT (1 << 1)
#define SR_SWRESET_PRTE (1 << 2)
#define SR_SWRESET_PRL (1 << 3)
#define SR_SWRESET_BZ (1 << 4)
#define SR_SWRESET_IPRL (1 << 5)
#define SR_SWRESET_IPPD (1 << 6)
/* command : Software Interface Selection Status Read Reg */
#define SR_CMD_SW_PHY_STATUS 0x21
/* command : Software Interface Selection Status Write Reg */
#define SR_CMD_SW_PHY_SELECT 0x22
/* command : BULK in Buffer Size Reg */
#define SR_CMD_BULKIN_SIZE 0x2A
/* command : LED_MUX Control Reg */
#define SR_CMD_LED_MUX 0x70
#define SR_LED_MUX_TX_ACTIVE (1 << 0)
#define SR_LED_MUX_RX_ACTIVE (1 << 1)
#define SR_LED_MUX_COLLISION (1 << 2)
#define SR_LED_MUX_DUP_COL (1 << 3)
#define SR_LED_MUX_DUP (1 << 4)
#define SR_LED_MUX_SPEED (1 << 5)
#define SR_LED_MUX_LINK_ACTIVE (1 << 6)
#define SR_LED_MUX_LINK (1 << 7)
/* Register Access Flags */
#define SR_REQ_RD_REG (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
#define SR_REQ_WR_REG (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
/* Multicast Filter Array size & Max Number */
#define SR_MCAST_FILTER_SIZE 8
#define SR_MAX_MCAST 64
/* IPG0/1/2 Default Value */
#define SR9800_IPG0_DEFAULT 0x15
#define SR9800_IPG1_DEFAULT 0x0c
#define SR9800_IPG2_DEFAULT 0x12
/* Medium Status Default Mode */
#define SR9800_MEDIUM_DEFAULT \
(SR_MEDIUM_FD | SR_MEDIUM_RFC | \
SR_MEDIUM_TFC | SR_MEDIUM_PS | \
SR_MEDIUM_AC | SR_MEDIUM_RE)
/* RX Control Default Setting */
#define SR_DEFAULT_RX_CTL \
(SR_RX_CTL_SO | SR_RX_CTL_AB | SR_RX_CTL_RH1M)
/* EEPROM Magic Number & EEPROM Size */
#define SR_EEPROM_MAGIC 0xdeadbeef
#define SR9800_EEPROM_LEN 0xff
/* SR9800 Driver Version and Driver Name */
#define DRIVER_VERSION "11-Nov-2013"
#define DRIVER_NAME "CoreChips"
#define DRIVER_FLAG \
(FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET)
/* SR9800 BULKIN Buffer Size */
#define SR9800_MAX_BULKIN_2K 0
#define SR9800_MAX_BULKIN_4K 1
#define SR9800_MAX_BULKIN_6K 2
#define SR9800_MAX_BULKIN_8K 3
#define SR9800_MAX_BULKIN_16K 4
#define SR9800_MAX_BULKIN_20K 5
#define SR9800_MAX_BULKIN_24K 6
#define SR9800_MAX_BULKIN_32K 7
static const struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
/* 2k */
{2048, 0x8000, 0x8001},
/* 4k */
{4096, 0x8100, 0x8147},
/* 6k */
{6144, 0x8200, 0x81EB},
/* 8k */
{8192, 0x8300, 0x83D7},
/* 16k */
{16384, 0x8400, 0x851E},
/* 20k */
{20480, 0x8500, 0x8666},
/* 24k */
{24576, 0x8600, 0x87AE},
/* 32k */
{32768, 0x8700, 0x8A3D},
};
/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
struct sr_data {
u8 multi_filter[SR_MCAST_FILTER_SIZE];
u8 mac_addr[ETH_ALEN];
u8 phymode;
u8 ledmode;
u8 eeprom_len;
};
struct sr9800_int_data {
__le16 res1;
u8 link;
__le16 res2;
u8 status;
__le16 res3;
} __packed;
#endif /* _SR9800_H */
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_MIGRATE_H
#define _BCACHEFS_MIGRATE_H
int bch2_dev_data_drop(struct bch_fs *, unsigned, int);
#endif /* _BCACHEFS_MIGRATE_H */
|
/*
* Copyright 2016 - Lee Jones <[email protected]>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
* licensing only applies to this file, and not this project as a
* whole.
*
* a) This file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This file is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this file; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Or, alternatively,
*
* b) Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
#include "stm32f469.dtsi"
#include "stm32f469-pinctrl.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
/ {
model = "STMicroelectronics STM32F469i-DISCO board";
compatible = "st,stm32f469i-disco", "st,stm32f469";
chosen {
bootargs = "root=/dev/ram";
stdout-path = "serial0:115200n8";
};
memory@0 {
device_type = "memory";
reg = <0x00000000 0x1000000>;
};
aliases {
serial0 = &usart3;
};
vcc_3v3: vcc-3v3 {
compatible = "regulator-fixed";
regulator-name = "vcc_3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
soc {
dma-ranges = <0xc0000000 0x0 0x10000000>;
};
leds {
compatible = "gpio-leds";
led-green {
gpios = <&gpiog 6 GPIO_ACTIVE_LOW>;
linux,default-trigger = "heartbeat";
};
led-orange {
gpios = <&gpiod 4 GPIO_ACTIVE_LOW>;
};
led-red {
gpios = <&gpiod 5 GPIO_ACTIVE_LOW>;
};
led-blue {
gpios = <&gpiok 3 GPIO_ACTIVE_LOW>;
};
};
gpio-keys {
compatible = "gpio-keys";
autorepeat;
button-0 {
label = "User";
linux,code = <KEY_WAKEUP>;
gpios = <&gpioa 0 GPIO_ACTIVE_HIGH>;
};
};
/* This turns on vbus for otg for host mode (dwc2) */
vcc5v_otg: vcc5v-otg-regulator {
compatible = "regulator-fixed";
enable-active-high;
gpio = <&gpiob 2 GPIO_ACTIVE_HIGH>;
regulator-name = "vcc5_host1";
regulator-always-on;
};
};
&rcc {
compatible = "st,stm32f469-rcc", "st,stm32f42xx-rcc", "st,stm32-rcc";
};
&clk_hse {
clock-frequency = <8000000>;
};
&dma2d {
status = "okay";
};
&dsi {
#address-cells = <1>;
#size-cells = <0>;
status = "okay";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dsi_in: endpoint {
remote-endpoint = <&ltdc_out_dsi>;
};
};
port@1 {
reg = <1>;
dsi_out: endpoint {
remote-endpoint = <&dsi_panel_in>;
};
};
};
panel@0 {
compatible = "orisetech,otm8009a";
reg = <0>; /* dsi virtual channel (0..3) */
reset-gpios = <&gpioh 7 GPIO_ACTIVE_LOW>;
power-supply = <&vcc_3v3>;
status = "okay";
port {
dsi_panel_in: endpoint {
remote-endpoint = <&dsi_out>;
};
};
};
};
&ltdc {
status = "okay";
port {
ltdc_out_dsi: endpoint {
remote-endpoint = <&dsi_in>;
};
};
};
&rtc {
status = "okay";
};
&timers1 {
status = "okay";
pwm {
pinctrl-0 = <&pwm1_pins>;
pinctrl-names = "default";
status = "okay";
};
timer@0 {
status = "okay";
};
};
&timers3 {
status = "okay";
pwm {
pinctrl-0 = <&pwm3_pins>;
pinctrl-names = "default";
status = "okay";
};
timer@2 {
status = "okay";
};
};
&sdio {
status = "okay";
vmmc-supply = <&vcc_3v3>;
cd-gpios = <&gpiog 2 GPIO_ACTIVE_LOW>;
broken-cd;
pinctrl-names = "default", "opendrain";
pinctrl-0 = <&sdio_pins>;
pinctrl-1 = <&sdio_pins_od>;
bus-width = <4>;
};
&timers5 {
/* Override timer5 to act as clockevent */
compatible = "st,stm32-timer";
interrupts = <50>;
status = "okay";
/delete-property/#address-cells;
/delete-property/#size-cells;
/delete-property/clock-names;
/delete-node/pwm;
/delete-node/timer@4;
};
&usart3 {
pinctrl-0 = <&usart3_pins_a>;
pinctrl-names = "default";
status = "okay";
};
&usbotg_fs {
dr_mode = "host";
pinctrl-0 = <&usbotg_fs_pins_a>;
pinctrl-names = "default";
status = "okay";
};
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
#include "clk.h"
#define pll_out_override(p) (BIT((p->shift - 6)))
#define div_mask(d) ((1 << (d->width)) - 1)
#define get_mul(d) (1 << d->frac_width)
#define get_max_div(d) div_mask(d)
#define PERIPH_CLK_UART_DIV_ENB BIT(24)
static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
unsigned long parent_rate)
{
int div;
div = div_frac_get(rate, parent_rate, divider->width,
divider->frac_width, divider->flags);
if (div < 0)
return 0;
return div;
}
static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
u32 reg;
int div, mul;
u64 rate = parent_rate;
reg = readl_relaxed(divider->reg);
if ((divider->flags & TEGRA_DIVIDER_UART) &&
!(reg & PERIPH_CLK_UART_DIV_ENB))
return rate;
div = (reg >> divider->shift) & div_mask(divider);
mul = get_mul(divider);
div += mul;
rate *= mul;
rate += div - 1;
do_div(rate, div);
return rate;
}
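/*
 * Worked example for the math above (illustrative only): with
 * frac_width == 1, mul == 2, so a register field value of n divides the
 * parent rate by (n + 2) / 2: n == 0 passes the parent rate through,
 * n == 1 divides by 1.5, n == 2 divides by 2, and so on.
 */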
static long clk_frac_div_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
int div, mul;
unsigned long output_rate = *prate;
if (!rate)
return output_rate;
div = get_div(divider, rate, output_rate);
if (div < 0)
return *prate;
mul = get_mul(divider);
return DIV_ROUND_UP(output_rate * mul, div + mul);
}
static int clk_frac_div_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
int div;
unsigned long flags = 0;
u32 val;
div = get_div(divider, rate, parent_rate);
if (div < 0)
return div;
if (divider->lock)
spin_lock_irqsave(divider->lock, flags);
val = readl_relaxed(divider->reg);
val &= ~(div_mask(divider) << divider->shift);
val |= div << divider->shift;
if (divider->flags & TEGRA_DIVIDER_UART) {
if (div)
val |= PERIPH_CLK_UART_DIV_ENB;
else
val &= ~PERIPH_CLK_UART_DIV_ENB;
}
if (divider->flags & TEGRA_DIVIDER_FIXED)
val |= pll_out_override(divider);
writel_relaxed(val, divider->reg);
if (divider->lock)
spin_unlock_irqrestore(divider->lock, flags);
return 0;
}
static void clk_divider_restore_context(struct clk_hw *hw)
{
struct clk_hw *parent = clk_hw_get_parent(hw);
unsigned long parent_rate = clk_hw_get_rate(parent);
unsigned long rate = clk_hw_get_rate(hw);
if (clk_frac_div_set_rate(hw, rate, parent_rate) < 0)
WARN_ON(1);
}
const struct clk_ops tegra_clk_frac_div_ops = {
.recalc_rate = clk_frac_div_recalc_rate,
.set_rate = clk_frac_div_set_rate,
.round_rate = clk_frac_div_round_rate,
.restore_context = clk_divider_restore_context,
};
struct clk *tegra_clk_register_divider(const char *name,
const char *parent_name, void __iomem *reg,
unsigned long flags, u8 clk_divider_flags, u8 shift, u8 width,
u8 frac_width, spinlock_t *lock)
{
struct tegra_clk_frac_div *divider;
struct clk *clk;
struct clk_init_data init;
divider = kzalloc(sizeof(*divider), GFP_KERNEL);
if (!divider) {
pr_err("%s: could not allocate fractional divider clk\n",
__func__);
return ERR_PTR(-ENOMEM);
}
init.name = name;
init.ops = &tegra_clk_frac_div_ops;
init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
divider->reg = reg;
divider->shift = shift;
divider->width = width;
divider->frac_width = frac_width;
divider->lock = lock;
divider->flags = clk_divider_flags;
/* Data in .init is copied by clk_register(), so stack variable OK */
divider->hw.init = &init;
clk = clk_register(NULL, &divider->hw);
if (IS_ERR(clk))
kfree(divider);
return clk;
}
static const struct clk_div_table mc_div_table[] = {
{ .val = 0, .div = 2 },
{ .val = 1, .div = 1 },
{ .val = 0, .div = 0 },
};
struct clk *tegra_clk_register_mc(const char *name, const char *parent_name,
void __iomem *reg, spinlock_t *lock)
{
return clk_register_divider_table(NULL, name, parent_name,
CLK_IS_CRITICAL,
reg, 16, 1, CLK_DIVIDER_READ_ONLY,
mc_div_table, lock);
}
|
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef _XE_QUERY_H_
#define _XE_QUERY_H_
struct drm_device;
struct drm_file;
int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
#endif
|
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Merrifield PNW Camera Imaging ISP subsystem.
*
* Copyright (c) 2012 Intel Corporation. All Rights Reserved.
*
* Copyright (c) 2012 Silicon Hive www.siliconhive.com.
*/
#include "type_support.h"
#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"
#include "atomisp_compat.h"
#define MERR_VALID_PTE_MASK 0x80000000
/*
* include SH header file here
*/
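/*
 * PTE <-> physical address conversion: a PTE stores the physical address
 * shifted down by ISP_PAGE_OFFSET; converting back masks off the valid
 * bit and shifts the remaining bits up again.
 */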
static unsigned int sh_phys_to_pte(struct isp_mmu *mmu,
phys_addr_t phys)
{
return phys >> ISP_PAGE_OFFSET;
}
static phys_addr_t sh_pte_to_phys(struct isp_mmu *mmu,
unsigned int pte)
{
unsigned int mask = mmu->driver->pte_valid_mask;
return (phys_addr_t)((pte & ~mask) << ISP_PAGE_OFFSET);
}
static unsigned int sh_get_pd_base(struct isp_mmu *mmu,
phys_addr_t phys)
{
unsigned int pte = sh_phys_to_pte(mmu, phys);
return HOST_ADDRESS(pte);
}
/*
* callback to flush tlb.
*
* tlb_flush_range will at least flush TLBs containing
* address mapping from addr to addr + size.
*
* tlb_flush_all will flush all TLBs.
*
* tlb_flush_all must be provided. If tlb_flush_range is
* not valid, it will be set to tlb_flush_all by default.
*/
static void sh_tlb_flush(struct isp_mmu *mmu)
{
ia_css_mmu_invalidate_cache();
}
struct isp_mmu_client sh_mmu_mrfld = {
.name = "Silicon Hive ISP3000 MMU",
.pte_valid_mask = MERR_VALID_PTE_MASK,
.null_pte = ~MERR_VALID_PTE_MASK,
.get_pd_base = sh_get_pd_base,
.tlb_flush_all = sh_tlb_flush,
.phys_to_pte = sh_phys_to_pte,
.pte_to_phys = sh_pte_to_phys,
};
|
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
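/*
 * Minimal tail-call chain: the selftest loader is expected to install
 * classifier_0 in slot 0 of jmp_table, so entry() tail-calls into it and
 * classifier_0 keeps re-tail-calling itself, bumping count until the
 * kernel's tail-call limit ends the chain.  (The loader wiring is an
 * assumption; only the BPF side is shown here.)
 */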
int count = 0;
SEC("tc")
int classifier_0(struct __sk_buff *skb)
{
count++;
bpf_tail_call_static(skb, &jmp_table, 0);
return 1;
}
SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
return 0;
}
char __license[] SEC("license") = "GPL";
|
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
AudioScience HPI driver
Copyright (C) 1997-2012 AudioScience Inc. <[email protected]>
HPI Operating System function implementation for Linux
(C) Copyright AudioScience Inc. 1997-2003
******************************************************************************/
#define SOURCEFILE_NAME "hpios.c"
#include "hpi_internal.h"
#include "hpidebug.h"
#include <linux/delay.h>
#include <linux/sched.h>
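/* Delay for num_micro_sec microseconds: sleep when the delay spans more
 * than a jiffy and we are not in interrupt context, otherwise busy-wait
 * with udelay()/mdelay().
 */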
void hpios_delay_micro_seconds(u32 num_micro_sec)
{
if ((usecs_to_jiffies(num_micro_sec) > 1) && !in_interrupt()) {
/* MUST NOT SCHEDULE IN INTERRUPT CONTEXT! */
schedule_timeout_uninterruptible(usecs_to_jiffies
(num_micro_sec));
} else if (num_micro_sec <= 2000)
udelay(num_micro_sec);
else
mdelay(num_micro_sec / 1000);
}
/** Allocate an area of locked memory for bus master DMA operations.
If allocation fails, return 1 and set p_mem_area->size to 0.
*/
u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size,
struct pci_dev *pdev)
{
/*?? any benefit in using managed dmam_alloc_coherent? */
p_mem_area->vaddr =
dma_alloc_coherent(&pdev->dev, size, &p_mem_area->dma_handle,
GFP_KERNEL);
if (p_mem_area->vaddr) {
HPI_DEBUG_LOG(DEBUG, "allocated %d bytes, dma 0x%x vma %p\n",
size, (unsigned int)p_mem_area->dma_handle,
p_mem_area->vaddr);
p_mem_area->pdev = &pdev->dev;
p_mem_area->size = size;
return 0;
} else {
HPI_DEBUG_LOG(WARNING,
"failed to allocate %d bytes locked memory\n", size);
p_mem_area->size = 0;
return 1;
}
}
u16 hpios_locked_mem_free(struct consistent_dma_area *p_mem_area)
{
if (p_mem_area->size) {
dma_free_coherent(p_mem_area->pdev, p_mem_area->size,
p_mem_area->vaddr, p_mem_area->dma_handle);
HPI_DEBUG_LOG(DEBUG, "freed %lu bytes, dma 0x%x vma %p\n",
(unsigned long)p_mem_area->size,
(unsigned int)p_mem_area->dma_handle,
p_mem_area->vaddr);
p_mem_area->size = 0;
return 0;
} else {
return 1;
}
}
|
/*
* \file amdgpu_ioc32.c
*
* 32-bit ioctl compatibility routines for the AMDGPU DRM.
*
* \author Paul Mackerras <[email protected]>
*
* Copyright (C) Paul Mackerras 2005
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_ioctl.h>
#include "amdgpu_drv.h"
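/*
 * 32-bit compat entry point: core DRM ioctls go through the generic
 * drm_compat_ioctl() translation, while driver-specific ioctls (at or
 * above DRM_COMMAND_BASE) keep the same layout for 32- and 64-bit user
 * space and are handed straight to amdgpu_drm_ioctl().
 */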
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
return amdgpu_drm_ioctl(filp, cmd, arg);
}
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header file for Intel Broxton Whiskey Cove PMIC
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
*/
#ifndef __INTEL_BXTWC_H__
#define __INTEL_BXTWC_H__
/* BXT WC devices */
#define BXTWC_DEVICE1_ADDR 0x4E
#define BXTWC_DEVICE2_ADDR 0x4F
#define BXTWC_DEVICE3_ADDR 0x5E
/* device1 Registers */
#define BXTWC_CHIPID 0x4E00
#define BXTWC_CHIPVER 0x4E01
#define BXTWC_SCHGRIRQ0_ADDR 0x5E1A
#define BXTWC_CHGRCTRL0_ADDR 0x5E16
#define BXTWC_CHGRCTRL1_ADDR 0x5E17
#define BXTWC_CHGRCTRL2_ADDR 0x5E18
#define BXTWC_CHGRSTATUS_ADDR 0x5E19
#define BXTWC_THRMBATZONE_ADDR 0x4F22
#define BXTWC_USBPATH_ADDR 0x5E19
#define BXTWC_USBPHYCTRL_ADDR 0x5E07
#define BXTWC_USBIDCTRL_ADDR 0x5E05
#define BXTWC_USBIDEN_MASK 0x01
#define BXTWC_USBIDSTAT_ADDR 0x00FF
#define BXTWC_USBSRCDETSTATUS_ADDR 0x5E29
#define BXTWC_DBGUSBBC1_ADDR 0x5FE0
#define BXTWC_DBGUSBBC2_ADDR 0x5FE1
#define BXTWC_DBGUSBBCSTAT_ADDR 0x5FE2
#define BXTWC_WAKESRC_ADDR 0x4E22
#define BXTWC_WAKESRC2_ADDR 0x4EE5
#define BXTWC_CHRTTADDR_ADDR 0x5E22
#define BXTWC_CHRTTDATA_ADDR 0x5E23
#define BXTWC_STHRMIRQ0_ADDR 0x4F19
#define WC_MTHRMIRQ1_ADDR 0x4E12
#define WC_STHRMIRQ1_ADDR 0x4F1A
#define WC_STHRMIRQ2_ADDR 0x4F1B
#define BXTWC_THRMZN0H_ADDR 0x4F44
#define BXTWC_THRMZN0L_ADDR 0x4F45
#define BXTWC_THRMZN1H_ADDR 0x4F46
#define BXTWC_THRMZN1L_ADDR 0x4F47
#define BXTWC_THRMZN2H_ADDR 0x4F48
#define BXTWC_THRMZN2L_ADDR 0x4F49
#define BXTWC_THRMZN3H_ADDR 0x4F4A
#define BXTWC_THRMZN3L_ADDR 0x4F4B
#define BXTWC_THRMZN4H_ADDR 0x4F4C
#define BXTWC_THRMZN4L_ADDR 0x4F4D
#endif
|
// SPDX-License-Identifier: GPL-2.0
#include <linux/cgroup.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/signal.h>
#include "cgroup-internal.h"
#include <trace/events/cgroup.h>
/*
* Update the CGRP_FROZEN bit of cgrp->flags.
* Return true if the flag was updated; false if it did not change.
*/
static bool cgroup_update_frozen_flag(struct cgroup *cgrp, bool frozen)
{
lockdep_assert_held(&css_set_lock);
/* Already there? */
if (test_bit(CGRP_FROZEN, &cgrp->flags) == frozen)
return false;
if (frozen)
set_bit(CGRP_FROZEN, &cgrp->flags);
else
clear_bit(CGRP_FROZEN, &cgrp->flags);
cgroup_file_notify(&cgrp->events_file);
TRACE_CGROUP_PATH(notify_frozen, cgrp, frozen);
return true;
}
/*
* Propagate the cgroup frozen state upwards by the cgroup tree.
*/
static void cgroup_propagate_frozen(struct cgroup *cgrp, bool frozen)
{
int desc = 1;
/*
* If the new state is frozen, some freezing ancestor cgroups may change
* their state too, depending on whether all their descendants are frozen.
*
* Otherwise, all ancestor cgroups are forced into the non-frozen state.
*/
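/*
 * desc counts how many cgroups below the current ancestor (the origin
 * plus every ancestor whose CGRP_FROZEN bit changed on the way up)
 * contribute to nr_frozen_descendants, so each level applies the whole
 * batch at once.
 */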
while ((cgrp = cgroup_parent(cgrp))) {
if (frozen) {
cgrp->freezer.nr_frozen_descendants += desc;
if (!test_bit(CGRP_FREEZE, &cgrp->flags) ||
(cgrp->freezer.nr_frozen_descendants !=
cgrp->nr_descendants))
continue;
} else {
cgrp->freezer.nr_frozen_descendants -= desc;
}
if (cgroup_update_frozen_flag(cgrp, frozen))
desc++;
}
}
/*
* Revisit the cgroup frozen state.
* Checks if the cgroup is really frozen and perform all state transitions.
*/
void cgroup_update_frozen(struct cgroup *cgrp)
{
bool frozen;
/*
* If the cgroup has to be frozen (CGRP_FREEZE bit set),
* and all tasks are frozen and/or stopped, let's consider
* the cgroup frozen. Otherwise it's not frozen.
*/
frozen = test_bit(CGRP_FREEZE, &cgrp->flags) &&
cgrp->freezer.nr_frozen_tasks == __cgroup_task_count(cgrp);
/* If flags is updated, update the state of ancestor cgroups. */
if (cgroup_update_frozen_flag(cgrp, frozen))
cgroup_propagate_frozen(cgrp, frozen);
}
/*
* Increment cgroup's nr_frozen_tasks.
*/
static void cgroup_inc_frozen_cnt(struct cgroup *cgrp)
{
cgrp->freezer.nr_frozen_tasks++;
}
/*
* Decrement cgroup's nr_frozen_tasks.
*/
static void cgroup_dec_frozen_cnt(struct cgroup *cgrp)
{
cgrp->freezer.nr_frozen_tasks--;
WARN_ON_ONCE(cgrp->freezer.nr_frozen_tasks < 0);
}
/*
* Enter frozen/stopped state, if not yet there. Update cgroup's counters,
* and revisit the state of the cgroup, if necessary.
*/
void cgroup_enter_frozen(void)
{
struct cgroup *cgrp;
if (current->frozen)
return;
spin_lock_irq(&css_set_lock);
current->frozen = true;
cgrp = task_dfl_cgroup(current);
cgroup_inc_frozen_cnt(cgrp);
cgroup_update_frozen(cgrp);
spin_unlock_irq(&css_set_lock);
}
/*
* Conditionally leave frozen/stopped state. Update cgroup's counters,
* and revisit the state of the cgroup, if necessary.
*
* If always_leave is not set, and the cgroup is freezing,
* we're racing with the cgroup freezing. In this case, we don't
* drop the frozen counter to avoid a transient switch to
* the unfrozen state.
*/
void cgroup_leave_frozen(bool always_leave)
{
struct cgroup *cgrp;
spin_lock_irq(&css_set_lock);
cgrp = task_dfl_cgroup(current);
if (always_leave || !test_bit(CGRP_FREEZE, &cgrp->flags)) {
cgroup_dec_frozen_cnt(cgrp);
cgroup_update_frozen(cgrp);
WARN_ON_ONCE(!current->frozen);
current->frozen = false;
} else if (!(current->jobctl & JOBCTL_TRAP_FREEZE)) {
spin_lock(&current->sighand->siglock);
current->jobctl |= JOBCTL_TRAP_FREEZE;
set_thread_flag(TIF_SIGPENDING);
spin_unlock(&current->sighand->siglock);
}
spin_unlock_irq(&css_set_lock);
}
/*
* Freeze or unfreeze the task by setting or clearing the JOBCTL_TRAP_FREEZE
* jobctl bit.
*/
static void cgroup_freeze_task(struct task_struct *task, bool freeze)
{
unsigned long flags;
/* If the task is about to die, don't bother with freezing it. */
if (!lock_task_sighand(task, &flags))
return;
if (freeze) {
task->jobctl |= JOBCTL_TRAP_FREEZE;
signal_wake_up(task, false);
} else {
task->jobctl &= ~JOBCTL_TRAP_FREEZE;
wake_up_process(task);
}
unlock_task_sighand(task, &flags);
}
/*
* Freeze or unfreeze all tasks in the given cgroup.
*/
static void cgroup_do_freeze(struct cgroup *cgrp, bool freeze)
{
struct css_task_iter it;
struct task_struct *task;
lockdep_assert_held(&cgroup_mutex);
spin_lock_irq(&css_set_lock);
if (freeze)
set_bit(CGRP_FREEZE, &cgrp->flags);
else
clear_bit(CGRP_FREEZE, &cgrp->flags);
spin_unlock_irq(&css_set_lock);
if (freeze)
TRACE_CGROUP_PATH(freeze, cgrp);
else
TRACE_CGROUP_PATH(unfreeze, cgrp);
css_task_iter_start(&cgrp->self, 0, &it);
while ((task = css_task_iter_next(&it))) {
/*
* Ignore kernel threads here. Freezing cgroups containing
* kthreads isn't supported.
*/
if (task->flags & PF_KTHREAD)
continue;
cgroup_freeze_task(task, freeze);
}
css_task_iter_end(&it);
/*
* Cgroup state should be revisited here to cover empty leaf cgroups
* and cgroups whose descendants are already in the desired state.
*/
spin_lock_irq(&css_set_lock);
if (cgrp->nr_descendants == cgrp->freezer.nr_frozen_descendants)
cgroup_update_frozen(cgrp);
spin_unlock_irq(&css_set_lock);
}
/*
* Adjust the task state (freeze or unfreeze) and revisit the state of
* source and destination cgroups.
*/
void cgroup_freezer_migrate_task(struct task_struct *task,
struct cgroup *src, struct cgroup *dst)
{
lockdep_assert_held(&css_set_lock);
/*
* Kernel threads are not supposed to be frozen at all.
*/
if (task->flags & PF_KTHREAD)
return;
/*
* There is nothing to change if neither the src nor the dst cgroup
* is freezing and the task is not frozen.
*/
if (!test_bit(CGRP_FREEZE, &src->flags) &&
!test_bit(CGRP_FREEZE, &dst->flags) &&
!task->frozen)
return;
/*
* Adjust counters of freezing and frozen tasks.
* Note, that if the task is frozen, but the destination cgroup is not
* frozen, we bump both counters to keep them balanced.
*/
if (task->frozen) {
cgroup_inc_frozen_cnt(dst);
cgroup_dec_frozen_cnt(src);
}
cgroup_update_frozen(dst);
cgroup_update_frozen(src);
/*
* Force the task to the desired state.
*/
cgroup_freeze_task(task, test_bit(CGRP_FREEZE, &dst->flags));
}
void cgroup_freeze(struct cgroup *cgrp, bool freeze)
{
struct cgroup_subsys_state *css;
struct cgroup *parent;
struct cgroup *dsct;
bool applied = false;
bool old_e;
lockdep_assert_held(&cgroup_mutex);
/*
* Nothing changed? Just exit.
*/
if (cgrp->freezer.freeze == freeze)
return;
cgrp->freezer.freeze = freeze;
/*
* Propagate changes downwards the cgroup tree.
*/
css_for_each_descendant_pre(css, &cgrp->self) {
dsct = css->cgroup;
if (cgroup_is_dead(dsct))
continue;
/*
* e_freeze is affected by the parent's e_freeze and this cgroup's own
* freeze. If the old and new e_freeze are equal, nothing changes and
* the children cannot be affected either, so skip the whole subtree.
*/
old_e = dsct->freezer.e_freeze;
parent = cgroup_parent(dsct);
dsct->freezer.e_freeze = (dsct->freezer.freeze ||
parent->freezer.e_freeze);
if (dsct->freezer.e_freeze == old_e) {
css = css_rightmost_descendant(css);
continue;
}
/*
* Do change actual state: freeze or unfreeze.
*/
cgroup_do_freeze(dsct, freeze);
applied = true;
}
/*
* Even if the actual state hasn't changed, let's notify a user.
* The state can be enforced by an ancestor cgroup: the cgroup
* can already be in the desired state or it can be locked in the
* opposite state, so that the transition will never happen.
* In both cases it's better to notify a user, that there is
* nothing to wait for.
*/
if (!applied) {
TRACE_CGROUP_PATH(notify_frozen, cgrp,
test_bit(CGRP_FROZEN, &cgrp->flags));
cgroup_file_notify(&cgrp->events_file);
}
}
|
// SPDX-License-Identifier: GPL-2.0
/*
* Samsung Exynos5433 TM2E board device tree source
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
*
* Device tree source file for Samsung's TM2E(TM2 EDGE) board which is based on
* Samsung Exynos5433 SoC.
*/
#include "exynos5433-tm2-common.dtsi"
/ {
model = "Samsung TM2E board";
compatible = "samsung,tm2e", "samsung,exynos5433";
chassis-type = "handset";
};
&cmu_disp {
/*
* TM2 and TM2e differ only by DISP_PLL rate, but define all assigned
* clocks properties for DISP CMU for each board to keep them together
* for easier review and maintenance.
*/
assigned-clocks = <&cmu_disp CLK_FOUT_DISP_PLL>,
<&cmu_mif CLK_DIV_SCLK_DECON_TV_ECLK>,
<&cmu_disp CLK_MOUT_ACLK_DISP_333_USER>,
<&cmu_disp CLK_MOUT_SCLK_DSIM0_USER>,
<&cmu_disp CLK_MOUT_SCLK_DSIM0>,
<&cmu_disp CLK_MOUT_SCLK_DECON_ECLK_USER>,
<&cmu_disp CLK_MOUT_SCLK_DECON_ECLK>,
<&cmu_disp CLK_MOUT_PHYCLK_MIPIDPHY0_RXCLKESC0_USER>,
<&cmu_disp CLK_MOUT_PHYCLK_MIPIDPHY0_BITCLKDIV8_USER>,
<&cmu_disp CLK_MOUT_DISP_PLL>,
<&cmu_mif CLK_MOUT_SCLK_DECON_TV_ECLK_A>,
<&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>,
<&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK>;
assigned-clock-parents = <0>, <0>,
<&cmu_mif CLK_ACLK_DISP_333>,
<&cmu_mif CLK_SCLK_DSIM0_DISP>,
<&cmu_disp CLK_MOUT_SCLK_DSIM0_USER>,
<&cmu_mif CLK_SCLK_DECON_ECLK_DISP>,
<&cmu_disp CLK_MOUT_SCLK_DECON_ECLK_USER>,
<&cmu_disp CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY>,
<&cmu_disp CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY>,
<&cmu_disp CLK_FOUT_DISP_PLL>,
<&cmu_mif CLK_MOUT_BUS_PLL_DIV2>,
<&cmu_mif CLK_SCLK_DECON_TV_ECLK_DISP>,
<&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>;
assigned-clock-rates = <278000000>, <400000000>;
};
&dsi {
panel@0 {
compatible = "samsung,s6e3hf2";
reg = <0>;
vdd3-supply = <&ldo27_reg>;
vci-supply = <&ldo28_reg>;
reset-gpios = <&gpg0 0 GPIO_ACTIVE_LOW>;
enable-gpios = <&gpf1 5 GPIO_ACTIVE_HIGH>;
};
};
&ldo31_reg {
regulator-name = "TSP_VDD_1.8V_AP";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
&ldo38_reg {
regulator-name = "VCC_3.3V_MOTOR_AP";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
&stmfts {
touchscreen-size-x = <1599>;
touchscreen-size-y = <2559>;
touch-key-connected;
ledvdd-supply = <&ldo33_reg>;
};
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <termios.h>
#include "chan_user.h"
#include <os.h>
#include <um_malloc.h>
struct fd_chan {
int fd;
int raw;
struct termios tt;
char str[sizeof("1234567890\0")];
};
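/*
 * Parse a channel string of the form ":<n>" (the "fd" prefix has already
 * been consumed by the caller), e.g. an "fd:3" console argument
 * (illustrative), and attach the channel to that already-open host file
 * descriptor.
 */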
static void *fd_init(char *str, int device, const struct chan_opts *opts)
{
struct fd_chan *data;
char *end;
int n;
if (*str != ':') {
printk(UM_KERN_ERR "fd_init : channel type 'fd' must specify a "
"file descriptor\n");
return NULL;
}
str++;
n = strtoul(str, &end, 0);
if ((*end != '\0') || (end == str)) {
printk(UM_KERN_ERR "fd_init : couldn't parse file descriptor "
"'%s'\n", str);
return NULL;
}
data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL);
if (data == NULL)
return NULL;
*data = ((struct fd_chan) { .fd = n,
.raw = opts->raw });
return data;
}
static int fd_open(int input, int output, int primary, void *d, char **dev_out)
{
struct fd_chan *data = d;
int err;
if (data->raw && isatty(data->fd)) {
CATCH_EINTR(err = tcgetattr(data->fd, &data->tt));
if (err)
return err;
err = raw(data->fd);
if (err)
return err;
}
sprintf(data->str, "%d", data->fd);
*dev_out = data->str;
return data->fd;
}
static void fd_close(int fd, void *d)
{
struct fd_chan *data = d;
int err;
if (!data->raw || !isatty(fd))
return;
CATCH_EINTR(err = tcsetattr(fd, TCSAFLUSH, &data->tt));
if (err)
printk(UM_KERN_ERR "Failed to restore terminal state - "
"errno = %d\n", -err);
data->raw = 0;
}
const struct chan_ops fd_ops = {
.type = "fd",
.init = fd_init,
.open = fd_open,
.close = fd_close,
.read = generic_read,
.write = generic_write,
.console_write = generic_console_write,
.window_size = generic_window_size,
.free = generic_free,
.winch = 1,
};
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* net/dccp/ccid.c
*
* An implementation of the DCCP protocol
* Arnaldo Carvalho de Melo <[email protected]>
*
* CCID infrastructure
*/
#include <linux/slab.h>
#include "ccid.h"
#include "ccids/lib/tfrc.h"
static struct ccid_operations *ccids[] = {
&ccid2_ops,
#ifdef CONFIG_IP_DCCP_CCID3
&ccid3_ops,
#endif
};
static struct ccid_operations *ccid_by_number(const u8 id)
{
int i;
for (i = 0; i < ARRAY_SIZE(ccids); i++)
if (ccids[i]->ccid_id == id)
return ccids[i];
return NULL;
}
/* check that up to @array_len members in @ccid_array are supported */
bool ccid_support_check(u8 const *ccid_array, u8 array_len)
{
while (array_len > 0)
if (ccid_by_number(ccid_array[--array_len]) == NULL)
return false;
return true;
}
/**
* ccid_get_builtin_ccids - Populate a list of built-in CCIDs
* @ccid_array: pointer to copy into
* @array_len: where to store the number of entries copied
*
* This function allocates memory; the caller must ensure it is freed after use.
*/
int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
{
*ccid_array = kmalloc(ARRAY_SIZE(ccids), gfp_any());
if (*ccid_array == NULL)
return -ENOBUFS;
for (*array_len = 0; *array_len < ARRAY_SIZE(ccids); *array_len += 1)
(*ccid_array)[*array_len] = ccids[*array_len]->ccid_id;
return 0;
}
int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
u8 *ccid_array, array_len;
int err = 0;
if (ccid_get_builtin_ccids(&ccid_array, &array_len))
return -ENOBUFS;
if (put_user(array_len, optlen))
err = -EFAULT;
else if (len > 0 && copy_to_user(optval, ccid_array,
len > array_len ? array_len : len))
err = -EFAULT;
kfree(ccid_array);
return err;
}
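/*
 * Create a slab cache for per-socket CCID state: each object is a struct
 * ccid header followed by obj_size bytes of CCID-private data, and the
 * cache name is rendered from the printf-style format into slab_name_fmt
 * (e.g. "ccid2_hc_tx_sock").
 */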
static __printf(3, 4) struct kmem_cache *ccid_kmem_cache_create(int obj_size,
		char *slab_name_fmt, const char *fmt, ...)
{
struct kmem_cache *slab;
va_list args;
va_start(args, fmt);
vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args);
va_end(args);
slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
return slab;
}
static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
{
kmem_cache_destroy(slab);
}
static int __init ccid_activate(struct ccid_operations *ccid_ops)
{
int err = -ENOBUFS;
ccid_ops->ccid_hc_rx_slab =
ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
ccid_ops->ccid_hc_rx_slab_name,
"ccid%u_hc_rx_sock",
ccid_ops->ccid_id);
if (ccid_ops->ccid_hc_rx_slab == NULL)
goto out;
ccid_ops->ccid_hc_tx_slab =
ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
ccid_ops->ccid_hc_tx_slab_name,
"ccid%u_hc_tx_sock",
ccid_ops->ccid_id);
if (ccid_ops->ccid_hc_tx_slab == NULL)
goto out_free_rx_slab;
pr_info("DCCP: Activated CCID %d (%s)\n",
ccid_ops->ccid_id, ccid_ops->ccid_name);
err = 0;
out:
return err;
out_free_rx_slab:
ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
ccid_ops->ccid_hc_rx_slab = NULL;
goto out;
}
static void ccid_deactivate(struct ccid_operations *ccid_ops)
{
ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
ccid_ops->ccid_hc_tx_slab = NULL;
ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
ccid_ops->ccid_hc_rx_slab = NULL;
pr_info("DCCP: Deactivated CCID %d (%s)\n",
ccid_ops->ccid_id, ccid_ops->ccid_name);
}
struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx)
{
struct ccid_operations *ccid_ops = ccid_by_number(id);
struct ccid *ccid = NULL;
if (ccid_ops == NULL)
goto out;
ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
ccid_ops->ccid_hc_tx_slab, gfp_any());
if (ccid == NULL)
goto out;
ccid->ccid_ops = ccid_ops;
if (rx) {
memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size);
if (ccid->ccid_ops->ccid_hc_rx_init != NULL &&
ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0)
goto out_free_ccid;
} else {
memset(ccid + 1, 0, ccid_ops->ccid_hc_tx_obj_size);
if (ccid->ccid_ops->ccid_hc_tx_init != NULL &&
ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0)
goto out_free_ccid;
}
out:
return ccid;
out_free_ccid:
kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab :
ccid_ops->ccid_hc_tx_slab, ccid);
ccid = NULL;
goto out;
}
void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)
{
if (ccid != NULL) {
if (ccid->ccid_ops->ccid_hc_rx_exit != NULL)
ccid->ccid_ops->ccid_hc_rx_exit(sk);
kmem_cache_free(ccid->ccid_ops->ccid_hc_rx_slab, ccid);
}
}
void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
{
if (ccid != NULL) {
if (ccid->ccid_ops->ccid_hc_tx_exit != NULL)
ccid->ccid_ops->ccid_hc_tx_exit(sk);
kmem_cache_free(ccid->ccid_ops->ccid_hc_tx_slab, ccid);
}
}
int __init ccid_initialize_builtins(void)
{
int i, err = tfrc_lib_init();
if (err)
return err;
for (i = 0; i < ARRAY_SIZE(ccids); i++) {
err = ccid_activate(ccids[i]);
if (err)
goto unwind_registrations;
}
return 0;
unwind_registrations:
while (--i >= 0)
ccid_deactivate(ccids[i]);
tfrc_lib_exit();
return err;
}
void ccid_cleanup_builtins(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(ccids); i++)
ccid_deactivate(ccids[i]);
tfrc_lib_exit();
}
|
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_COMMAND_TABLE_HELPER_DCE110_H__
#define __DAL_COMMAND_TABLE_HELPER_DCE110_H__
struct command_table_helper;
/* Initialize command table helper functions */
const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void);
#endif /* __DAL_COMMAND_TABLE_HELPER_DCE110_H__ */
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INSTRUCTION_POINTER_H
#define _LINUX_INSTRUCTION_POINTER_H
#include <asm/linkage.h>
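/*
 * _RET_IP_ is the caller's return address; _THIS_IP_ takes the address of
 * a local label, which evaluates to (approximately) the current
 * instruction pointer.
 */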
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
#ifndef _THIS_IP_
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
#endif
#endif /* _LINUX_INSTRUCTION_POINTER_H */
|
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
* Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __ISCSI_COMMON__
#define __ISCSI_COMMON__
/**********************/
/* ISCSI FW CONSTANTS */
/**********************/
/* iSCSI HSI constants */
#define ISCSI_DEFAULT_MTU (1500)
/* KWQ (kernel work queue) layer codes */
#define ISCSI_SLOW_PATH_LAYER_CODE (6)
/* iSCSI parameter defaults */
#define ISCSI_DEFAULT_HEADER_DIGEST (0)
#define ISCSI_DEFAULT_DATA_DIGEST (0)
#define ISCSI_DEFAULT_INITIAL_R2T (1)
#define ISCSI_DEFAULT_IMMEDIATE_DATA (1)
#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000)
#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000)
#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000)
#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
/* iSCSI parameter limits */
#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200)
#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff)
#define ISCSI_MIN_VAL_BURST_LENGTH (0x200)
#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff)
#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
#define ISCSI_AHS_CNTL_SIZE 4
#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf)
/* iSCSI reserved params */
#define ISCSI_ITT_ALL_ONES (0xffffffff)
#define ISCSI_TTT_ALL_ONES (0xffffffff)
#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
#define ISCSI_OPTION_2_ON_CHIP_TCP 2
#define ISCSI_INITIATOR_MODE 0
#define ISCSI_TARGET_MODE 1
/* iSCSI request op codes */
#define ISCSI_OPCODE_NOP_OUT (0)
#define ISCSI_OPCODE_SCSI_CMD (1)
#define ISCSI_OPCODE_TMF_REQUEST (2)
#define ISCSI_OPCODE_LOGIN_REQUEST (3)
#define ISCSI_OPCODE_TEXT_REQUEST (4)
#define ISCSI_OPCODE_DATA_OUT (5)
#define ISCSI_OPCODE_LOGOUT_REQUEST (6)
/* iSCSI response/messages op codes */
#define ISCSI_OPCODE_NOP_IN (0x20)
#define ISCSI_OPCODE_SCSI_RESPONSE (0x21)
#define ISCSI_OPCODE_TMF_RESPONSE (0x22)
#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23)
#define ISCSI_OPCODE_TEXT_RESPONSE (0x24)
#define ISCSI_OPCODE_DATA_IN (0x25)
#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26)
#define ISCSI_OPCODE_R2T (0x31)
#define ISCSI_OPCODE_ASYNC_MSG (0x32)
#define ISCSI_OPCODE_REJECT (0x3f)
/* iSCSI stages */
#define ISCSI_STAGE_SECURITY_NEGOTIATION (0)
#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1)
#define ISCSI_STAGE_FULL_FEATURE_PHASE (3)
/* iSCSI CQE errors */
#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08)
#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)
/* Union of data bd_opaque/ tq_tid */
union bd_opaque_tq_union {
__le16 bd_opaque;
__le16 tq_tid;
};
/* ISCSI SGL entry */
struct cqe_error_bitmap {
u8 cqe_error_status_bits;
#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1
#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5
#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1
#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6
#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1
#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7
};
union cqe_error_status {
u8 error_status;
struct cqe_error_bitmap error_bits;
};
/* iSCSI Login Response PDU header */
struct data_hdr {
__le32 data[12];
};
struct lun_mapper_addr_reserved {
struct regpair lun_mapper_addr;
u8 reserved0[8];
};
/* rdif context for dif on immediate */
struct dif_on_immediate_params {
__le32 initial_ref_tag;
__le16 application_tag;
__le16 application_tag_mask;
__le16 flags1;
#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT 0
#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT 1
#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT 2
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT 3
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT 4
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT 5
#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT 6
#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT 7
#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK 0x3
#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT 8
#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK 0xF
#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT 10
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
u8 flags0;
#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT 0
#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT 1
#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2
#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT 3
#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK 0x3
#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT 4
#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT 6
#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT 7
u8 reserved_zero[5];
};
/* iSCSI dif on immediate mode attributes union */
union dif_configuration_params {
struct lun_mapper_addr_reserved lun_mapper_address;
struct dif_on_immediate_params def_dif_conf;
};
/* Union of data/r2t sequence number */
union iscsi_seq_num {
__le16 data_sn;
__le16 r2t_sn;
};
/* iSCSI DIF flags */
struct iscsi_dif_flags {
u8 flags;
#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
};
/* The iscsi storm task context of Ystorm */
struct ystorm_iscsi_task_state {
struct scsi_cached_sges data_desc;
struct scsi_sgl_params sgl_params;
__le32 exp_r2t_sn;
__le32 buffer_offset;
union iscsi_seq_num seq_num;
struct iscsi_dif_flags dif_flags;
u8 flags;
#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1
#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3
};
/* The iscsi storm task context of Ystorm */
struct ystorm_iscsi_task_rxmit_opt {
__le32 fast_rxmit_sge_offset;
__le32 scan_start_buffer_offset;
__le32 fast_rxmit_buffer_offset;
u8 scan_start_sgl_index;
u8 fast_rxmit_sgl_index;
__le16 reserved;
};
/* iSCSI Common PDU header */
struct iscsi_common_hdr {
u8 hdr_status;
u8 hdr_response;
u8 hdr_flags;
u8 hdr_first_byte;
#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F
#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0
#define ISCSI_COMMON_HDR_IMM_MASK 0x1
#define ISCSI_COMMON_HDR_IMM_SHIFT 6
#define ISCSI_COMMON_HDR_RSRV_MASK 0x1
#define ISCSI_COMMON_HDR_RSRV_SHIFT 7
__le32 hdr_second_dword;
#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun_reserved;
__le32 itt;
__le32 ttt;
__le32 cmdstat_sn;
__le32 exp_statcmd_sn;
__le32 max_cmd_sn;
__le32 data[3];
};
/* iSCSI Command PDU header */
struct iscsi_cmd_hdr {
__le16 reserved1;
u8 flags_attr;
#define ISCSI_CMD_HDR_ATTR_MASK 0x7
#define ISCSI_CMD_HDR_ATTR_SHIFT 0
#define ISCSI_CMD_HDR_RSRV_MASK 0x3
#define ISCSI_CMD_HDR_RSRV_SHIFT 3
#define ISCSI_CMD_HDR_WRITE_MASK 0x1
#define ISCSI_CMD_HDR_WRITE_SHIFT 5
#define ISCSI_CMD_HDR_READ_MASK 0x1
#define ISCSI_CMD_HDR_READ_SHIFT 6
#define ISCSI_CMD_HDR_FINAL_MASK 0x1
#define ISCSI_CMD_HDR_FINAL_SHIFT 7
u8 hdr_first_byte;
#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F
#define ISCSI_CMD_HDR_OPCODE_SHIFT 0
#define ISCSI_CMD_HDR_IMM_MASK 0x1
#define ISCSI_CMD_HDR_IMM_SHIFT 6
#define ISCSI_CMD_HDR_RSRV1_MASK 0x1
#define ISCSI_CMD_HDR_RSRV1_SHIFT 7
__le32 hdr_second_dword;
#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 expected_transfer_length;
__le32 cmd_sn;
__le32 exp_stat_sn;
__le32 cdb[4];
};
/* iSCSI Command PDU header with Extended CDB (Initiator Mode) */
struct iscsi_ext_cdb_cmd_hdr {
__le16 reserved1;
u8 flags_attr;
#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7
#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0
#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3
#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3
#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1
#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5
#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1
#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6
#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1
#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF
#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 expected_transfer_length;
__le32 cmd_sn;
__le32 exp_stat_sn;
struct scsi_sge cdb_sge;
};
/* iSCSI login request PDU header */
struct iscsi_login_req_hdr {
u8 version_min;
u8 version_max;
u8 flags_attr;
#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3
#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0
#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3
#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2
#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3
#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4
#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1
#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6
#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1
#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
__le32 itt;
__le16 reserved1;
__le16 cid;
__le32 cmd_sn;
__le32 exp_stat_sn;
__le32 reserved2[4];
};
/* iSCSI logout request PDU header */
struct iscsi_logout_req_hdr {
__le16 reserved0;
u8 reason_code;
u8 opcode;
__le32 reserved1;
__le32 reserved2[2];
__le32 itt;
__le16 reserved3;
__le16 cid;
__le32 cmd_sn;
__le32 exp_stat_sn;
__le32 reserved4[4];
};
/* iSCSI Data-out PDU header */
struct iscsi_data_out_hdr {
__le16 reserved1;
u8 flags_attr;
#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F
#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0
#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1
#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
u8 opcode;
__le32 reserved2;
struct regpair lun;
__le32 itt;
__le32 ttt;
__le32 reserved3;
__le32 exp_stat_sn;
__le32 reserved4;
__le32 data_sn;
__le32 buffer_offset;
__le32 reserved5;
};
/* iSCSI Data-in PDU header */
struct iscsi_data_in_hdr {
u8 status_rsvd;
u8 reserved1;
u8 flags;
#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1
#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0
#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1
#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1
#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2
#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7
#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3
#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1
#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6
#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1
#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7
u8 opcode;
__le32 reserved2;
struct regpair lun;
__le32 itt;
__le32 ttt;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le32 data_sn;
__le32 buffer_offset;
__le32 residual_count;
};
/* iSCSI R2T PDU header */
struct iscsi_r2t_hdr {
u8 reserved0[3];
u8 opcode;
__le32 reserved2;
struct regpair lun;
__le32 itt;
__le32 ttt;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le32 r2t_sn;
__le32 buffer_offset;
__le32 desired_data_trns_len;
};
/* iSCSI NOP-out PDU header */
struct iscsi_nop_out_hdr {
__le16 reserved1;
u8 flags_attr;
#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F
#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0
#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1
#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
u8 opcode;
__le32 reserved2;
struct regpair lun;
__le32 itt;
__le32 ttt;
__le32 cmd_sn;
__le32 exp_stat_sn;
__le32 reserved3;
__le32 reserved4;
__le32 reserved5;
__le32 reserved6;
};
/* iSCSI NOP-in PDU header */
struct iscsi_nop_in_hdr {
__le16 reserved0;
u8 flags_attr;
#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F
#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0
#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1
#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 ttt;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le32 reserved5;
__le32 reserved6;
__le32 reserved7;
};
/* iSCSI Login Response PDU header */
struct iscsi_login_response_hdr {
u8 version_active;
u8 version_max;
u8 flags_attr;
#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3
#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3
#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2
#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3
#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4
#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1
#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6
#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1
#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
__le32 itt;
__le32 reserved1;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le16 reserved2;
u8 status_detail;
u8 status_class;
__le32 reserved4[2];
};
/* iSCSI Logout Response PDU header */
struct iscsi_logout_response_hdr {
u8 reserved1;
u8 response;
u8 flags;
u8 opcode;
__le32 hdr_second_dword;
#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 reserved2[2];
__le32 itt;
__le32 reserved3;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le32 reserved4;
__le16 time_2_retain;
__le16 time_2_wait;
__le32 reserved5[1];
};
/* iSCSI Text Request PDU header */
struct iscsi_text_request_hdr {
__le16 reserved0;
u8 flags_attr;
#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F
#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0
#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1
#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6
#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1
#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 ttt;
__le32 cmd_sn;
__le32 exp_stat_sn;
__le32 reserved4[4];
};
/* iSCSI Text Response PDU header */
struct iscsi_text_response_hdr {
__le16 reserved1;
u8 flags;
#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F
#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0
#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1
#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6
#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1
#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 ttt;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le32 reserved4[3];
};
/* iSCSI TMF Request PDU header */
struct iscsi_tmf_request_hdr {
__le16 reserved0;
u8 function;
u8 opcode;
__le32 hdr_second_dword;
#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 rtt;
__le32 cmd_sn;
__le32 exp_stat_sn;
__le32 ref_cmd_sn;
__le32 exp_data_sn;
__le32 reserved4[2];
};
struct iscsi_tmf_response_hdr {
u8 reserved2;
u8 hdr_response;
u8 hdr_flags;
u8 opcode;
__le32 hdr_second_dword;
#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
__le32 itt;
__le32 reserved1;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le32 reserved4[3];
};
/* iSCSI Response PDU header */
struct iscsi_response_hdr {
u8 hdr_status;
u8 hdr_response;
u8 hdr_flags;
u8 opcode;
__le32 hdr_second_dword;
#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 snack_tag;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le32 exp_data_sn;
__le32 bi_residual_count;
__le32 residual_count;
};
/* iSCSI Reject PDU header */
struct iscsi_reject_hdr {
u8 reserved4;
u8 hdr_reason;
u8 hdr_flags;
u8 opcode;
__le32 hdr_second_dword;
#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
__le32 all_ones;
__le32 reserved2;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le32 data_sn;
__le32 reserved3[2];
};
/* iSCSI Asynchronous Message PDU header */
struct iscsi_async_msg_hdr {
__le16 reserved0;
u8 flags_attr;
#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F
#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0
#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1
#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 all_ones;
__le32 reserved1;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le16 param1_rsrv;
u8 async_vcode;
u8 async_event;
__le16 param3_rsrv;
__le16 param2_rsrv;
__le32 reserved7;
};
/* PDU header part of Ystorm task context */
union iscsi_task_hdr {
struct iscsi_common_hdr common;
struct data_hdr data;
struct iscsi_cmd_hdr cmd;
struct iscsi_ext_cdb_cmd_hdr ext_cdb_cmd;
struct iscsi_login_req_hdr login_req;
struct iscsi_logout_req_hdr logout_req;
struct iscsi_data_out_hdr data_out;
struct iscsi_data_in_hdr data_in;
struct iscsi_r2t_hdr r2t;
struct iscsi_nop_out_hdr nop_out;
struct iscsi_nop_in_hdr nop_in;
struct iscsi_login_response_hdr login_response;
struct iscsi_logout_response_hdr logout_response;
struct iscsi_text_request_hdr text_request;
struct iscsi_text_response_hdr text_response;
struct iscsi_tmf_request_hdr tmf_request;
struct iscsi_tmf_response_hdr tmf_response;
struct iscsi_response_hdr response;
struct iscsi_reject_hdr reject;
struct iscsi_async_msg_hdr async_msg;
};
/* The iscsi storm task context of Ystorm */
struct ystorm_iscsi_task_st_ctx {
struct ystorm_iscsi_task_state state;
struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
union iscsi_task_hdr pdu_hdr;
};
struct ystorm_iscsi_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 word0;
u8 flags0;
#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
u8 flags1;
#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 TTT;
u8 byte3;
u8 byte4;
__le16 word1;
};
struct mstorm_iscsi_task_ag_ctx {
u8 cdu_validation;
u8 byte1;
__le16 task_cid;
u8 flags0;
#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
u8 flags1;
#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
u8 byte4;
__le16 word1;
};
struct ustorm_iscsi_task_ag_ctx {
u8 reserved;
u8 state;
__le16 icid;
u8 flags0;
#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
u8 flags1;
#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
u8 flags3;
#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 rcv_cont_len;
__le32 exp_cont_len;
__le32 total_data_acked;
__le32 exp_data_acked;
u8 byte2;
u8 byte3;
__le16 word1;
__le16 next_tid;
__le16 word3;
__le32 hdr_residual_count;
__le32 exp_r2t_sn;
};
/* The iscsi storm task context of Mstorm */
struct mstorm_iscsi_task_st_ctx {
struct scsi_cached_sges data_desc;
struct scsi_sgl_params sgl_params;
__le32 rem_task_size;
__le32 data_buffer_offset;
u8 task_type;
struct iscsi_dif_flags dif_flags;
__le16 dif_task_icid;
struct regpair sense_db;
__le32 expected_itt;
__le32 reserved1;
};
struct iscsi_reg1 {
__le32 reg1_map;
#define ISCSI_REG1_NUM_SGES_MASK 0xF
#define ISCSI_REG1_NUM_SGES_SHIFT 0
#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
#define ISCSI_REG1_RESERVED1_SHIFT 4
};
struct tqe_opaque {
__le16 opaque[2];
};
/* The iscsi storm task context of Ustorm */
struct ustorm_iscsi_task_st_ctx {
__le32 rem_rcv_len;
__le32 exp_data_transfer_len;
__le32 exp_data_sn;
struct regpair lun;
struct iscsi_reg1 reg1;
u8 flags2;
#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
struct iscsi_dif_flags dif_flags;
__le16 reserved3;
struct tqe_opaque tqe_opaque_list;
__le32 reserved5;
__le32 reserved6;
__le32 reserved7;
u8 task_type;
u8 error_flags;
#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3
u8 flags;
#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3
#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0
#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7
u8 cq_rss_number;
};
/* iscsi task context */
struct iscsi_task_context {
struct ystorm_iscsi_task_st_ctx ystorm_st_context;
struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
struct regpair ystorm_ag_padding[2];
struct tdif_task_context tdif_context;
struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
struct regpair mstorm_ag_padding[2];
struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
struct mstorm_iscsi_task_st_ctx mstorm_st_context;
struct ustorm_iscsi_task_st_ctx ustorm_st_context;
struct rdif_task_context rdif_context;
};
/* iSCSI connection offload params passed by driver to FW in ISCSI offload
* ramrod.
*/
struct iscsi_conn_offload_params {
struct regpair sq_pbl_addr;
struct regpair r2tq_pbl_addr;
struct regpair xhq_pbl_addr;
struct regpair uhq_pbl_addr;
__le16 physical_q0;
__le16 physical_q1;
u8 flags;
#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
u8 default_cq;
__le16 reserved0;
__le32 stat_sn;
__le32 initial_ack;
};
/* iSCSI connection statistics */
struct iscsi_conn_stats_params {
struct regpair iscsi_tcp_tx_packets_cnt;
struct regpair iscsi_tcp_tx_bytes_cnt;
struct regpair iscsi_tcp_tx_rxmit_cnt;
struct regpair iscsi_tcp_rx_packets_cnt;
struct regpair iscsi_tcp_rx_bytes_cnt;
struct regpair iscsi_tcp_rx_dup_ack_cnt;
__le32 iscsi_tcp_rx_chksum_err_cnt;
__le32 reserved;
};
/* iSCSI connection update params passed by driver to FW in ISCSI update
* ramrod.
*/
struct iscsi_conn_update_ramrod_params {
__le16 reserved0;
__le16 conn_id;
__le32 reserved1;
u8 flags;
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7
u8 reserved3[3];
__le32 max_seq_size;
__le32 max_send_pdu_length;
__le32 max_recv_pdu_length;
__le32 first_seq_length;
__le32 exp_stat_sn;
union dif_configuration_params dif_on_imme_params;
};
/* iSCSI CQ element */
struct iscsi_cqe_common {
__le16 conn_id;
u8 cqe_type;
union cqe_error_status error_bitmap;
__le32 reserved[3];
union iscsi_task_hdr iscsi_hdr;
};
/* iSCSI CQ element */
struct iscsi_cqe_solicited {
__le16 conn_id;
u8 cqe_type;
union cqe_error_status error_bitmap;
__le16 itid;
u8 task_type;
u8 fw_dbg_field;
u8 caused_conn_err;
u8 reserved0[3];
__le32 data_truncated_bytes;
union iscsi_task_hdr iscsi_hdr;
};
/* iSCSI CQ element */
struct iscsi_cqe_unsolicited {
__le16 conn_id;
u8 cqe_type;
union cqe_error_status error_bitmap;
__le16 reserved0;
u8 reserved1;
u8 unsol_cqe_type;
__le16 rqe_opaque;
__le16 reserved2[3];
union iscsi_task_hdr iscsi_hdr;
};
/* iSCSI CQ element */
union iscsi_cqe {
struct iscsi_cqe_common cqe_common;
struct iscsi_cqe_solicited cqe_solicited;
struct iscsi_cqe_unsolicited cqe_unsolicited;
};
/* iSCSI CQE type */
enum iscsi_cqes_type {
ISCSI_CQE_TYPE_SOLICITED = 1,
ISCSI_CQE_TYPE_UNSOLICITED,
ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
ISCSI_CQE_TYPE_TASK_CLEANUP,
ISCSI_CQE_TYPE_DUMMY,
MAX_ISCSI_CQES_TYPE
};
/* iSCSI CQE type */
enum iscsi_cqe_unsolicited_type {
ISCSI_CQE_UNSOLICITED_NONE,
ISCSI_CQE_UNSOLICITED_SINGLE,
ISCSI_CQE_UNSOLICITED_FIRST,
ISCSI_CQE_UNSOLICITED_MIDDLE,
ISCSI_CQE_UNSOLICITED_LAST,
MAX_ISCSI_CQE_UNSOLICITED_TYPE
};
/* iscsi debug modes */
struct iscsi_debug_modes {
u8 flags;
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6
#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7
};
/* iSCSI kernel completion queue IDs */
enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_INIT_FUNC = 0,
ISCSI_EVENT_TYPE_DESTROY_FUNC,
ISCSI_EVENT_TYPE_OFFLOAD_CONN,
ISCSI_EVENT_TYPE_UPDATE_CONN,
ISCSI_EVENT_TYPE_CLEAR_SQ,
ISCSI_EVENT_TYPE_TERMINATE_CONN,
ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
ISCSI_EVENT_TYPE_COLLECT_STATS_CONN,
ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
ISCSI_EVENT_TYPE_ASYN_SYN_RCVD,
ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME,
ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT,
ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT,
ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
MAX_ISCSI_EQE_OPCODE
};
/* iSCSI EQE and CQE completion status */
enum iscsi_error_types {
ISCSI_STATUS_NONE = 0,
ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
ISCSI_CONN_ERROR_TASK_NOT_VALID,
ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
ISCSI_CONN_ERROR_DATA_OVERRUN,
ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
ISCSI_CONN_ERROR_IP_OPTIONS_ERROR,
ISCSI_CONN_ERROR_PRS_ERRORS,
ISCSI_CONN_ERROR_CONNECT_INVALID_TCP_OPTION,
ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_TTT_OUT_OF_RANGE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_OUT_ITT,
ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO_S_BIT_ONE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
ISCSI_CONN_ERROR_INVALID_ITT,
ISCSI_ERROR_UNKNOWN,
MAX_ISCSI_ERROR_TYPES
};
/* iSCSI Ramrod Command IDs */
enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UNUSED = 0,
ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
ISCSI_RAMROD_CMD_ID_DESTROY_FUNC = 2,
ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
ISCSI_RAMROD_CMD_ID_CONN_STATS = 8,
MAX_ISCSI_RAMROD_CMD_ID
};
/* iSCSI connection termination request */
struct iscsi_spe_conn_mac_update {
__le16 reserved0;
__le16 conn_id;
__le32 reserved1;
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
u8 reserved2[2];
};
/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
* iSCSI offload ramrod.
*/
struct iscsi_spe_conn_offload {
__le16 reserved0;
__le16 conn_id;
__le32 reserved1;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params tcp;
};
/* iSCSI and TCP connection (Option 2) offload params passed by driver to FW in
* iSCSI offload ramrod.
*/
struct iscsi_spe_conn_offload_option2 {
__le16 reserved0;
__le16 conn_id;
__le32 reserved1;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params_opt2 tcp;
};
/* iSCSI collect connection statistics request */
struct iscsi_spe_conn_statistics {
__le16 reserved0;
__le16 conn_id;
__le32 reserved1;
u8 reset_stats;
u8 reserved2[7];
struct regpair stats_cnts_addr;
};
/* iSCSI connection termination request */
struct iscsi_spe_conn_termination {
__le16 reserved0;
__le16 conn_id;
__le32 reserved1;
u8 abortive;
u8 reserved2[7];
struct regpair queue_cnts_addr;
struct regpair query_params_addr;
};
/* iSCSI firmware function init parameters */
struct iscsi_spe_func_init {
__le16 half_way_close_timeout;
u8 num_sq_pages_in_ring;
u8 num_r2tq_pages_in_ring;
u8 num_uhq_pages_in_ring;
u8 ll2_rx_queue_id;
u8 flags;
#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1
#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F
#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1
struct iscsi_debug_modes debug_mode;
u8 params;
#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF
#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0
#define ISCSI_SPE_FUNC_INIT_RESERVED1_MASK 0xF
#define ISCSI_SPE_FUNC_INIT_RESERVED1_SHIFT 4
u8 reserved2[7];
struct scsi_init_func_params func_params;
struct scsi_init_func_queues q_params;
};
/* iSCSI task type */
enum iscsi_task_type {
ISCSI_TASK_TYPE_INITIATOR_WRITE,
ISCSI_TASK_TYPE_INITIATOR_READ,
ISCSI_TASK_TYPE_MIDPATH,
ISCSI_TASK_TYPE_UNSOLIC,
ISCSI_TASK_TYPE_EXCHCLEANUP,
ISCSI_TASK_TYPE_IRRELEVANT,
ISCSI_TASK_TYPE_TARGET_WRITE,
ISCSI_TASK_TYPE_TARGET_READ,
ISCSI_TASK_TYPE_TARGET_RESPONSE,
ISCSI_TASK_TYPE_LOGIN_RESPONSE,
ISCSI_TASK_TYPE_TARGET_IMM_W_DIF,
MAX_ISCSI_TASK_TYPE
};
/* iSCSI DesiredDataTransferLength/ttt union */
union iscsi_ttt_txlen_union {
__le32 desired_tx_len;
__le32 ttt;
};
/* iSCSI uHQ element */
struct iscsi_uhqe {
__le32 reg1;
#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF
#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0
#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1
#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20
#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1
#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21
#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1
#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22
#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1
#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF
#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24
__le32 reg2;
#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF
#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0
#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF
#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
};
/* iSCSI WQ element */
struct iscsi_wqe {
__le16 task_id;
u8 flags;
#define ISCSI_WQE_WQE_TYPE_MASK 0x7
#define ISCSI_WQE_WQE_TYPE_SHIFT 0
#define ISCSI_WQE_NUM_SGES_MASK 0xF
#define ISCSI_WQE_NUM_SGES_SHIFT 3
#define ISCSI_WQE_RESPONSE_MASK 0x1
#define ISCSI_WQE_RESPONSE_SHIFT 7
struct iscsi_dif_flags prot_flags;
__le32 contlen_cdbsize;
#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF
#define ISCSI_WQE_CONT_LEN_SHIFT 0
#define ISCSI_WQE_CDB_SIZE_MASK 0xFF
#define ISCSI_WQE_CDB_SIZE_SHIFT 24
};
/* iSCSI wqe type */
enum iscsi_wqe_type {
ISCSI_WQE_TYPE_NORMAL,
ISCSI_WQE_TYPE_TASK_CLEANUP,
ISCSI_WQE_TYPE_MIDDLE_PATH,
ISCSI_WQE_TYPE_LOGIN,
ISCSI_WQE_TYPE_FIRST_R2T_CONT,
ISCSI_WQE_TYPE_NONFIRST_R2T_CONT,
ISCSI_WQE_TYPE_RESPONSE,
MAX_ISCSI_WQE_TYPE
};
/* iSCSI xHQ element */
struct iscsi_xhqe {
union iscsi_ttt_txlen_union ttt_or_txlen;
__le32 exp_stat_sn;
struct iscsi_dif_flags prot_flags;
u8 total_ahs_length;
u8 opcode;
u8 flags;
#define ISCSI_XHQE_FINAL_MASK 0x1
#define ISCSI_XHQE_FINAL_SHIFT 0
#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
#define ISCSI_XHQE_NUM_SGES_MASK 0xF
#define ISCSI_XHQE_NUM_SGES_SHIFT 2
#define ISCSI_XHQE_RESERVED0_MASK 0x3
#define ISCSI_XHQE_RESERVED0_SHIFT 6
union iscsi_seq_num seq_num;
__le16 reserved1;
};
/* Per PF iSCSI receive path statistics - mStorm RAM structure */
struct mstorm_iscsi_stats_drv {
struct regpair iscsi_rx_dropped_pdus_task_not_valid;
struct regpair iscsi_rx_dup_ack_cnt;
};
/* Per PF iSCSI transmit path statistics - pStorm RAM structure */
struct pstorm_iscsi_stats_drv {
struct regpair iscsi_tx_bytes_cnt;
struct regpair iscsi_tx_packet_cnt;
};
/* Per PF iSCSI receive path statistics - tStorm RAM structure */
struct tstorm_iscsi_stats_drv {
struct regpair iscsi_rx_bytes_cnt;
struct regpair iscsi_rx_packet_cnt;
struct regpair iscsi_rx_new_ooo_isle_events_cnt;
struct regpair iscsi_rx_tcp_payload_bytes_cnt;
struct regpair iscsi_rx_tcp_pkt_cnt;
struct regpair iscsi_rx_pure_ack_cnt;
__le32 iscsi_cmdq_threshold_cnt;
__le32 iscsi_rq_threshold_cnt;
__le32 iscsi_immq_threshold_cnt;
};
/* Per PF iSCSI receive path statistics - uStorm RAM structure */
struct ustorm_iscsi_stats_drv {
struct regpair iscsi_rx_data_pdu_cnt;
struct regpair iscsi_rx_r2t_pdu_cnt;
struct regpair iscsi_rx_total_pdu_cnt;
};
/* Per PF iSCSI transmit path statistics - xStorm RAM structure */
struct xstorm_iscsi_stats_drv {
struct regpair iscsi_tx_go_to_slow_start_event_cnt;
struct regpair iscsi_tx_fast_retransmit_event_cnt;
struct regpair iscsi_tx_pure_ack_cnt;
struct regpair iscsi_tx_delayed_ack_cnt;
};
/* Per PF iSCSI transmit path statistics - yStorm RAM structure */
struct ystorm_iscsi_stats_drv {
struct regpair iscsi_tx_data_pdu_cnt;
struct regpair iscsi_tx_r2t_pdu_cnt;
struct regpair iscsi_tx_total_pdu_cnt;
struct regpair iscsi_tx_tcp_payload_bytes_cnt;
struct regpair iscsi_tx_tcp_pkt_cnt;
};
struct tstorm_iscsi_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
u8 byte3;
u8 byte4;
__le16 word2;
__le16 word3;
__le16 word4;
__le32 reg1;
__le32 reg2;
};
/* iSCSI doorbell data */
struct iscsi_db_data {
u8 params;
#define ISCSI_DB_DATA_DEST_MASK 0x3
#define ISCSI_DB_DATA_DEST_SHIFT 0
#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
#define ISCSI_DB_DATA_RESERVED_MASK 0x1
#define ISCSI_DB_DATA_RESERVED_SHIFT 5
#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags;
__le16 sq_prod;
};
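/*
* Illustrative use of the MASK/SHIFT pairs above (sketch only; db_dest and
* sq_prod_idx are hypothetical caller-supplied values):
*
*	struct iscsi_db_data db_data = {};
*
*	db_data.params |= (db_dest & ISCSI_DB_DATA_DEST_MASK) <<
*			  ISCSI_DB_DATA_DEST_SHIFT;
*	db_data.sq_prod = cpu_to_le16(sq_prod_idx);
*/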
#endif /* __ISCSI_COMMON__ */
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*/
#ifndef _PixelGen_SysBlock_defs_h
#define _PixelGen_SysBlock_defs_h
/* Parameters and User_Parameters for HSS */
#define _PXG_PPC Ppc
#define _PXG_PIXEL_BITS PixelWidth
#define _PXG_MAX_NOF_SID MaxNofSids
#define _PXG_DATA_BITS DataWidth
#define _PXG_CNT_BITS CntWidth
#define _PXG_FIFODEPTH FifoDepth
#define _PXG_DBG Dbg_device_not_included
/* IDs and Addresses */
#define _PXG_ADRRESS_ALIGN_REG 4
#define _PXG_COM_ENABLE_REG_IDX 0
#define _PXG_PRBS_RSTVAL_REG0_IDX 1
#define _PXG_PRBS_RSTVAL_REG1_IDX 2
#define _PXG_SYNG_SID_REG_IDX 3
#define _PXG_SYNG_FREE_RUN_REG_IDX 4
#define _PXG_SYNG_PAUSE_REG_IDX 5
#define _PXG_SYNG_NOF_FRAME_REG_IDX 6
#define _PXG_SYNG_NOF_PIXEL_REG_IDX 7
#define _PXG_SYNG_NOF_LINE_REG_IDX 8
#define _PXG_SYNG_HBLANK_CYC_REG_IDX 9
#define _PXG_SYNG_VBLANK_CYC_REG_IDX 10
#define _PXG_SYNG_STAT_HCNT_REG_IDX 11
#define _PXG_SYNG_STAT_VCNT_REG_IDX 12
#define _PXG_SYNG_STAT_FCNT_REG_IDX 13
#define _PXG_SYNG_STAT_DONE_REG_IDX 14
#define _PXG_TPG_MODE_REG_IDX 15
#define _PXG_TPG_HCNT_MASK_REG_IDX 16
#define _PXG_TPG_VCNT_MASK_REG_IDX 17
#define _PXG_TPG_XYCNT_MASK_REG_IDX 18
#define _PXG_TPG_HCNT_DELTA_REG_IDX 19
#define _PXG_TPG_VCNT_DELTA_REG_IDX 20
#define _PXG_TPG_R1_REG_IDX 21
#define _PXG_TPG_G1_REG_IDX 22
#define _PXG_TPG_B1_REG_IDX 23
#define _PXG_TPG_R2_REG_IDX 24
#define _PXG_TPG_G2_REG_IDX 25
#define _PXG_TPG_B2_REG_IDX 26
/* */
#define _PXG_SYNG_PAUSE_CYCLES 0
/* Subblock IDs */
#define _PXG_DISABLE_IDX 0
#define _PXG_PRBS_IDX 0
#define _PXG_TPG_IDX 1
#define _PXG_SYNG_IDX 2
#define _PXG_SMUX_IDX 3
/* Register Widths */
#define _PXG_COM_ENABLE_REG_WIDTH 2
#define _PXG_COM_SRST_REG_WIDTH 4
#define _PXG_PRBS_RSTVAL_REG0_WIDTH 31
#define _PXG_PRBS_RSTVAL_REG1_WIDTH 31
#define _PXG_SYNG_SID_REG_WIDTH 3
#define _PXG_SYNG_FREE_RUN_REG_WIDTH 1
#define _PXG_SYNG_PAUSE_REG_WIDTH 1
/*
#define _PXG_SYNG_NOF_FRAME_REG_WIDTH <sync_gen_cnt_width>
#define _PXG_SYNG_NOF_PIXEL_REG_WIDTH <sync_gen_cnt_width>
#define _PXG_SYNG_NOF_LINE_REG_WIDTH <sync_gen_cnt_width>
#define _PXG_SYNG_HBLANK_CYC_REG_WIDTH <sync_gen_cnt_width>
#define _PXG_SYNG_VBLANK_CYC_REG_WIDTH <sync_gen_cnt_width>
#define _PXG_SYNG_STAT_HCNT_REG_WIDTH <sync_gen_cnt_width>
#define _PXG_SYNG_STAT_VCNT_REG_WIDTH <sync_gen_cnt_width>
#define _PXG_SYNG_STAT_FCNT_REG_WIDTH <sync_gen_cnt_width>
*/
#define _PXG_SYNG_STAT_DONE_REG_WIDTH 1
#define _PXG_TPG_MODE_REG_WIDTH 2
/*
#define _PXG_TPG_HCNT_MASK_REG_WIDTH <sync_gen_cnt_width>
#define _PXG_TPG_VCNT_MASK_REG_WIDTH <sync_gen_cnt_width>
#define _PXG_TPG_XYCNT_MASK_REG_WIDTH <pixel_width>
*/
#define _PXG_TPG_HCNT_DELTA_REG_WIDTH 4
#define _PXG_TPG_VCNT_DELTA_REG_WIDTH 4
/*
#define _PXG_TPG_R1_REG_WIDTH <pixel_width>
#define _PXG_TPG_G1_REG_WIDTH <pixel_width>
#define _PXG_TPG_B1_REG_WIDTH <pixel_width>
#define _PXG_TPG_R2_REG_WIDTH <pixel_width>
#define _PXG_TPG_G2_REG_WIDTH <pixel_width>
#define _PXG_TPG_B2_REG_WIDTH <pixel_width>
*/
#define _PXG_FIFO_DEPTH 2
/* MISC */
#define _PXG_ENABLE_REG_VAL 1
#define _PXG_PRBS_ENABLE_REG_VAL 1
#define _PXG_TPG_ENABLE_REG_VAL 2
#define _PXG_SYNG_ENABLE_REG_VAL 4
#define _PXG_FIFO_ENABLE_REG_VAL 8
#define _PXG_PXL_BITS 14
#define _PXG_INVALID_FLAG 0xDEADBEEF
#define _PXG_CAFE_FLAG 0xCAFEBABE
#endif /* _PixelGen_SysBlock_defs_h */
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for National Instruments AT-A2150 boards
* Copyright (C) 2001, 2002 Frank Mori Hess <[email protected]>
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <[email protected]>
*/
/*
* Driver: ni_at_a2150
* Description: National Instruments AT-A2150
* Author: Frank Mori Hess
* Status: works
* Devices: [National Instruments] AT-A2150C (at_a2150c), AT-2150S (at_a2150s)
*
* Configuration options:
* [0] - I/O port base address
* [1] - IRQ (optional, required for timed conversions)
* [2] - DMA (optional, required for timed conversions)
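*
* For example, a board jumpered to I/O base 0x1c0 using IRQ 3 and DMA
* channel 5 (illustrative values) could be attached with:
*   comedi_config /dev/comedi0 ni_at_a2150 0x1c0,3,5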
*
* Yet another driver for obsolete hardware brought to you by Frank Hess.
* Testing and debugging help provided by Dave Andruczyk.
*
* If you want to ac couple the board's inputs, use AREF_OTHER.
*
* The only difference in the boards is their master clock frequencies.
*
* References (from ftp://ftp.natinst.com/support/manuals):
* 320360.pdf AT-A2150 User Manual
*
* TODO:
* - analog level triggering
* - TRIG_WAKE_EOS
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_8254.h>
#include <linux/comedi/comedi_isadma.h>
#define A2150_DMA_BUFFER_SIZE 0xff00 /* size in bytes of dma buffer */
/* Registers and bits */
#define CONFIG_REG 0x0
#define CHANNEL_BITS(x) ((x) & 0x7)
#define CHANNEL_MASK 0x7
#define CLOCK_SELECT_BITS(x) (((x) & 0x3) << 3)
#define CLOCK_DIVISOR_BITS(x) (((x) & 0x3) << 5)
#define CLOCK_MASK (0xf << 3)
/* enable (don't internally ground) channels 0 and 1 */
#define ENABLE0_BIT 0x80
/* enable (don't internally ground) channels 2 and 3 */
#define ENABLE1_BIT 0x100
#define AC0_BIT 0x200 /* ac couple channels 0,1 */
#define AC1_BIT 0x400 /* ac couple channels 2,3 */
#define APD_BIT 0x800 /* analog power down */
#define DPD_BIT 0x1000 /* digital power down */
#define TRIGGER_REG 0x2 /* trigger config register */
#define POST_TRIGGER_BITS 0x2
#define DELAY_TRIGGER_BITS 0x3
#define HW_TRIG_EN 0x10 /* enable hardware trigger */
#define FIFO_START_REG 0x6 /* software start acquisition trigger */
#define FIFO_RESET_REG 0x8 /* clears fifo + fifo flags */
#define FIFO_DATA_REG 0xa /* read data */
#define DMA_TC_CLEAR_REG 0xe /* clear dma terminal count interrupt */
#define STATUS_REG 0x12 /* read only */
#define FNE_BIT 0x1 /* fifo not empty */
#define OVFL_BIT 0x8 /* fifo overflow */
#define EDAQ_BIT 0x10 /* end of acquisition interrupt */
#define DCAL_BIT 0x20 /* offset calibration in progress */
#define INTR_BIT 0x40 /* interrupt has occurred */
/* dma terminal count interrupt has occurred */
#define DMA_TC_BIT 0x80
#define ID_BITS(x) (((x) >> 8) & 0x3)
#define IRQ_DMA_CNTRL_REG 0x12 /* write only */
#define DMA_CHAN_BITS(x) ((x) & 0x7) /* sets dma channel */
#define DMA_EN_BIT 0x8 /* enables dma */
#define IRQ_LVL_BITS(x) (((x) & 0xf) << 4) /* sets irq level */
#define FIFO_INTR_EN_BIT 0x100 /* enable fifo interrupts */
#define FIFO_INTR_FHF_BIT 0x200 /* interrupt fifo half full */
/* enable interrupt on dma terminal count */
#define DMA_INTR_EN_BIT 0x800
#define DMA_DEM_EN_BIT 0x1000 /* enables demand mode dma */
#define I8253_BASE_REG 0x14
struct a2150_board {
const char *name;
int clock[4]; /* master clock periods, in nanoseconds */
int num_clocks; /* number of available master clock speeds */
int ai_speed; /* maximum conversion rate in nanoseconds */
};
/* analog input range */
static const struct comedi_lrange range_a2150 = {
1, {
BIP_RANGE(2.828)
}
};
/* enum must match board indices */
enum { a2150_c, a2150_s };
static const struct a2150_board a2150_boards[] = {
{
.name = "at-a2150c",
.clock = {31250, 22676, 20833, 19531},
.num_clocks = 4,
.ai_speed = 19531,
},
{
.name = "at-a2150s",
.clock = {62500, 50000, 41667, 0},
.num_clocks = 3,
.ai_speed = 41667,
},
};
struct a2150_private {
struct comedi_isadma *dma;
unsigned int count; /* number of data points left to be taken */
int irq_dma_bits; /* irq/dma register bits */
int config_bits; /* config register bits */
};
/* interrupt service routine */
static irqreturn_t a2150_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct a2150_private *devpriv = dev->private;
struct comedi_isadma *dma = devpriv->dma;
struct comedi_isadma_desc *desc = &dma->desc[0];
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned short *buf = desc->virt_addr;
unsigned int max_points, num_points, residue, leftover;
unsigned short dpnt;
int status;
int i;
if (!dev->attached)
return IRQ_HANDLED;
status = inw(dev->iobase + STATUS_REG);
if ((status & INTR_BIT) == 0)
return IRQ_NONE;
if (status & OVFL_BIT) {
async->events |= COMEDI_CB_ERROR;
comedi_handle_events(dev, s);
}
if ((status & DMA_TC_BIT) == 0) {
async->events |= COMEDI_CB_ERROR;
comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
/*
* residue is the number of bytes left to be done on the dma
* transfer. It should always be zero at this point unless
* the stop_src is set to external triggering.
*/
residue = comedi_isadma_disable(desc->chan);
/* figure out how many points to read */
max_points = comedi_bytes_to_samples(s, desc->size);
num_points = max_points - comedi_bytes_to_samples(s, residue);
if (devpriv->count < num_points && cmd->stop_src == TRIG_COUNT)
num_points = devpriv->count;
/* figure out how many points will be stored next time */
leftover = 0;
if (cmd->stop_src == TRIG_NONE) {
leftover = comedi_bytes_to_samples(s, desc->size);
} else if (devpriv->count > max_points) {
leftover = devpriv->count - max_points;
if (leftover > max_points)
leftover = max_points;
}
/*
* There should only be a residue if collection was stopped by having
* the stop_src set to an external trigger, in which case there
* will be no more data
*/
if (residue)
leftover = 0;
for (i = 0; i < num_points; i++) {
/* write data point to comedi buffer */
dpnt = buf[i];
/* convert from 2's complement to unsigned coding */
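/* e.g. 0x8000 (most negative code) becomes 0x0000, 0x7fff becomes 0xffff */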
dpnt ^= 0x8000;
comedi_buf_write_samples(s, &dpnt, 1);
if (cmd->stop_src == TRIG_COUNT) {
if (--devpriv->count == 0) { /* end of acquisition */
async->events |= COMEDI_CB_EOA;
break;
}
}
}
/* re-enable dma */
if (leftover) {
desc->size = comedi_samples_to_bytes(s, leftover);
comedi_isadma_program(desc);
}
comedi_handle_events(dev, s);
/* clear interrupt */
outw(0x00, dev->iobase + DMA_TC_CLEAR_REG);
return IRQ_HANDLED;
}
static int a2150_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct a2150_private *devpriv = dev->private;
struct comedi_isadma *dma = devpriv->dma;
struct comedi_isadma_desc *desc = &dma->desc[0];
/* disable dma on card */
devpriv->irq_dma_bits &= ~DMA_INTR_EN_BIT & ~DMA_EN_BIT;
outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG);
/* disable computer's dma */
comedi_isadma_disable(desc->chan);
/* clear fifo and reset triggering circuitry */
outw(0, dev->iobase + FIFO_RESET_REG);
return 0;
}
/*
* sets the clock bits in devpriv->config_bits to the nearest approximation
* of the requested period, and adjusts the requested period to the actual
* timing.
*/
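/*
* For example, with the at-a2150c clocks above, a requested period of
* 50000 ns can be realized as 22676 ns * 2 = 45352 ns or as
* 31250 ns * 2 = 62500 ns; with CMDF_ROUND_NEAREST the closer value,
* 45352 ns, is selected and written back through *period.
*/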
static int a2150_get_timing(struct comedi_device *dev, unsigned int *period,
unsigned int flags)
{
const struct a2150_board *board = dev->board_ptr;
struct a2150_private *devpriv = dev->private;
int lub, glb, temp;
int lub_divisor_shift, lub_index, glb_divisor_shift, glb_index;
int i, j;
/* initialize greatest lower and least upper bounds */
lub_divisor_shift = 3;
lub_index = 0;
lub = board->clock[lub_index] * (1 << lub_divisor_shift);
glb_divisor_shift = 0;
glb_index = board->num_clocks - 1;
glb = board->clock[glb_index] * (1 << glb_divisor_shift);
/* make sure period is in available range */
if (*period < glb)
*period = glb;
if (*period > lub)
*period = lub;
/* we can multiply period by 1, 2, 4, or 8, using (1 << i) */
for (i = 0; i < 4; i++) {
/* there are a maximum of 4 master clocks */
for (j = 0; j < board->num_clocks; j++) {
/* temp is the period in nanosec we are evaluating */
temp = board->clock[j] * (1 << i);
/* if it is the best match yet */
if (temp < lub && temp >= *period) {
lub_divisor_shift = i;
lub_index = j;
lub = temp;
}
if (temp > glb && temp <= *period) {
glb_divisor_shift = i;
glb_index = j;
glb = temp;
}
}
}
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
/* if least upper bound is better approximation */
if (lub - *period < *period - glb)
*period = lub;
else
*period = glb;
break;
case CMDF_ROUND_UP:
*period = lub;
break;
case CMDF_ROUND_DOWN:
*period = glb;
break;
}
/* set clock bits for config register appropriately */
devpriv->config_bits &= ~CLOCK_MASK;
if (*period == lub) {
devpriv->config_bits |=
CLOCK_SELECT_BITS(lub_index) |
CLOCK_DIVISOR_BITS(lub_divisor_shift);
} else {
devpriv->config_bits |=
CLOCK_SELECT_BITS(glb_index) |
CLOCK_DIVISOR_BITS(glb_divisor_shift);
}
return 0;
}
static int a2150_set_chanlist(struct comedi_device *dev,
unsigned int start_channel,
unsigned int num_channels)
{
struct a2150_private *devpriv = dev->private;
if (start_channel + num_channels > 4)
return -1;
devpriv->config_bits &= ~CHANNEL_MASK;
switch (num_channels) {
case 1:
devpriv->config_bits |= CHANNEL_BITS(0x4 | start_channel);
break;
case 2:
if (start_channel == 0)
devpriv->config_bits |= CHANNEL_BITS(0x2);
else if (start_channel == 2)
devpriv->config_bits |= CHANNEL_BITS(0x3);
else
return -1;
break;
case 4:
devpriv->config_bits |= CHANNEL_BITS(0x1);
break;
default:
return -1;
}
return 0;
}
static int a2150_ai_check_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
unsigned int aref0 = CR_AREF(cmd->chanlist[0]);
int i;
if (cmd->chanlist_len == 2 && (chan0 == 1 || chan0 == 3)) {
dev_dbg(dev->class_dev,
"length 2 chanlist must be channels 0,1 or channels 2,3\n");
return -EINVAL;
}
if (cmd->chanlist_len == 3) {
dev_dbg(dev->class_dev,
"chanlist must have 1,2 or 4 channels\n");
return -EINVAL;
}
for (i = 1; i < cmd->chanlist_len; i++) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
unsigned int aref = CR_AREF(cmd->chanlist[i]);
if (chan != (chan0 + i)) {
dev_dbg(dev->class_dev,
"entries in chanlist must be consecutive channels, counting upwards\n");
return -EINVAL;
}
if (chan == 2)
aref0 = aref;
if (aref != aref0) {
dev_dbg(dev->class_dev,
"channels 0/1 and 2/3 must have the same analog reference\n");
return -EINVAL;
}
}
return 0;
}
static int a2150_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
const struct a2150_board *board = dev->board_ptr;
int err = 0;
unsigned int arg;
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
err |= comedi_check_trigger_is_unique(cmd->start_src);
err |= comedi_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
if (cmd->convert_src == TRIG_TIMER) {
err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
board->ai_speed);
}
err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
else /* TRIG_NONE */
err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* step 4: fix up any arguments */
if (cmd->scan_begin_src == TRIG_TIMER) {
arg = cmd->scan_begin_arg;
a2150_get_timing(dev, &arg, cmd->flags);
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
}
if (err)
return 4;
/* Step 5: check channel list if it exists */
if (cmd->chanlist && cmd->chanlist_len > 0)
err |= a2150_ai_check_chanlist(dev, s, cmd);
if (err)
return 5;
return 0;
}
static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct a2150_private *devpriv = dev->private;
struct comedi_isadma *dma = devpriv->dma;
struct comedi_isadma_desc *desc = &dma->desc[0];
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int old_config_bits = devpriv->config_bits;
unsigned int trigger_bits;
if (cmd->flags & CMDF_PRIORITY) {
dev_err(dev->class_dev,
"dma incompatible with hard real-time interrupt (CMDF_PRIORITY), aborting\n");
return -1;
}
/* clear fifo and reset triggering circuitry */
outw(0, dev->iobase + FIFO_RESET_REG);
/* setup chanlist */
if (a2150_set_chanlist(dev, CR_CHAN(cmd->chanlist[0]),
cmd->chanlist_len) < 0)
return -1;
/* setup ac/dc coupling */
if (CR_AREF(cmd->chanlist[0]) == AREF_OTHER)
devpriv->config_bits |= AC0_BIT;
else
devpriv->config_bits &= ~AC0_BIT;
if (CR_AREF(cmd->chanlist[2]) == AREF_OTHER)
devpriv->config_bits |= AC1_BIT;
else
devpriv->config_bits &= ~AC1_BIT;
/* setup timing */
a2150_get_timing(dev, &cmd->scan_begin_arg, cmd->flags);
/* send timing, channel, config bits */
outw(devpriv->config_bits, dev->iobase + CONFIG_REG);
/* initialize number of samples remaining */
devpriv->count = cmd->stop_arg * cmd->chanlist_len;
comedi_isadma_disable(desc->chan);
/* set size of transfer to fill in 1/3 second */
#define ONE_THIRD_SECOND 333333333
desc->size = comedi_bytes_per_sample(s) * cmd->chanlist_len *
ONE_THIRD_SECOND / cmd->scan_begin_arg;
if (desc->size > desc->maxsize)
desc->size = desc->maxsize;
if (desc->size < comedi_bytes_per_sample(s))
desc->size = comedi_bytes_per_sample(s);
desc->size -= desc->size % comedi_bytes_per_sample(s);
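/*
* e.g. a 62500 ns scan period (16 kHz) with one channel (2 bytes/sample)
* gives 2 * 333333333 / 62500 = 10666 bytes per DMA block, i.e. roughly
* one third of a second of data.
*/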
comedi_isadma_program(desc);
/*
* Clear dma interrupt before enabling it, to try and get rid of
* that one spurious interrupt that has been happening.
*/
outw(0x00, dev->iobase + DMA_TC_CLEAR_REG);
/* enable dma on card */
devpriv->irq_dma_bits |= DMA_INTR_EN_BIT | DMA_EN_BIT;
outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG);
/* may need to wait 72 sampling periods if timing was changed */
comedi_8254_load(dev->pacer, 2, 72, I8254_MODE0 | I8254_BINARY);
/* setup start triggering */
trigger_bits = 0;
/* decide if we need to wait 72 periods for valid data */
if (cmd->start_src == TRIG_NOW &&
(old_config_bits & CLOCK_MASK) !=
(devpriv->config_bits & CLOCK_MASK)) {
/* set trigger source to delay trigger */
trigger_bits |= DELAY_TRIGGER_BITS;
} else {
/* otherwise no delay */
trigger_bits |= POST_TRIGGER_BITS;
}
/* enable external hardware trigger */
if (cmd->start_src == TRIG_EXT) {
trigger_bits |= HW_TRIG_EN;
} else if (cmd->start_src == TRIG_OTHER) {
/*
* XXX add support for level/slope start trigger
* using TRIG_OTHER
*/
dev_err(dev->class_dev, "you shouldn't see this?\n");
}
/* send trigger config bits */
outw(trigger_bits, dev->iobase + TRIGGER_REG);
/* start acquisition for soft trigger */
if (cmd->start_src == TRIG_NOW)
outw(0, dev->iobase + FIFO_START_REG);
return 0;
}
static int a2150_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inw(dev->iobase + STATUS_REG);
if (status & FNE_BIT)
return 0;
return -EBUSY;
}
static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct a2150_private *devpriv = dev->private;
unsigned int n;
int ret;
/* clear fifo and reset triggering circuitry */
outw(0, dev->iobase + FIFO_RESET_REG);
/* setup chanlist */
if (a2150_set_chanlist(dev, CR_CHAN(insn->chanspec), 1) < 0)
return -1;
/* set dc coupling */
devpriv->config_bits &= ~AC0_BIT;
devpriv->config_bits &= ~AC1_BIT;
/* send timing, channel, config bits */
outw(devpriv->config_bits, dev->iobase + CONFIG_REG);
/* disable dma on card */
devpriv->irq_dma_bits &= ~DMA_INTR_EN_BIT & ~DMA_EN_BIT;
outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG);
/* setup start triggering */
outw(0, dev->iobase + TRIGGER_REG);
/* start acquisition for soft trigger */
outw(0, dev->iobase + FIFO_START_REG);
/*
* there is a 35.6 sample delay for data to get through the
* antialias filter
*/
for (n = 0; n < 36; n++) {
ret = comedi_timeout(dev, s, insn, a2150_ai_eoc, 0);
if (ret)
return ret;
inw(dev->iobase + FIFO_DATA_REG);
}
/* read data */
for (n = 0; n < insn->n; n++) {
ret = comedi_timeout(dev, s, insn, a2150_ai_eoc, 0);
if (ret)
return ret;
data[n] = inw(dev->iobase + FIFO_DATA_REG);
data[n] ^= 0x8000;
}
/* clear fifo and reset triggering circuitry */
outw(0, dev->iobase + FIFO_RESET_REG);
return n;
}
static void a2150_alloc_irq_and_dma(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct a2150_private *devpriv = dev->private;
unsigned int irq_num = it->options[1];
unsigned int dma_chan = it->options[2];
/*
* Only IRQs 15, 14, 12-9, and 7-3 are valid.
* Only DMA channels 7-5 and 3-0 are valid.
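* The masks below encode this: 0xdef8 has bits 3-7, 9-12, 14 and 15 set
* (valid IRQs) and 0xef has bits 0-3 and 5-7 set (valid DMA channels).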
*/
if (irq_num > 15 || dma_chan > 7 ||
!((1 << irq_num) & 0xdef8) || !((1 << dma_chan) & 0xef))
return;
if (request_irq(irq_num, a2150_interrupt, 0, dev->board_name, dev))
return;
/* DMA uses 1 buffer */
devpriv->dma = comedi_isadma_alloc(dev, 1, dma_chan, dma_chan,
A2150_DMA_BUFFER_SIZE,
COMEDI_ISADMA_READ);
if (!devpriv->dma) {
free_irq(irq_num, dev);
} else {
dev->irq = irq_num;
devpriv->irq_dma_bits = IRQ_LVL_BITS(irq_num) |
DMA_CHAN_BITS(dma_chan);
}
}
static void a2150_free_dma(struct comedi_device *dev)
{
struct a2150_private *devpriv = dev->private;
if (devpriv)
comedi_isadma_free(devpriv->dma);
}
static const struct a2150_board *a2150_probe(struct comedi_device *dev)
{
int id = ID_BITS(inw(dev->iobase + STATUS_REG));
if (id >= ARRAY_SIZE(a2150_boards))
return NULL;
return &a2150_boards[id];
}
static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct a2150_board *board;
struct a2150_private *devpriv;
struct comedi_subdevice *s;
static const int timeout = 2000;
int i;
int ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_request_region(dev, it->options[0], 0x1c);
if (ret)
return ret;
board = a2150_probe(dev);
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
/* an IRQ and DMA are required to support async commands */
a2150_alloc_irq_and_dma(dev, it);
dev->pacer = comedi_8254_io_alloc(dev->iobase + I8253_BASE_REG,
0, I8254_IO8, 0);
if (IS_ERR(dev->pacer))
return PTR_ERR(dev->pacer);
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
return ret;
/* analog input subdevice */
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_OTHER;
s->n_chan = 4;
s->maxdata = 0xffff;
s->range_table = &range_a2150;
s->insn_read = a2150_ai_rinsn;
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->len_chanlist = s->n_chan;
s->do_cmd = a2150_ai_cmd;
s->do_cmdtest = a2150_ai_cmdtest;
s->cancel = a2150_cancel;
}
/* set card's irq and dma levels */
outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG);
/* reset and sync adc clock circuitry */
outw_p(DPD_BIT | APD_BIT, dev->iobase + CONFIG_REG);
outw_p(DPD_BIT, dev->iobase + CONFIG_REG);
/* initialize configuration register */
devpriv->config_bits = 0;
outw(devpriv->config_bits, dev->iobase + CONFIG_REG);
/* wait until offset calibration is done, then enable analog inputs */
for (i = 0; i < timeout; i++) {
if ((DCAL_BIT & inw(dev->iobase + STATUS_REG)) == 0)
break;
usleep_range(1000, 3000);
}
if (i == timeout) {
dev_err(dev->class_dev,
"timed out waiting for offset calibration to complete\n");
return -ETIME;
}
devpriv->config_bits |= ENABLE0_BIT | ENABLE1_BIT;
outw(devpriv->config_bits, dev->iobase + CONFIG_REG);
return 0;
}
static void a2150_detach(struct comedi_device *dev)
{
if (dev->iobase)
outw(APD_BIT | DPD_BIT, dev->iobase + CONFIG_REG);
a2150_free_dma(dev);
comedi_legacy_detach(dev);
}
static struct comedi_driver ni_at_a2150_driver = {
.driver_name = "ni_at_a2150",
.module = THIS_MODULE,
.attach = a2150_attach,
.detach = a2150_detach,
};
module_comedi_driver(ni_at_a2150_driver);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
|
/*
* Linux V4L2 radio driver for the Griffin radioSHARK USB radio receiver
*
* Note that the radioSHARK presents its audio through a regular USB audio
* device; this driver only handles the tuning.
*
* The info necessary to drive the shark was taken from the small userspace
* shark.c program by Michael Rolig, which he kindly placed in the Public
* Domain.
*
* Copyright (c) 2012 Hans de Goede <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
#include <media/v4l2-device.h>
#include <media/drv-intf/tea575x.h>
#if defined(CONFIG_LEDS_CLASS) || \
(defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK_MODULE))
#define SHARK_USE_LEDS 1
#endif
/*
* Version Information
*/
MODULE_AUTHOR("Hans de Goede <[email protected]>");
MODULE_DESCRIPTION("Griffin radioSHARK, USB radio receiver driver");
MODULE_LICENSE("GPL");
#define SHARK_IN_EP 0x83
#define SHARK_OUT_EP 0x05
#define TEA575X_BIT_MONO (1<<22) /* 0 = stereo, 1 = mono */
#define TEA575X_BIT_BAND_MASK (3<<20)
#define TEA575X_BIT_BAND_FM (0<<20)
#define TB_LEN 6
#define DRV_NAME "radioshark"
#define v4l2_dev_to_shark(d) container_of(d, struct shark_device, v4l2_dev)
/* Note BLUE_IS_PULSE comes after NO_LEDS as it is a status bit, not a LED */
enum { BLUE_LED, BLUE_PULSE_LED, RED_LED, NO_LEDS, BLUE_IS_PULSE };
struct shark_device {
struct usb_device *usbdev;
struct v4l2_device v4l2_dev;
struct snd_tea575x tea;
#ifdef SHARK_USE_LEDS
struct work_struct led_work;
struct led_classdev leds[NO_LEDS];
char led_names[NO_LEDS][32];
atomic_t brightness[NO_LEDS];
unsigned long brightness_new;
#endif
u8 *transfer_buffer;
u32 last_val;
};
static atomic_t shark_instance = ATOMIC_INIT(0);
static void shark_write_val(struct snd_tea575x *tea, u32 val)
{
struct shark_device *shark = tea->private_data;
int i, res, actual_len;
/* Avoid unnecessary (slow) USB transfers */
if (shark->last_val == val)
return;
memset(shark->transfer_buffer, 0, TB_LEN);
shark->transfer_buffer[0] = 0xc0; /* Write shift register command */
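/*
* The command bits are OR'ed into the most significant byte of val; the
* remaining three bytes of the 32-bit shift-register value follow MSB
* first.
*/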
for (i = 0; i < 4; i++)
shark->transfer_buffer[i] |= (val >> (24 - i * 8)) & 0xff;
res = usb_interrupt_msg(shark->usbdev,
usb_sndintpipe(shark->usbdev, SHARK_OUT_EP),
shark->transfer_buffer, TB_LEN,
&actual_len, 1000);
if (res >= 0)
shark->last_val = val;
else
v4l2_err(&shark->v4l2_dev, "set-freq error: %d\n", res);
}
static u32 shark_read_val(struct snd_tea575x *tea)
{
struct shark_device *shark = tea->private_data;
int i, res, actual_len;
u32 val = 0;
memset(shark->transfer_buffer, 0, TB_LEN);
shark->transfer_buffer[0] = 0x80;
res = usb_interrupt_msg(shark->usbdev,
usb_sndintpipe(shark->usbdev, SHARK_OUT_EP),
shark->transfer_buffer, TB_LEN,
&actual_len, 1000);
if (res < 0) {
v4l2_err(&shark->v4l2_dev, "request-status error: %d\n", res);
return shark->last_val;
}
res = usb_interrupt_msg(shark->usbdev,
usb_rcvintpipe(shark->usbdev, SHARK_IN_EP),
shark->transfer_buffer, TB_LEN,
&actual_len, 1000);
if (res < 0) {
v4l2_err(&shark->v4l2_dev, "get-status error: %d\n", res);
return shark->last_val;
}
for (i = 0; i < 4; i++)
val |= shark->transfer_buffer[i] << (24 - i * 8);
shark->last_val = val;
/*
* The shark does not allow actually reading the stereo / mono pin :(
* So assume that when we're tuned to an FM station and mono has not
* been requested, that we're receiving stereo.
*/
if (((val & TEA575X_BIT_BAND_MASK) == TEA575X_BIT_BAND_FM) &&
!(val & TEA575X_BIT_MONO))
shark->tea.stereo = true;
else
shark->tea.stereo = false;
return val;
}
static const struct snd_tea575x_ops shark_tea_ops = {
.write_val = shark_write_val,
.read_val = shark_read_val,
};
#ifdef SHARK_USE_LEDS
static void shark_led_work(struct work_struct *work)
{
struct shark_device *shark =
container_of(work, struct shark_device, led_work);
int i, res, brightness, actual_len;
for (i = 0; i < 3; i++) {
if (!test_and_clear_bit(i, &shark->brightness_new))
continue;
brightness = atomic_read(&shark->brightness[i]);
memset(shark->transfer_buffer, 0, TB_LEN);
if (i != RED_LED) {
shark->transfer_buffer[0] = 0xA0 + i;
shark->transfer_buffer[1] = brightness;
} else
shark->transfer_buffer[0] = brightness ? 0xA9 : 0xA8;
res = usb_interrupt_msg(shark->usbdev,
usb_sndintpipe(shark->usbdev, 0x05),
shark->transfer_buffer, TB_LEN,
&actual_len, 1000);
if (res < 0)
v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n",
shark->led_names[i], res);
}
}
static void shark_led_set_blue(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct shark_device *shark =
container_of(led_cdev, struct shark_device, leds[BLUE_LED]);
atomic_set(&shark->brightness[BLUE_LED], value);
set_bit(BLUE_LED, &shark->brightness_new);
clear_bit(BLUE_IS_PULSE, &shark->brightness_new);
schedule_work(&shark->led_work);
}
static void shark_led_set_blue_pulse(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct shark_device *shark = container_of(led_cdev,
struct shark_device, leds[BLUE_PULSE_LED]);
atomic_set(&shark->brightness[BLUE_PULSE_LED], 256 - value);
set_bit(BLUE_PULSE_LED, &shark->brightness_new);
set_bit(BLUE_IS_PULSE, &shark->brightness_new);
schedule_work(&shark->led_work);
}
static void shark_led_set_red(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct shark_device *shark =
container_of(led_cdev, struct shark_device, leds[RED_LED]);
atomic_set(&shark->brightness[RED_LED], value);
set_bit(RED_LED, &shark->brightness_new);
schedule_work(&shark->led_work);
}
static const struct led_classdev shark_led_templates[NO_LEDS] = {
[BLUE_LED] = {
.name = "%s:blue:",
.brightness = LED_OFF,
.max_brightness = 127,
.brightness_set = shark_led_set_blue,
},
[BLUE_PULSE_LED] = {
.name = "%s:blue-pulse:",
.brightness = LED_OFF,
.max_brightness = 255,
.brightness_set = shark_led_set_blue_pulse,
},
[RED_LED] = {
.name = "%s:red:",
.brightness = LED_OFF,
.max_brightness = 1,
.brightness_set = shark_led_set_red,
},
};
static int shark_register_leds(struct shark_device *shark, struct device *dev)
{
int i, retval;
atomic_set(&shark->brightness[BLUE_LED], 127);
INIT_WORK(&shark->led_work, shark_led_work);
for (i = 0; i < NO_LEDS; i++) {
shark->leds[i] = shark_led_templates[i];
snprintf(shark->led_names[i], sizeof(shark->led_names[0]),
shark->leds[i].name, shark->v4l2_dev.name);
shark->leds[i].name = shark->led_names[i];
retval = led_classdev_register(dev, &shark->leds[i]);
if (retval) {
v4l2_err(&shark->v4l2_dev,
"couldn't register led: %s\n",
shark->led_names[i]);
return retval;
}
}
return 0;
}
static void shark_unregister_leds(struct shark_device *shark)
{
int i;
for (i = 0; i < NO_LEDS; i++)
led_classdev_unregister(&shark->leds[i]);
cancel_work_sync(&shark->led_work);
}
static inline void shark_resume_leds(struct shark_device *shark)
{
if (test_bit(BLUE_IS_PULSE, &shark->brightness_new))
set_bit(BLUE_PULSE_LED, &shark->brightness_new);
else
set_bit(BLUE_LED, &shark->brightness_new);
set_bit(RED_LED, &shark->brightness_new);
schedule_work(&shark->led_work);
}
#else
static int shark_register_leds(struct shark_device *shark, struct device *dev)
{
v4l2_warn(&shark->v4l2_dev,
"CONFIG_LEDS_CLASS not enabled, LED support disabled\n");
return 0;
}
static inline void shark_unregister_leds(struct shark_device *shark) { }
static inline void shark_resume_leds(struct shark_device *shark) { }
#endif
static void usb_shark_disconnect(struct usb_interface *intf)
{
struct v4l2_device *v4l2_dev = usb_get_intfdata(intf);
struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
mutex_lock(&shark->tea.mutex);
v4l2_device_disconnect(&shark->v4l2_dev);
snd_tea575x_exit(&shark->tea);
mutex_unlock(&shark->tea.mutex);
shark_unregister_leds(shark);
v4l2_device_put(&shark->v4l2_dev);
}
static void usb_shark_release(struct v4l2_device *v4l2_dev)
{
struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
v4l2_device_unregister(&shark->v4l2_dev);
kfree(shark->transfer_buffer);
kfree(shark);
}
static int usb_shark_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct shark_device *shark;
int retval = -ENOMEM;
static const u8 ep_addresses[] = {
SHARK_IN_EP | USB_DIR_IN,
SHARK_OUT_EP | USB_DIR_OUT,
0};
/* Are the expected endpoints present? */
if (!usb_check_int_endpoints(intf, ep_addresses)) {
dev_err(&intf->dev, "Invalid radioSHARK device\n");
return -EINVAL;
}
shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
if (!shark)
return retval;
shark->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL);
if (!shark->transfer_buffer)
goto err_alloc_buffer;
v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance);
retval = shark_register_leds(shark, &intf->dev);
if (retval)
goto err_reg_leds;
shark->v4l2_dev.release = usb_shark_release;
retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev);
if (retval) {
v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n");
goto err_reg_dev;
}
shark->usbdev = interface_to_usbdev(intf);
shark->tea.v4l2_dev = &shark->v4l2_dev;
shark->tea.private_data = shark;
shark->tea.radio_nr = -1;
shark->tea.ops = &shark_tea_ops;
shark->tea.cannot_mute = true;
shark->tea.has_am = true;
strscpy(shark->tea.card, "Griffin radioSHARK",
sizeof(shark->tea.card));
usb_make_path(shark->usbdev, shark->tea.bus_info,
sizeof(shark->tea.bus_info));
retval = snd_tea575x_init(&shark->tea, THIS_MODULE);
if (retval) {
v4l2_err(&shark->v4l2_dev, "couldn't init tea5757\n");
goto err_init_tea;
}
return 0;
err_init_tea:
v4l2_device_unregister(&shark->v4l2_dev);
err_reg_dev:
shark_unregister_leds(shark);
err_reg_leds:
kfree(shark->transfer_buffer);
err_alloc_buffer:
kfree(shark);
return retval;
}
#ifdef CONFIG_PM
static int usb_shark_suspend(struct usb_interface *intf, pm_message_t message)
{
return 0;
}
static int usb_shark_resume(struct usb_interface *intf)
{
struct v4l2_device *v4l2_dev = usb_get_intfdata(intf);
struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
mutex_lock(&shark->tea.mutex);
snd_tea575x_set_freq(&shark->tea);
mutex_unlock(&shark->tea.mutex);
shark_resume_leds(shark);
return 0;
}
#endif
/* Specify the bcdDevice value, as the radioSHARK and radioSHARK2 share ids */
static const struct usb_device_id usb_shark_device_table[] = {
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION |
USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = 0x077d,
.idProduct = 0x627a,
.bcdDevice_lo = 0x0001,
.bcdDevice_hi = 0x0001,
.bInterfaceClass = 3,
},
{ }
};
MODULE_DEVICE_TABLE(usb, usb_shark_device_table);
static struct usb_driver usb_shark_driver = {
.name = DRV_NAME,
.probe = usb_shark_probe,
.disconnect = usb_shark_disconnect,
.id_table = usb_shark_device_table,
#ifdef CONFIG_PM
.suspend = usb_shark_suspend,
.resume = usb_shark_resume,
.reset_resume = usb_shark_resume,
#endif
};
module_usb_driver(usb_shark_driver);
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_AHCI_REMAP_H
#define _LINUX_AHCI_REMAP_H
#include <linux/sizes.h>
#define AHCI_VSCAP 0xa4
#define AHCI_REMAP_CAP 0x800
/* device class code */
#define AHCI_REMAP_N_DCC 0x880
/* remap-device base relative to ahci-bar */
#define AHCI_REMAP_N_OFFSET SZ_16K
#define AHCI_REMAP_N_SIZE SZ_16K
#define AHCI_MAX_REMAP 3
static inline unsigned int ahci_remap_dcc(int i)
{
return AHCI_REMAP_N_DCC + i * 0x80;
}
static inline unsigned int ahci_remap_base(int i)
{
return AHCI_REMAP_N_OFFSET + i * AHCI_REMAP_N_SIZE;
}
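/*
 * Illustrative values, derived purely from the defines above: for the three
 * possible remapped devices i = 0..2, ahci_remap_dcc(i) yields 0x880, 0x900
 * and 0x980, and ahci_remap_base(i) yields offsets of 16K, 32K and 48K from
 * the AHCI BAR.
 */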
#endif /* _LINUX_AHCI_REMAP_H */
|
// SPDX-License-Identifier: GPL-2.0
/*
* cros_ec_sensors_core - Common function for Chrome OS EC sensor driver.
*
* Copyright (C) 2016 Google, Inc
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iio/buffer.h>
#include <linux/iio/common/cros_ec_sensors_core.h>
#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/platform_device.h>
/*
* Hard coded to the FIFO size of the first device to support the sensor
* FIFO. That EC has a 2048 byte FIFO and triggers an interrupt when the
* FIFO is 2/3 full.
*/
#define CROS_EC_FIFO_SIZE (2048 * 2 / 3)
static int cros_ec_get_host_cmd_version_mask(struct cros_ec_device *ec_dev,
u16 cmd_offset, u16 cmd, u32 *mask)
{
int ret;
struct {
struct cros_ec_command msg;
union {
struct ec_params_get_cmd_versions params;
struct ec_response_get_cmd_versions resp;
};
} __packed buf = {
.msg = {
.command = EC_CMD_GET_CMD_VERSIONS + cmd_offset,
.insize = sizeof(struct ec_response_get_cmd_versions),
.outsize = sizeof(struct ec_params_get_cmd_versions)
},
.params = {.cmd = cmd}
};
ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
if (ret >= 0)
*mask = buf.resp.version_mask;
return ret;
}
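/*
 * The returned mask has bit N set when command version N is supported;
 * cros_ec_sensors_core_init() below picks the highest supported version
 * with fls(ver_mask) - 1.
 */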
static void get_default_min_max_freq(enum motionsensor_type type,
u32 *min_freq,
u32 *max_freq,
u32 *max_fifo_events)
{
/*
* We don't know the FIFO size, so fall back to the size previously used
* by older hardware.
*/
*max_fifo_events = CROS_EC_FIFO_SIZE;
switch (type) {
case MOTIONSENSE_TYPE_ACCEL:
*min_freq = 12500;
*max_freq = 100000;
break;
case MOTIONSENSE_TYPE_GYRO:
*min_freq = 25000;
*max_freq = 100000;
break;
case MOTIONSENSE_TYPE_MAG:
*min_freq = 5000;
*max_freq = 25000;
break;
case MOTIONSENSE_TYPE_PROX:
case MOTIONSENSE_TYPE_LIGHT:
*min_freq = 100;
*max_freq = 50000;
break;
case MOTIONSENSE_TYPE_BARO:
*min_freq = 250;
*max_freq = 20000;
break;
case MOTIONSENSE_TYPE_ACTIVITY:
default:
*min_freq = 0;
*max_freq = 0;
break;
}
}
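/*
 * The frequencies above are expressed in mHz, matching the EC protocol.
 * They are later split into an integer Hz part and a micro-Hz remainder
 * (see the frequencies[] conversion in cros_ec_sensors_core_init());
 * e.g. the 12500 mHz accelerometer minimum becomes 12 Hz + 500000 uHz,
 * i.e. 12.5 Hz.
 */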
static int cros_ec_sensor_set_ec_rate(struct cros_ec_sensors_core_state *st,
int rate)
{
int ret;
if (rate > U16_MAX)
rate = U16_MAX;
mutex_lock(&st->cmd_lock);
st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
st->param.ec_rate.data = rate;
ret = cros_ec_motion_send_host_cmd(st, 0);
mutex_unlock(&st->cmd_lock);
return ret;
}
static ssize_t cros_ec_sensor_set_report_latency(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
int integer, fract, ret;
int latency;
ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract);
if (ret)
return ret;
/* EC rate is in ms. */
latency = integer * 1000 + fract / 1000;
ret = cros_ec_sensor_set_ec_rate(st, latency);
if (ret < 0)
return ret;
return len;
}
static ssize_t cros_ec_sensor_get_report_latency(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
int latency, ret;
mutex_lock(&st->cmd_lock);
st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
st->param.ec_rate.data = EC_MOTION_SENSE_NO_VALUE;
ret = cros_ec_motion_send_host_cmd(st, 0);
latency = st->resp->ec_rate.ret;
mutex_unlock(&st->cmd_lock);
if (ret < 0)
return ret;
return sprintf(buf, "%d.%06u\n",
latency / 1000,
(latency % 1000) * 1000);
}
static IIO_DEVICE_ATTR(hwfifo_timeout, 0644,
cros_ec_sensor_get_report_latency,
cros_ec_sensor_set_report_latency, 0);
static ssize_t hwfifo_watermark_max_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
return sprintf(buf, "%d\n", st->fifo_max_event_count);
}
static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
static const struct iio_dev_attr *cros_ec_sensor_fifo_attributes[] = {
&iio_dev_attr_hwfifo_timeout,
&iio_dev_attr_hwfifo_watermark_max,
NULL,
};
int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
s16 *data,
s64 timestamp)
{
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
s16 *out;
s64 delta;
unsigned int i;
/*
* Ignore samples if the buffer is not enabled: this can happen when the
* ODR is already set but the buffer has not been enabled yet.
*
* Note: iio_device_claim_buffer_mode() returns -EBUSY if the buffer
* is not enabled.
*/
if (iio_device_claim_buffer_mode(indio_dev) < 0)
return 0;
out = (s16 *)st->samples;
iio_for_each_active_channel(indio_dev, i) {
*out = data[i];
out++;
}
if (iio_device_get_clock(indio_dev) != CLOCK_BOOTTIME)
delta = iio_get_time_ns(indio_dev) - cros_ec_get_time_ns();
else
delta = 0;
iio_push_to_buffers_with_timestamp(indio_dev, st->samples,
timestamp + delta);
iio_device_release_buffer_mode(indio_dev);
return 0;
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_push_data);
static void cros_ec_sensors_core_clean(void *arg)
{
struct platform_device *pdev = (struct platform_device *)arg;
struct cros_ec_sensorhub *sensor_hub =
dev_get_drvdata(pdev->dev.parent);
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
u8 sensor_num = st->param.info.sensor_num;
cros_ec_sensorhub_unregister_push_data(sensor_hub, sensor_num);
}
/**
* cros_ec_sensors_core_init() - basic initialization of the core structure
* @pdev: platform device created for the sensor
* @indio_dev: iio device structure of the device
* @physical_device: true if the device refers to a physical device
* @trigger_capture: function pointer to call when the buffer is triggered,
* kept for backward compatibility.
*
* Return: 0 on success, -errno on failure.
*/
int cros_ec_sensors_core_init(struct platform_device *pdev,
struct iio_dev *indio_dev,
bool physical_device,
cros_ec_sensors_capture_t trigger_capture)
{
struct device *dev = &pdev->dev;
struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
struct cros_ec_sensorhub *sensor_hub = dev_get_drvdata(dev->parent);
struct cros_ec_dev *ec = sensor_hub->ec;
struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
u32 ver_mask, temp;
int frequencies[ARRAY_SIZE(state->frequencies) / 2] = { 0 };
int ret, i;
platform_set_drvdata(pdev, indio_dev);
state->ec = ec->ec_dev;
state->msg = devm_kzalloc(&pdev->dev, sizeof(*state->msg) +
max((u16)sizeof(struct ec_params_motion_sense),
state->ec->max_response), GFP_KERNEL);
if (!state->msg)
return -ENOMEM;
state->resp = (struct ec_response_motion_sense *)state->msg->data;
mutex_init(&state->cmd_lock);
ret = cros_ec_get_host_cmd_version_mask(state->ec,
ec->cmd_offset,
EC_CMD_MOTION_SENSE_CMD,
&ver_mask);
if (ret < 0)
return ret;
/* Set up the host command structure. */
state->msg->version = fls(ver_mask) - 1;
state->msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
state->msg->outsize = sizeof(struct ec_params_motion_sense);
indio_dev->name = pdev->name;
if (physical_device) {
enum motionsensor_location loc;
state->param.cmd = MOTIONSENSE_CMD_INFO;
state->param.info.sensor_num = sensor_platform->sensor_num;
ret = cros_ec_motion_send_host_cmd(state, 0);
if (ret) {
dev_warn(dev, "Can not access sensor info\n");
return ret;
}
state->type = state->resp->info.type;
loc = state->resp->info.location;
if (loc == MOTIONSENSE_LOC_BASE)
indio_dev->label = "accel-base";
else if (loc == MOTIONSENSE_LOC_LID)
indio_dev->label = "accel-display";
else if (loc == MOTIONSENSE_LOC_CAMERA)
indio_dev->label = "accel-camera";
/* Set sign vector, only used for backward compatibility. */
memset(state->sign, 1, CROS_EC_SENSOR_MAX_AXIS);
for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
state->calib[i].scale = MOTION_SENSE_DEFAULT_SCALE;
/* frequencies[0] is left at 0: a valid value used to stop the device */
if (state->msg->version < 3) {
get_default_min_max_freq(state->resp->info.type,
&frequencies[1],
&frequencies[2],
&state->fifo_max_event_count);
} else {
if (state->resp->info_3.max_frequency == 0) {
get_default_min_max_freq(state->resp->info.type,
&frequencies[1],
&frequencies[2],
&temp);
} else {
frequencies[1] = state->resp->info_3.min_frequency;
frequencies[2] = state->resp->info_3.max_frequency;
}
state->fifo_max_event_count = state->resp->info_3.fifo_max_event_count;
}
for (i = 0; i < ARRAY_SIZE(frequencies); i++) {
state->frequencies[2 * i] = frequencies[i] / 1000;
state->frequencies[2 * i + 1] =
(frequencies[i] % 1000) * 1000;
}
if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
/*
* Create a software buffer, feed by the EC FIFO.
* We can not use trigger here, as events are generated
* as soon as sample_frequency is set.
*/
ret = devm_iio_kfifo_buffer_setup_ext(dev, indio_dev, NULL,
cros_ec_sensor_fifo_attributes);
if (ret)
return ret;
/* Timestamps coming from the FIFO are in ns since boot. */
ret = iio_device_set_clock(indio_dev, CLOCK_BOOTTIME);
if (ret)
return ret;
} else {
/*
* The only way to get samples in buffer is to set a
* software trigger (systrig, hrtimer).
*/
ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
NULL, trigger_capture, NULL);
if (ret)
return ret;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_init);
/**
* cros_ec_sensors_core_register() - Register callback to FIFO and IIO when
* sensor is ready.
* It must be called at the end of the sensor probe routine.
* @dev: device created for the sensor
* @indio_dev: iio device structure of the device
* @push_data: function to call when cros_ec_sensorhub receives
* a sample for that sensor.
*
* Return: 0 on success, -errno on failure.
*/
int cros_ec_sensors_core_register(struct device *dev,
struct iio_dev *indio_dev,
cros_ec_sensorhub_push_data_cb_t push_data)
{
struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
struct cros_ec_sensorhub *sensor_hub = dev_get_drvdata(dev->parent);
struct platform_device *pdev = to_platform_device(dev);
struct cros_ec_dev *ec = sensor_hub->ec;
int ret;
ret = devm_iio_device_register(dev, indio_dev);
if (ret)
return ret;
if (!push_data ||
!cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO))
return 0;
ret = cros_ec_sensorhub_register_push_data(
sensor_hub, sensor_platform->sensor_num,
indio_dev, push_data);
if (ret)
return ret;
return devm_add_action_or_reset(
dev, cros_ec_sensors_core_clean, pdev);
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_register);
/**
* cros_ec_motion_send_host_cmd() - send motion sense host command
* @state: pointer to state information for device
* @opt_length: optional length to reduce the response size, useful on the data
* path. Otherwise, the maximum allowed response size is used
*
* When called, the sub-command is assumed to be set in param->cmd.
*
* Return: 0 on success, -errno on failure.
*/
int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state,
u16 opt_length)
{
int ret;
if (opt_length)
state->msg->insize = min(opt_length, state->ec->max_response);
else
state->msg->insize = state->ec->max_response;
memcpy(state->msg->data, &state->param, sizeof(state->param));
ret = cros_ec_cmd_xfer_status(state->ec, state->msg);
if (ret < 0)
return ret;
if (ret &&
state->resp != (struct ec_response_motion_sense *)state->msg->data)
memcpy(state->resp, state->msg->data, ret);
return 0;
}
EXPORT_SYMBOL_GPL(cros_ec_motion_send_host_cmd);
static ssize_t cros_ec_sensors_calibrate(struct iio_dev *indio_dev,
uintptr_t private, const struct iio_chan_spec *chan,
const char *buf, size_t len)
{
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
int ret, i;
bool calibrate;
ret = kstrtobool(buf, &calibrate);
if (ret < 0)
return ret;
if (!calibrate)
return -EINVAL;
mutex_lock(&st->cmd_lock);
st->param.cmd = MOTIONSENSE_CMD_PERFORM_CALIB;
ret = cros_ec_motion_send_host_cmd(st, 0);
if (ret != 0) {
dev_warn(&indio_dev->dev, "Unable to calibrate sensor\n");
} else {
/* Save values */
for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
st->calib[i].offset = st->resp->perform_calib.offset[i];
}
mutex_unlock(&st->cmd_lock);
return ret ? ret : len;
}
static ssize_t cros_ec_sensors_id(struct iio_dev *indio_dev,
uintptr_t private,
const struct iio_chan_spec *chan, char *buf)
{
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
return snprintf(buf, PAGE_SIZE, "%d\n", st->param.info.sensor_num);
}
const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[] = {
{
.name = "calibrate",
.shared = IIO_SHARED_BY_ALL,
.write = cros_ec_sensors_calibrate
},
{
.name = "id",
.shared = IIO_SHARED_BY_ALL,
.read = cros_ec_sensors_id
},
{ },
};
EXPORT_SYMBOL_GPL(cros_ec_sensors_ext_info);
/**
* cros_ec_sensors_idx_to_reg - convert index into offset in shared memory
* @st: pointer to state information for device
* @idx: sensor index (should be element of enum sensor_index)
*
* Return: address to read at
*/
static unsigned int cros_ec_sensors_idx_to_reg(
struct cros_ec_sensors_core_state *st,
unsigned int idx)
{
/*
* When using the LPC interface, there is only space for two accelerometers
* and one gyroscope. The first halfword of the MOTIONSENSE_TYPE_ACCEL area
* is used by the angle value.
*/
if (st->type == MOTIONSENSE_TYPE_ACCEL)
return EC_MEMMAP_ACC_DATA + sizeof(u16) *
(1 + idx + st->param.info.sensor_num *
CROS_EC_SENSOR_MAX_AXIS);
return EC_MEMMAP_GYRO_DATA + sizeof(u16) * idx;
}
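/*
 * Worked example (illustrative, assuming CROS_EC_SENSOR_MAX_AXIS is 3):
 * for the second accelerometer (sensor_num == 1) and idx == CROS_EC_SENSOR_Y,
 * the register is EC_MEMMAP_ACC_DATA + 2 * (1 + 1 + 1 * 3), i.e. 10 bytes
 * into the shared accelerometer area (the first halfword holds the angle).
 */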
static int cros_ec_sensors_cmd_read_u8(struct cros_ec_device *ec,
unsigned int offset, u8 *dest)
{
return ec->cmd_readmem(ec, offset, 1, dest);
}
static int cros_ec_sensors_cmd_read_u16(struct cros_ec_device *ec,
unsigned int offset, u16 *dest)
{
__le16 tmp;
int ret = ec->cmd_readmem(ec, offset, 2, &tmp);
if (ret >= 0)
*dest = le16_to_cpu(tmp);
return ret;
}
/**
* cros_ec_sensors_read_until_not_busy() - read the EC status byte until it is not busy
*
* @st: pointer to state information for device
*
* Read the EC status byte until it no longer reports busy.
* Return: 8-bit status if ok, -errno on failure.
*/
static int cros_ec_sensors_read_until_not_busy(
struct cros_ec_sensors_core_state *st)
{
struct cros_ec_device *ec = st->ec;
u8 status;
int ret, attempts = 0;
ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS, &status);
if (ret < 0)
return ret;
while (status & EC_MEMMAP_ACC_STATUS_BUSY_BIT) {
/* Give up after enough attempts, return error. */
if (attempts++ >= 50)
return -EIO;
/* Small delay every so often. */
if (attempts % 5 == 0)
msleep(25);
ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS,
&status);
if (ret < 0)
return ret;
}
return status;
}
/**
* cros_ec_sensors_read_data_unsafe() - read acceleration data from EC shared memory
* @indio_dev: pointer to IIO device
* @scan_mask: bitmap of the sensor indices to scan
* @data: location to store data
*
* This is the unsafe function for reading the EC data. It does not guarantee
* that the EC will not modify the data as it is being read in.
*
* Return: 0 on success, -errno on failure.
*/
static int cros_ec_sensors_read_data_unsafe(struct iio_dev *indio_dev,
unsigned long scan_mask, s16 *data)
{
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
struct cros_ec_device *ec = st->ec;
unsigned int i;
int ret;
/* Read all sensors enabled in scan_mask. Each value is 2 bytes. */
for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) {
ret = cros_ec_sensors_cmd_read_u16(ec,
cros_ec_sensors_idx_to_reg(st, i),
data);
if (ret < 0)
return ret;
*data *= st->sign[i];
data++;
}
return 0;
}
/**
* cros_ec_sensors_read_lpc() - read acceleration data from EC shared memory.
* @indio_dev: pointer to IIO device.
* @scan_mask: bitmap of the sensor indices to scan.
* @data: location to store data.
*
* Note: this is the safe function for reading the EC data. It guarantees
* that the data sampled was not modified by the EC while being read.
*
* Return: 0 on success, -errno on failure.
*/
int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev,
unsigned long scan_mask, s16 *data)
{
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
struct cros_ec_device *ec = st->ec;
u8 samp_id = 0xff, status = 0;
int ret, attempts = 0;
/*
* Continually read all data from EC until the status byte after
* all reads reflects that the EC is not busy and the sample id
* matches the sample id from before all reads. This guarantees
* that data read in was not modified by the EC while reading.
*/
while ((status & (EC_MEMMAP_ACC_STATUS_BUSY_BIT |
EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK)) != samp_id) {
/* If we have tried to read too many times, return error. */
if (attempts++ >= 5)
return -EIO;
/* Read status byte until EC is not busy. */
ret = cros_ec_sensors_read_until_not_busy(st);
if (ret < 0)
return ret;
/*
* Store the current sample id so that we can compare to the
* sample id after reading the data.
*/
samp_id = ret & EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK;
/* Read all EC data, format it, and store it into data. */
ret = cros_ec_sensors_read_data_unsafe(indio_dev, scan_mask,
data);
if (ret < 0)
return ret;
/* Read status byte. */
ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS,
&status);
if (ret < 0)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_read_lpc);
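/*
 * Note: the retry loop above follows the same idea as a sequence-counter
 * (seqlock) read side: sample the id, read the data, then re-check the busy
 * bit and sample id, retrying on mismatch so a torn read is never returned.
 */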
/**
* cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol
* @indio_dev: pointer to IIO device
* @scan_mask: bitmap of the sensor indices to scan
* @data: location to store data
*
* Return: 0 on success, -errno on failure.
*/
int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev,
unsigned long scan_mask, s16 *data)
{
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
int ret;
unsigned int i;
/* Read all sensor data through a command. */
st->param.cmd = MOTIONSENSE_CMD_DATA;
ret = cros_ec_motion_send_host_cmd(st, sizeof(st->resp->data));
if (ret != 0) {
dev_warn(&indio_dev->dev, "Unable to read sensor data\n");
return ret;
}
for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) {
*data = st->resp->data.data[i];
data++;
}
return 0;
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_read_cmd);
/**
* cros_ec_sensors_capture() - the trigger handler function
* @irq: the interrupt number.
* @p: a pointer to the poll function.
*
* On a trigger event occurring, if the pollfunc is attached then this
* handler is called as a threaded interrupt (and hence may sleep). It
* is responsible for grabbing data from the device and pushing it into
* the associated buffer.
*
* Return: IRQ_HANDLED
*/
irqreturn_t cros_ec_sensors_capture(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
int ret;
mutex_lock(&st->cmd_lock);
/* Clear capture data. */
memset(st->samples, 0, indio_dev->scan_bytes);
/* Read data based on which channels are enabled in scan mask. */
ret = st->read_ec_sensors_data(indio_dev,
*(indio_dev->active_scan_mask),
(s16 *)st->samples);
if (ret < 0)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, st->samples,
iio_get_time_ns(indio_dev));
done:
/*
* Tell the core we are done with this trigger and ready for the
* next one.
*/
iio_trigger_notify_done(indio_dev->trig);
mutex_unlock(&st->cmd_lock);
return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_capture);
/**
* cros_ec_sensors_core_read() - function to request a value from the sensor
* @st: pointer to state information for device
* @chan: channel specification structure table
* @val: will contain one element making up the returned value
* @val2: will contain another element making up the returned value
* @mask: specifies which values to be requested
*
* Return: the type of value returned by the device
*/
int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
int ret, frequency;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
st->param.sensor_odr.data =
EC_MOTION_SENSE_NO_VALUE;
ret = cros_ec_motion_send_host_cmd(st, 0);
if (ret)
break;
frequency = st->resp->sensor_odr.ret;
*val = frequency / 1000;
*val2 = (frequency % 1000) * 1000;
ret = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read);
/**
* cros_ec_sensors_core_read_avail() - get available values
* @indio_dev: pointer to the IIO device
* @chan: channel specification structure table
* @vals: list of available values
* @type: type of data returned
* @length: number of data returned in the array
* @mask: specifies which values to be requested
*
* Return: an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST
*/
int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals,
int *type,
int *length,
long mask)
{
struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
*length = ARRAY_SIZE(state->frequencies);
*vals = (const int *)&state->frequencies;
*type = IIO_VAL_INT_PLUS_MICRO;
return IIO_AVAIL_LIST;
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read_avail);
/**
* cros_ec_sensors_core_write() - function to write a value to the sensor
* @st: pointer to state information for device
* @chan: channel specification structure table
* @val: first part of value to write
* @val2: second part of value to write
* @mask: specifies which values to write
*
* Return: the type of value returned by the device
*/
int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
int ret, frequency;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
frequency = val * 1000 + val2 / 1000;
st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
st->param.sensor_odr.data = frequency;
/* Always round up, so the caller gets at least what it asks for. */
st->param.sensor_odr.roundup = 1;
ret = cros_ec_motion_send_host_cmd(st, 0);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write);
static int __maybe_unused cros_ec_sensors_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
int ret = 0;
if (st->range_updated) {
mutex_lock(&st->cmd_lock);
st->param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
st->param.sensor_range.data = st->curr_range;
st->param.sensor_range.roundup = 1;
ret = cros_ec_motion_send_host_cmd(st, 0);
mutex_unlock(&st->cmd_lock);
}
return ret;
}
SIMPLE_DEV_PM_OPS(cros_ec_sensors_pm_ops, NULL, cros_ec_sensors_resume);
EXPORT_SYMBOL_GPL(cros_ec_sensors_pm_ops);
MODULE_DESCRIPTION("ChromeOS EC sensor hub core functions");
MODULE_LICENSE("GPL v2");
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2020 NXP
*/
#ifndef _SJA1105_VL_H
#define _SJA1105_VL_H
#include "sja1105.h"
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_VL)
int sja1105_vl_redirect(struct sja1105_private *priv, int port,
struct netlink_ext_ack *extack, unsigned long cookie,
struct sja1105_key *key, unsigned long destports,
bool append);
int sja1105_vl_delete(struct sja1105_private *priv, int port,
struct sja1105_rule *rule,
struct netlink_ext_ack *extack);
int sja1105_vl_gate(struct sja1105_private *priv, int port,
struct netlink_ext_ack *extack, unsigned long cookie,
struct sja1105_key *key, u32 index, s32 prio,
u64 base_time, u64 cycle_time, u64 cycle_time_ext,
u32 num_entries, struct action_gate_entry *entries);
int sja1105_vl_stats(struct sja1105_private *priv, int port,
struct sja1105_rule *rule, struct flow_stats *stats,
struct netlink_ext_ack *extack);
#else
static inline int sja1105_vl_redirect(struct sja1105_private *priv, int port,
struct netlink_ext_ack *extack,
unsigned long cookie,
struct sja1105_key *key,
unsigned long destports,
bool append)
{
NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
return -EOPNOTSUPP;
}
static inline int sja1105_vl_delete(struct sja1105_private *priv,
int port, struct sja1105_rule *rule,
struct netlink_ext_ack *extack)
{
NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
return -EOPNOTSUPP;
}
static inline int sja1105_vl_gate(struct sja1105_private *priv, int port,
struct netlink_ext_ack *extack,
unsigned long cookie,
struct sja1105_key *key, u32 index, s32 prio,
u64 base_time, u64 cycle_time,
u64 cycle_time_ext, u32 num_entries,
struct action_gate_entry *entries)
{
NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
return -EOPNOTSUPP;
}
static inline int sja1105_vl_stats(struct sja1105_private *priv, int port,
struct sja1105_rule *rule,
struct flow_stats *stats,
struct netlink_ext_ack *extack)
{
NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
return -EOPNOTSUPP;
}
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_VL) */
#endif /* _SJA1105_VL_H */
|
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* Copyright(c) 2019 Intel Corporation
*
* Author: Keyon Jie <[email protected]>
*/
#ifndef __SOF_INTEL_HDA_IPC_H
#define __SOF_INTEL_HDA_IPC_H
/*
* Primary register, mapped to
* - DIPCTDR (HIPCIDR) in sideband IPC (cAVS 1.8+)
* - DIPCT in cAVS 1.5 IPC
*
* Secondary register, mapped to:
* - DIPCTDD (HIPCIDD) in sideband IPC (cAVS 1.8+)
* - DIPCTE in cAVS 1.5 IPC
*/
/* Common bits in primary register */
/* Reserved for doorbell */
#define HDA_IPC_RSVD_31 BIT(31)
/* Target, 0 - normal message, 1 - compact message (cAVS compatible) */
#define HDA_IPC_MSG_COMPACT BIT(30)
/* Direction, 0 - request, 1 - response */
#define HDA_IPC_RSP BIT(29)
#define HDA_IPC_TYPE_SHIFT 24
#define HDA_IPC_TYPE_MASK GENMASK(28, 24)
#define HDA_IPC_TYPE(x) ((x) << HDA_IPC_TYPE_SHIFT)
#define HDA_IPC_PM_GATE HDA_IPC_TYPE(0x8U)
/* Command specific payload bits in secondary register */
/* Disable DMA tracing (0 - keep tracing, 1 - disable DMA trace) */
#define HDA_PM_NO_DMA_TRACE BIT(4)
/* Prevent clock gating (0 - cg allowed, 1 - DSP clock always on) */
#define HDA_PM_PCG BIT(3)
/* Prevent power gating (0 - deep power state transitions allowed) */
#define HDA_PM_PPG BIT(2)
/* Indicates whether streaming is active */
#define HDA_PM_PG_STREAMING BIT(1)
#define HDA_PM_PG_RSVD BIT(0)
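/*
 * Example (illustrative only, derived from the defines above): a compact
 * PM_GATE request would set HDA_IPC_MSG_COMPACT | HDA_IPC_PM_GATE in the
 * primary register (HDA_IPC_PM_GATE is HDA_IPC_TYPE(0x8), i.e. 0x8 << 24 =
 * 0x08000000), and place the payload flags, e.g. HDA_PM_PCG | HDA_PM_PPG |
 * HDA_PM_PG_STREAMING, in the secondary register. The payload actually used
 * is built by the SOF core/platform code.
 */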
irqreturn_t cnl_ipc_irq_thread(int irq, void *context);
int cnl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
void cnl_ipc_dump(struct snd_sof_dev *sdev);
void cnl_ipc4_dump(struct snd_sof_dev *sdev);
#endif
|
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES.
#include "rss.h"
#define mlx5e_rss_warn(__dev, format, ...) \
dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
static const struct mlx5e_rss_params_traffic_type rss_default_config[MLX5E_NUM_INDIR_TIRS] = {
[MLX5_TT_IPV4_TCP] = {
.l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
},
[MLX5_TT_IPV6_TCP] = {
.l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
},
[MLX5_TT_IPV4_UDP] = {
.l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
},
[MLX5_TT_IPV6_UDP] = {
.l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
},
[MLX5_TT_IPV4_IPSEC_AH] = {
.l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
.l4_prot_type = 0,
.rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
},
[MLX5_TT_IPV6_IPSEC_AH] = {
.l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
.l4_prot_type = 0,
.rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
},
[MLX5_TT_IPV4_IPSEC_ESP] = {
.l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
.l4_prot_type = 0,
.rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
},
[MLX5_TT_IPV6_IPSEC_ESP] = {
.l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
.l4_prot_type = 0,
.rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
},
[MLX5_TT_IPV4] = {
.l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
.l4_prot_type = 0,
.rx_hash_fields = MLX5_HASH_IP,
},
[MLX5_TT_IPV6] = {
.l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
.l4_prot_type = 0,
.rx_hash_fields = MLX5_HASH_IP,
},
};
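/*
 * Summary of the defaults above: TCP and UDP traffic types hash on the IP
 * addresses plus L4 ports, IPsec AH/ESP traffic types hash on the addresses
 * plus the SPI, and plain IPv4/IPv6 falls back to hashing on the addresses
 * only.
 */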
struct mlx5e_rss_params_traffic_type
mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt)
{
return rss_default_config[tt];
}
struct mlx5e_rss {
struct mlx5e_rss_params_hash hash;
struct mlx5e_rss_params_indir indir;
u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir *tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_rqt rqt;
struct mlx5_core_dev *mdev; /* primary */
u32 drop_rqn;
bool inner_ft_support;
bool enabled;
refcount_t refcnt;
};
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
{
rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
}
int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
u32 actual_table_size, u32 max_table_size)
{
indir->table = kvmalloc_array(max_table_size, sizeof(*indir->table), GFP_KERNEL);
if (!indir->table)
return -ENOMEM;
indir->max_table_size = max_table_size;
indir->actual_table_size = actual_table_size;
return 0;
}
void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir)
{
kvfree(indir->table);
}
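/*
 * Copy RSS parameters from one context to another while keeping the
 * destination's own indirection table allocation: the struct assignment
 * below would overwrite the table pointer, so it is saved and restored,
 * and only the table contents are copied.
 */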
static int mlx5e_rss_copy(struct mlx5e_rss *to, const struct mlx5e_rss *from)
{
u32 *dst_indir_table;
if (to->indir.actual_table_size != from->indir.actual_table_size ||
to->indir.max_table_size != from->indir.max_table_size) {
mlx5e_rss_warn(to->mdev,
"Failed to copy RSS due to size mismatch, src (actual %u, max %u) != dst (actual %u, max %u)\n",
from->indir.actual_table_size, from->indir.max_table_size,
to->indir.actual_table_size, to->indir.max_table_size);
return -EINVAL;
}
dst_indir_table = to->indir.table;
*to = *from;
to->indir.table = dst_indir_table;
memcpy(to->indir.table, from->indir.table,
from->indir.actual_table_size * sizeof(*from->indir.table));
return 0;
}
static struct mlx5e_rss *mlx5e_rss_init_copy(const struct mlx5e_rss *from)
{
struct mlx5e_rss *rss;
int err;
rss = kvzalloc(sizeof(*rss), GFP_KERNEL);
if (!rss)
return ERR_PTR(-ENOMEM);
err = mlx5e_rss_params_indir_init(&rss->indir, from->mdev, from->indir.actual_table_size,
from->indir.max_table_size);
if (err)
goto err_free_rss;
err = mlx5e_rss_copy(rss, from);
if (err)
goto err_free_indir;
return rss;
err_free_indir:
mlx5e_rss_params_indir_cleanup(&rss->indir);
err_free_rss:
kvfree(rss);
return ERR_PTR(err);
}
static void mlx5e_rss_params_init(struct mlx5e_rss *rss)
{
enum mlx5_traffic_types tt;
rss->hash.hfunc = ETH_RSS_HASH_TOP;
netdev_rss_key_fill(rss->hash.toeplitz_hash_key,
sizeof(rss->hash.toeplitz_hash_key));
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
rss->rx_hash_fields[tt] =
mlx5e_rss_get_default_tt_config(tt).rx_hash_fields;
}
static struct mlx5e_tir **rss_get_tirp(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner)
{
return inner ? &rss->inner_tir[tt] : &rss->tir[tt];
}
static struct mlx5e_tir *rss_get_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner)
{
return *rss_get_tirp(rss, tt, inner);
}
static struct mlx5e_rss_params_traffic_type
mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
{
struct mlx5e_rss_params_traffic_type rss_tt;
rss_tt = mlx5e_rss_get_default_tt_config(tt);
rss_tt.rx_hash_fields = rss->rx_hash_fields[tt];
return rss_tt;
}
static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner)
{
struct mlx5e_rss_params_traffic_type rss_tt;
struct mlx5e_tir_builder *builder;
struct mlx5e_tir **tir_p;
struct mlx5e_tir *tir;
u32 rqtn;
int err;
if (inner && !rss->inner_ft_support) {
mlx5e_rss_warn(rss->mdev,
"Cannot create inner indirect TIR[%d], RSS inner FT is not supported.\n",
tt);
return -EINVAL;
}
tir_p = rss_get_tirp(rss, tt, inner);
if (*tir_p)
return -EINVAL;
tir = kvzalloc(sizeof(*tir), GFP_KERNEL);
if (!tir)
return -ENOMEM;
builder = mlx5e_tir_builder_alloc(false);
if (!builder) {
err = -ENOMEM;
goto free_tir;
}
rqtn = mlx5e_rqt_get_rqtn(&rss->rqt);
mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn,
rqtn, rss->inner_ft_support);
mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
rss_tt = mlx5e_rss_get_tt_config(rss, tt);
mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
err = mlx5e_tir_init(tir, builder, rss->mdev, true);
mlx5e_tir_builder_free(builder);
if (err) {
mlx5e_rss_warn(rss->mdev, "Failed to create %sindirect TIR: err = %d, tt = %d\n",
inner ? "inner " : "", err, tt);
goto free_tir;
}
*tir_p = tir;
return 0;
free_tir:
kvfree(tir);
return err;
}
static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner)
{
struct mlx5e_tir **tir_p;
struct mlx5e_tir *tir;
tir_p = rss_get_tirp(rss, tt, inner);
if (!*tir_p)
return;
tir = *tir_p;
mlx5e_tir_destroy(tir);
kvfree(tir);
*tir_p = NULL;
}
static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner)
{
enum mlx5_traffic_types tt, max_tt;
int err;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
if (err)
goto err_destroy_tirs;
}
return 0;
err_destroy_tirs:
max_tt = tt;
for (tt = 0; tt < max_tt; tt++)
mlx5e_rss_destroy_tir(rss, tt, inner);
return err;
}
static void mlx5e_rss_destroy_tirs(struct mlx5e_rss *rss, bool inner)
{
enum mlx5_traffic_types tt;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
mlx5e_rss_destroy_tir(rss, tt, inner);
}
static int mlx5e_rss_update_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner)
{
struct mlx5e_rss_params_traffic_type rss_tt;
struct mlx5e_tir_builder *builder;
struct mlx5e_tir *tir;
int err;
tir = rss_get_tir(rss, tt, inner);
if (!tir)
return 0;
builder = mlx5e_tir_builder_alloc(true);
if (!builder)
return -ENOMEM;
rss_tt = mlx5e_rss_get_tt_config(rss, tt);
mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
err = mlx5e_tir_modify(tir, builder);
mlx5e_tir_builder_free(builder);
return err;
}
static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss)
{
enum mlx5_traffic_types tt;
int err, retval;
retval = 0;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
err = mlx5e_rss_update_tir(rss, tt, false);
if (err) {
retval = retval ? : err;
mlx5e_rss_warn(rss->mdev,
"Failed to update RSS hash of indirect TIR for traffic type %d: err = %d\n",
tt, err);
}
if (!rss->inner_ft_support)
continue;
err = mlx5e_rss_update_tir(rss, tt, true);
if (err) {
retval = retval ? : err;
mlx5e_rss_warn(rss->mdev,
"Failed to update RSS hash of inner indirect TIR for traffic type %d: err = %d\n",
tt, err);
}
}
return retval;
}
static int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss)
{
mlx5e_rss_params_init(rss);
refcount_set(&rss->refcnt, 1);
return mlx5e_rqt_init_direct(&rss->rqt, rss->mdev, true,
rss->drop_rqn, rss->indir.max_table_size);
}
struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
enum mlx5e_rss_init_type type, unsigned int nch,
unsigned int max_nch)
{
struct mlx5e_rss *rss;
int err;
rss = kvzalloc(sizeof(*rss), GFP_KERNEL);
if (!rss)
return ERR_PTR(-ENOMEM);
err = mlx5e_rss_params_indir_init(&rss->indir, mdev,
mlx5e_rqt_size(mdev, nch),
mlx5e_rqt_size(mdev, max_nch));
if (err)
goto err_free_rss;
rss->mdev = mdev;
rss->inner_ft_support = inner_ft_support;
rss->drop_rqn = drop_rqn;
err = mlx5e_rss_init_no_tirs(rss);
if (err)
goto err_free_indir;
if (type == MLX5E_RSS_INIT_NO_TIRS)
goto out;
err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
if (err)
goto err_destroy_rqt;
if (inner_ft_support) {
err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, true);
if (err)
goto err_destroy_tirs;
}
out:
return rss;
err_destroy_tirs:
mlx5e_rss_destroy_tirs(rss, false);
err_destroy_rqt:
mlx5e_rqt_destroy(&rss->rqt);
err_free_indir:
mlx5e_rss_params_indir_cleanup(&rss->indir);
err_free_rss:
kvfree(rss);
return ERR_PTR(err);
}
int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
{
if (!refcount_dec_if_one(&rss->refcnt))
return -EBUSY;
mlx5e_rss_destroy_tirs(rss, false);
if (rss->inner_ft_support)
mlx5e_rss_destroy_tirs(rss, true);
mlx5e_rqt_destroy(&rss->rqt);
mlx5e_rss_params_indir_cleanup(&rss->indir);
kvfree(rss);
return 0;
}
void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss)
{
refcount_inc(&rss->refcnt);
}
void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss)
{
refcount_dec(&rss->refcnt);
}
unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss)
{
return refcount_read(&rss->refcnt);
}
u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner)
{
struct mlx5e_tir *tir;
WARN_ON(inner && !rss->inner_ft_support);
tir = rss_get_tir(rss, tt, inner);
WARN_ON(!tir);
return mlx5e_tir_get_tirn(tir);
}
/* Fill the "tirn" output parameter.
* Create the requested TIR if this is its first use.
*/
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner, u32 *tirn)
{
struct mlx5e_tir *tir;
tir = rss_get_tir(rss, tt, inner);
if (!tir) { /* TIR doesn't exist, create one */
int err;
err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
if (err)
return err;
tir = rss_get_tir(rss, tt, inner);
}
*tirn = mlx5e_tir_get_tirn(tir);
return 0;
}
static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
int err;
err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, vhca_ids, num_rqns, rss->hash.hfunc,
&rss->indir);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), err);
return err;
}
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
rss->enabled = true;
mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
}
void mlx5e_rss_disable(struct mlx5e_rss *rss)
{
int err;
rss->enabled = false;
err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn, NULL);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
}
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
struct mlx5e_packet_merge_param *pkt_merge_param)
{
struct mlx5e_tir_builder *builder;
enum mlx5_traffic_types tt;
int err, final_err;
builder = mlx5e_tir_builder_alloc(true);
if (!builder)
return -ENOMEM;
mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
final_err = 0;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
struct mlx5e_tir *tir;
tir = rss_get_tir(rss, tt, false);
if (!tir)
goto inner_tir;
err = mlx5e_tir_modify(tir, builder);
if (err) {
mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_tir_get_tirn(tir), tt, err);
if (!final_err)
final_err = err;
}
inner_tir:
if (!rss->inner_ft_support)
continue;
tir = rss_get_tir(rss, tt, true);
if (!tir)
continue;
err = mlx5e_tir_modify(tir, builder);
if (err) {
mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of inner indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_tir_get_tirn(tir), tt, err);
if (!final_err)
final_err = err;
}
}
mlx5e_tir_builder_free(builder);
return final_err;
}
int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
{
if (indir)
memcpy(indir, rss->indir.table,
rss->indir.actual_table_size * sizeof(*rss->indir.table));
if (key)
memcpy(key, rss->hash.toeplitz_hash_key,
sizeof(rss->hash.toeplitz_hash_key));
if (hfunc)
*hfunc = rss->hash.hfunc;
return 0;
}
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
const u8 *key, const u8 *hfunc,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
bool changed_indir = false;
bool changed_hash = false;
struct mlx5e_rss *old_rss;
int err = 0;
old_rss = mlx5e_rss_init_copy(rss);
if (IS_ERR(old_rss))
return PTR_ERR(old_rss);
if (hfunc && *hfunc != rss->hash.hfunc) {
switch (*hfunc) {
case ETH_RSS_HASH_XOR:
case ETH_RSS_HASH_TOP:
break;
default:
err = -EINVAL;
goto out;
}
changed_hash = true;
changed_indir = true;
rss->hash.hfunc = *hfunc;
}
if (key) {
if (rss->hash.hfunc == ETH_RSS_HASH_TOP)
changed_hash = true;
memcpy(rss->hash.toeplitz_hash_key, key,
sizeof(rss->hash.toeplitz_hash_key));
}
if (indir) {
changed_indir = true;
memcpy(rss->indir.table, indir,
rss->indir.actual_table_size * sizeof(*rss->indir.table));
}
if (changed_indir && rss->enabled) {
err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
if (err) {
mlx5e_rss_copy(rss, old_rss);
goto out;
}
}
if (changed_hash)
mlx5e_rss_update_tirs(rss);
out:
mlx5e_rss_params_indir_cleanup(&old_rss->indir);
kvfree(old_rss);
return err;
}
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss)
{
return rss->hash;
}
u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
{
return rss->rx_hash_fields[tt];
}
int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
u8 rx_hash_fields)
{
u8 old_rx_hash_fields;
int err;
old_rx_hash_fields = rss->rx_hash_fields[tt];
if (old_rx_hash_fields == rx_hash_fields)
return 0;
rss->rx_hash_fields[tt] = rx_hash_fields;
err = mlx5e_rss_update_tir(rss, tt, false);
if (err) {
rss->rx_hash_fields[tt] = old_rx_hash_fields;
mlx5e_rss_warn(rss->mdev,
"Failed to update RSS hash fields of indirect TIR for traffic type %d: err = %d\n",
tt, err);
return err;
}
if (!(rss->inner_ft_support))
return 0;
err = mlx5e_rss_update_tir(rss, tt, true);
if (err) {
/* Partial update happened. Try to revert - it may fail too, but
* there is nothing more we can do.
*/
rss->rx_hash_fields[tt] = old_rx_hash_fields;
mlx5e_rss_warn(rss->mdev,
"Failed to update RSS hash fields of inner indirect TIR for traffic type %d: err = %d\n",
tt, err);
if (mlx5e_rss_update_tir(rss, tt, false))
mlx5e_rss_warn(rss->mdev,
"Partial update of RSS hash fields happened: failed to revert indirect TIR for traffic type %d to the old values\n",
tt);
}
return err;
}
void mlx5e_rss_set_indir_uniform(struct mlx5e_rss *rss, unsigned int nch)
{
mlx5e_rss_params_indir_init_uniform(&rss->indir, nch);
}
|
// SPDX-License-Identifier: GPL-2.0
/*
* Hardware monitoring driver for Maxim MAX16508, MAX16600, MAX16601,
* and MAX16602.
*
* Implementation notes:
*
* This chip series supports two rails, VCORE and VSA. Telemetry information
* for the two rails is reported at two consecutive I2C addresses. The driver
* instantiates a dummy I2C client at the second I2C address to report
* information for the VSA rail in a single instance of the driver.
* Telemetry for the VSA rail is reported to the PMBus core in PMBus page 2.
*
* The chip reports input current using two separate methods. The input current
* reported with the standard READ_IIN command is derived from the output
* current. The first method is reported to the PMBus core with PMBus page 0,
* the second method is reported with PMBus page 1.
*
* The chip supports reading per-phase temperatures and per-phase input/output
* currents for VCORE. Telemetry is reported in vendor specific registers.
* The driver translates the vendor specific register values to PMBus standard
* register values and reports per-phase information in PMBus page 0.
*
* Copyright 2019, 2020 Google LLC.
*/
#include <linux/bits.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "pmbus.h"
enum chips { max16508, max16600, max16601, max16602 };
#define REG_DEFAULT_NUM_POP 0xc4
#define REG_SETPT_DVID 0xd1
#define DAC_10MV_MODE BIT(4)
#define REG_IOUT_AVG_PK 0xee
#define REG_IIN_SENSOR 0xf1
#define REG_TOTAL_INPUT_POWER 0xf2
#define REG_PHASE_ID 0xf3
#define CORE_RAIL_INDICATOR BIT(7)
#define REG_PHASE_REPORTING 0xf4
#define MAX16601_NUM_PHASES 8
struct max16601_data {
enum chips id;
struct pmbus_driver_info info;
struct i2c_client *vsa;
int iout_avg_pkg;
};
#define to_max16601_data(x) container_of(x, struct max16601_data, info)
static int max16601_read_byte(struct i2c_client *client, int page, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct max16601_data *data = to_max16601_data(info);
if (page > 0) {
if (page == 2) /* VSA */
return i2c_smbus_read_byte_data(data->vsa, reg);
return -EOPNOTSUPP;
}
return -ENODATA;
}
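/*
 * Page/phase mapping used by the word read/write callbacks below
 * (summarizing the implementation notes at the top of this file):
 *   page 0 - VCORE rail; per-phase data via REG_PHASE_ID/REG_PHASE_REPORTING
 *            (block data: temperature in bytes 0-1, output current in bytes
 *            2-3, input current in bytes 4-5, little endian)
 *   page 1 - VCORE input current/power from the dedicated sensor registers
 *   page 2 - VSA rail, forwarded to the dummy client at the next I2C address
 */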
static int max16601_read_word(struct i2c_client *client, int page, int phase,
int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct max16601_data *data = to_max16601_data(info);
u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
int ret;
switch (page) {
case 0: /* VCORE */
if (phase == 0xff)
return -ENODATA;
switch (reg) {
case PMBUS_READ_IIN:
case PMBUS_READ_IOUT:
case PMBUS_READ_TEMPERATURE_1:
ret = i2c_smbus_write_byte_data(client, REG_PHASE_ID,
phase);
if (ret)
return ret;
ret = i2c_smbus_read_block_data(client,
REG_PHASE_REPORTING,
buf);
if (ret < 0)
return ret;
if (ret < 6)
return -EIO;
switch (reg) {
case PMBUS_READ_TEMPERATURE_1:
return buf[1] << 8 | buf[0];
case PMBUS_READ_IOUT:
return buf[3] << 8 | buf[2];
case PMBUS_READ_IIN:
return buf[5] << 8 | buf[4];
default:
break;
}
}
return -EOPNOTSUPP;
case 1: /* VCORE, read IIN/PIN from sensor element */
switch (reg) {
case PMBUS_READ_IIN:
return i2c_smbus_read_word_data(client, REG_IIN_SENSOR);
case PMBUS_READ_PIN:
return i2c_smbus_read_word_data(client,
REG_TOTAL_INPUT_POWER);
default:
break;
}
return -EOPNOTSUPP;
case 2: /* VSA */
switch (reg) {
case PMBUS_VIRT_READ_IOUT_MAX:
ret = i2c_smbus_read_word_data(data->vsa,
REG_IOUT_AVG_PK);
if (ret < 0)
return ret;
if (sign_extend32(ret, 10) >
sign_extend32(data->iout_avg_pkg, 10))
data->iout_avg_pkg = ret;
return data->iout_avg_pkg;
case PMBUS_VIRT_RESET_IOUT_HISTORY:
return 0;
case PMBUS_IOUT_OC_FAULT_LIMIT:
case PMBUS_IOUT_OC_WARN_LIMIT:
case PMBUS_OT_FAULT_LIMIT:
case PMBUS_OT_WARN_LIMIT:
case PMBUS_READ_IIN:
case PMBUS_READ_IOUT:
case PMBUS_READ_TEMPERATURE_1:
case PMBUS_STATUS_WORD:
return i2c_smbus_read_word_data(data->vsa, reg);
default:
return -EOPNOTSUPP;
}
default:
return -EOPNOTSUPP;
}
}
static int max16601_write_byte(struct i2c_client *client, int page, u8 reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct max16601_data *data = to_max16601_data(info);
if (page == 2) {
if (reg == PMBUS_CLEAR_FAULTS)
return i2c_smbus_write_byte(data->vsa, reg);
return -EOPNOTSUPP;
}
return -ENODATA;
}
static int max16601_write_word(struct i2c_client *client, int page, int reg,
u16 value)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct max16601_data *data = to_max16601_data(info);
switch (page) {
case 0: /* VCORE */
return -ENODATA;
case 1: /* VCORE IIN/PIN from sensor element */
default:
return -EOPNOTSUPP;
case 2: /* VSA */
switch (reg) {
case PMBUS_VIRT_RESET_IOUT_HISTORY:
data->iout_avg_pkg = 0xfc00;
return 0;
case PMBUS_IOUT_OC_FAULT_LIMIT:
case PMBUS_IOUT_OC_WARN_LIMIT:
case PMBUS_OT_FAULT_LIMIT:
case PMBUS_OT_WARN_LIMIT:
return i2c_smbus_write_word_data(data->vsa, reg, value);
default:
return -EOPNOTSUPP;
}
}
}
static int max16601_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
struct max16601_data *data = to_max16601_data(info);
int reg;
reg = i2c_smbus_read_byte_data(client, REG_SETPT_DVID);
if (reg < 0)
return reg;
if (reg & DAC_10MV_MODE)
info->vrm_version[0] = vr13;
else
info->vrm_version[0] = vr12;
if (data->id != max16600 && data->id != max16601 && data->id != max16602)
return 0;
reg = i2c_smbus_read_byte_data(client, REG_DEFAULT_NUM_POP);
if (reg < 0)
return reg;
/*
* If REG_DEFAULT_NUM_POP returns 0, we don't know how many phases
* are populated. Stick with the default in that case.
*/
reg &= 0x0f;
if (reg && reg <= MAX16601_NUM_PHASES)
info->phases[0] = reg;
return 0;
}
static struct pmbus_driver_info max16601_info = {
.pages = 3,
.format[PSC_VOLTAGE_IN] = linear,
.format[PSC_VOLTAGE_OUT] = vid,
.format[PSC_CURRENT_IN] = linear,
.format[PSC_CURRENT_OUT] = linear,
.format[PSC_TEMPERATURE] = linear,
.format[PSC_POWER] = linear,
.func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_PIN |
PMBUS_HAVE_STATUS_INPUT |
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
PMBUS_HAVE_POUT | PMBUS_PAGE_VIRTUAL | PMBUS_PHASE_VIRTUAL,
.func[1] = PMBUS_HAVE_IIN | PMBUS_HAVE_PIN | PMBUS_PAGE_VIRTUAL,
.func[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT |
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_PAGE_VIRTUAL,
.phases[0] = MAX16601_NUM_PHASES,
.pfunc[0] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
.pfunc[1] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
.pfunc[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
.pfunc[3] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
.pfunc[4] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
.pfunc[5] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
.pfunc[6] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
.pfunc[7] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
.identify = max16601_identify,
.read_byte_data = max16601_read_byte,
.read_word_data = max16601_read_word,
.write_byte = max16601_write_byte,
.write_word_data = max16601_write_word,
};
static void max16601_remove(void *_data)
{
struct max16601_data *data = _data;
i2c_unregister_device(data->vsa);
}
static const struct i2c_device_id max16601_id[] = {
{"max16508", max16508},
{"max16600", max16600},
{"max16601", max16601},
{"max16602", max16602},
{}
};
MODULE_DEVICE_TABLE(i2c, max16601_id);
static int max16601_get_id(struct i2c_client *client)
{
struct device *dev = &client->dev;
u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
enum chips id;
int ret;
ret = i2c_smbus_read_block_data(client, PMBUS_IC_DEVICE_ID, buf);
if (ret < 11)
return -ENODEV;
/*
* PMBUS_IC_DEVICE_ID is expected to return MAX1660[012]y.xx",
* "MAX16500y.xx".cdxxcccccccccc, or "MAX16508y.xx".
*/
if (!strncmp(buf, "MAX16500", 8) || !strncmp(buf, "MAX16508", 8)) {
id = max16508;
} else if (!strncmp(buf, "MAX16600", 8)) {
id = max16600;
} else if (!strncmp(buf, "MAX16601", 8)) {
id = max16601;
} else if (!strncmp(buf, "MAX16602", 8)) {
id = max16602;
} else {
buf[ret] = '\0';
dev_err(dev, "Unsupported chip '%s'\n", buf);
return -ENODEV;
}
return id;
}
static int max16601_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
const struct i2c_device_id *id;
struct max16601_data *data;
int ret, chip_id;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA |
I2C_FUNC_SMBUS_READ_BLOCK_DATA))
return -ENODEV;
chip_id = max16601_get_id(client);
if (chip_id < 0)
return chip_id;
id = i2c_match_id(max16601_id, client);
if (chip_id != id->driver_data)
dev_warn(&client->dev,
"Device mismatch: Configured %s (%d), detected %d\n",
id->name, (int) id->driver_data, chip_id);
ret = i2c_smbus_read_byte_data(client, REG_PHASE_ID);
if (ret < 0)
return ret;
if (!(ret & CORE_RAIL_INDICATOR)) {
dev_err(dev,
"Driver must be instantiated on CORE rail I2C address\n");
return -ENODEV;
}
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->id = chip_id;
data->iout_avg_pkg = 0xfc00;
data->vsa = i2c_new_dummy_device(client->adapter, client->addr + 1);
if (IS_ERR(data->vsa)) {
dev_err(dev, "Failed to register VSA client\n");
return PTR_ERR(data->vsa);
}
ret = devm_add_action_or_reset(dev, max16601_remove, data);
if (ret)
return ret;
data->info = max16601_info;
return pmbus_do_probe(client, &data->info);
}
static struct i2c_driver max16601_driver = {
.driver = {
.name = "max16601",
},
.probe = max16601_probe,
.id_table = max16601_id,
};
module_i2c_driver(max16601_driver);
MODULE_AUTHOR("Guenter Roeck <[email protected]>");
MODULE_DESCRIPTION("PMBus driver for Maxim MAX16601");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("PMBUS");
|
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_acl.h"
#include "prestera_counter.h"
#include "prestera_router_hw.h"
#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
#define PRESTERA_MIN_MTU 64
#define PRESTERA_MSG_CHUNK_SIZE 1024
enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1,
PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2,
PRESTERA_CMD_TYPE_PORT_ATTR_SET = 0x100,
PRESTERA_CMD_TYPE_PORT_ATTR_GET = 0x101,
PRESTERA_CMD_TYPE_PORT_INFO_GET = 0x110,
PRESTERA_CMD_TYPE_VLAN_CREATE = 0x200,
PRESTERA_CMD_TYPE_VLAN_DELETE = 0x201,
PRESTERA_CMD_TYPE_VLAN_PORT_SET = 0x202,
PRESTERA_CMD_TYPE_VLAN_PVID_SET = 0x203,
PRESTERA_CMD_TYPE_FDB_ADD = 0x300,
PRESTERA_CMD_TYPE_FDB_DELETE = 0x301,
PRESTERA_CMD_TYPE_FDB_FLUSH_PORT = 0x310,
PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN = 0x311,
PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN = 0x312,
PRESTERA_CMD_TYPE_BRIDGE_CREATE = 0x400,
PRESTERA_CMD_TYPE_BRIDGE_DELETE = 0x401,
PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402,
PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403,
PRESTERA_CMD_TYPE_COUNTER_GET = 0x510,
PRESTERA_CMD_TYPE_COUNTER_ABORT = 0x511,
PRESTERA_CMD_TYPE_COUNTER_TRIGGER = 0x512,
PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET = 0x513,
PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE = 0x514,
PRESTERA_CMD_TYPE_COUNTER_CLEAR = 0x515,
PRESTERA_CMD_TYPE_VTCAM_CREATE = 0x540,
PRESTERA_CMD_TYPE_VTCAM_DESTROY = 0x541,
PRESTERA_CMD_TYPE_VTCAM_RULE_ADD = 0x550,
PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE = 0x551,
PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND = 0x560,
PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND = 0x561,
PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600,
PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601,
PRESTERA_CMD_TYPE_ROUTER_LPM_ADD = 0x610,
PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE = 0x611,
PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET = 0x622,
PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET = 0x645,
PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD = 0x623,
PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE = 0x624,
PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630,
PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631,
PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE = 0x700,
PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY = 0x701,
PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET = 0x702,
PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET = 0x703,
PRESTERA_CMD_TYPE_MDB_CREATE = 0x704,
PRESTERA_CMD_TYPE_MDB_DESTROY = 0x705,
PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
PRESTERA_CMD_TYPE_LAG_MEMBER_ADD = 0x900,
PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE = 0x901,
PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE = 0x902,
PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE = 0x903,
PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000,
PRESTERA_CMD_TYPE_SPAN_GET = 0x1100,
PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND = 0x1101,
PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND = 0x1102,
PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103,
PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND = 0x1104,
PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND = 0x1105,
PRESTERA_CMD_TYPE_POLICER_CREATE = 0x1500,
PRESTERA_CMD_TYPE_POLICER_RELEASE = 0x1501,
PRESTERA_CMD_TYPE_POLICER_SET = 0x1502,
PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET = 0x2000,
PRESTERA_CMD_TYPE_ACK = 0x10000,
PRESTERA_CMD_TYPE_MAX
};
enum {
PRESTERA_CMD_PORT_ATTR_ADMIN_STATE = 1,
PRESTERA_CMD_PORT_ATTR_MTU = 3,
PRESTERA_CMD_PORT_ATTR_MAC = 4,
PRESTERA_CMD_PORT_ATTR_SPEED = 5,
PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE = 6,
PRESTERA_CMD_PORT_ATTR_LEARNING = 7,
PRESTERA_CMD_PORT_ATTR_FLOOD = 8,
PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9,
PRESTERA_CMD_PORT_ATTR_LOCKED = 10,
PRESTERA_CMD_PORT_ATTR_PHY_MODE = 12,
PRESTERA_CMD_PORT_ATTR_TYPE = 13,
PRESTERA_CMD_PORT_ATTR_STATS = 17,
PRESTERA_CMD_PORT_ATTR_MAC_AUTONEG_RESTART = 18,
PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART = 19,
PRESTERA_CMD_PORT_ATTR_MAC_MODE = 22,
};
enum {
PRESTERA_CMD_SWITCH_ATTR_MAC = 1,
PRESTERA_CMD_SWITCH_ATTR_AGEING = 2,
};
enum {
PRESTERA_CMD_ACK_OK,
PRESTERA_CMD_ACK_FAILED,
PRESTERA_CMD_ACK_MAX
};
enum {
PRESTERA_PORT_TP_NA,
PRESTERA_PORT_TP_MDI,
PRESTERA_PORT_TP_MDIX,
PRESTERA_PORT_TP_AUTO,
};
enum {
PRESTERA_PORT_FLOOD_TYPE_UC = 0,
PRESTERA_PORT_FLOOD_TYPE_MC = 1,
};
enum {
PRESTERA_PORT_GOOD_OCTETS_RCV_CNT,
PRESTERA_PORT_BAD_OCTETS_RCV_CNT,
PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT,
PRESTERA_PORT_BRDC_PKTS_RCV_CNT,
PRESTERA_PORT_MC_PKTS_RCV_CNT,
PRESTERA_PORT_PKTS_64L_CNT,
PRESTERA_PORT_PKTS_65TO127L_CNT,
PRESTERA_PORT_PKTS_128TO255L_CNT,
PRESTERA_PORT_PKTS_256TO511L_CNT,
PRESTERA_PORT_PKTS_512TO1023L_CNT,
PRESTERA_PORT_PKTS_1024TOMAXL_CNT,
PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT,
PRESTERA_PORT_MC_PKTS_SENT_CNT,
PRESTERA_PORT_BRDC_PKTS_SENT_CNT,
PRESTERA_PORT_FC_SENT_CNT,
PRESTERA_PORT_GOOD_FC_RCV_CNT,
PRESTERA_PORT_DROP_EVENTS_CNT,
PRESTERA_PORT_UNDERSIZE_PKTS_CNT,
PRESTERA_PORT_FRAGMENTS_PKTS_CNT,
PRESTERA_PORT_OVERSIZE_PKTS_CNT,
PRESTERA_PORT_JABBER_PKTS_CNT,
PRESTERA_PORT_MAC_RCV_ERROR_CNT,
PRESTERA_PORT_BAD_CRC_CNT,
PRESTERA_PORT_COLLISIONS_CNT,
PRESTERA_PORT_LATE_COLLISIONS_CNT,
PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT,
PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT,
PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT,
PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT,
PRESTERA_PORT_GOOD_OCTETS_SENT_CNT,
PRESTERA_PORT_CNT_MAX
};
enum {
PRESTERA_FC_NONE,
PRESTERA_FC_SYMMETRIC,
PRESTERA_FC_ASYMMETRIC,
PRESTERA_FC_SYMM_ASYMM,
};
enum {
PRESTERA_POLICER_MODE_SR_TCM
};
enum {
PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT = 0,
PRESTERA_HW_FDB_ENTRY_TYPE_LAG = 1,
PRESTERA_HW_FDB_ENTRY_TYPE_MAX = 2,
};
struct prestera_fw_event_handler {
struct list_head list;
struct rcu_head rcu;
enum prestera_event_type type;
prestera_event_cb_t func;
void *arg;
};
enum {
PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_REG_PORT = 0,
PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG = 1,
PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_MAX = 2,
};
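/*
 * Firmware wire-format messages. All multi-byte fields are little-endian
 * (__le16/__le32/__le64); layouts are pinned by prestera_hw_build_tests().
 */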
struct prestera_msg_cmd {
__le32 type;
};
struct prestera_msg_ret {
struct prestera_msg_cmd cmd;
__le32 status;
};
struct prestera_msg_common_req {
struct prestera_msg_cmd cmd;
};
struct prestera_msg_common_resp {
struct prestera_msg_ret ret;
};
struct prestera_msg_switch_attr_req {
struct prestera_msg_cmd cmd;
__le32 attr;
union {
__le32 ageing_timeout_ms;
struct {
u8 mac[ETH_ALEN];
u8 __pad[2];
};
} param;
};
struct prestera_msg_switch_init_resp {
struct prestera_msg_ret ret;
__le32 port_count;
__le32 mtu_max;
__le32 size_tbl_router_nexthop;
u8 switch_id;
u8 lag_max;
u8 lag_member_max;
};
struct prestera_msg_event_port_param {
union {
struct {
__le32 mode;
__le32 speed;
u8 oper;
u8 duplex;
u8 fc;
u8 fec;
} mac;
struct {
__le64 lmode_bmap;
u8 mdix;
u8 fc;
u8 __pad[2];
		} __packed phy; /* must always be 12 bytes in size */
};
};
struct prestera_msg_port_cap_param {
__le64 link_mode;
u8 type;
u8 fec;
u8 fc;
u8 transceiver;
};
struct prestera_msg_port_flood_param {
u8 type;
u8 enable;
u8 __pad[2];
};
union prestera_msg_port_param {
__le32 mtu;
__le32 speed;
__le32 link_mode;
u8 admin_state;
u8 oper_state;
u8 mac[ETH_ALEN];
u8 accept_frm_type;
u8 learning;
u8 flood;
u8 type;
u8 duplex;
u8 fec;
u8 fc;
u8 br_locked;
union {
struct {
u8 admin;
u8 fc;
u8 ap_enable;
u8 __reserved[5];
union {
struct {
__le32 mode;
__le32 speed;
u8 inband;
u8 duplex;
u8 fec;
u8 fec_supp;
} reg_mode;
struct {
__le32 mode;
__le32 speed;
u8 fec;
u8 fec_supp;
u8 __pad[2];
} ap_modes[PRESTERA_AP_PORT_MAX];
};
} mac;
struct {
__le64 modes;
__le32 mode;
u8 admin;
u8 adv_enable;
u8 mdix;
u8 __pad;
} phy;
} link;
struct prestera_msg_port_cap_param cap;
struct prestera_msg_port_flood_param flood_ext;
struct prestera_msg_event_port_param link_evt;
};
struct prestera_msg_port_attr_req {
struct prestera_msg_cmd cmd;
__le32 attr;
__le32 port;
__le32 dev;
union prestera_msg_port_param param;
};
struct prestera_msg_port_attr_resp {
struct prestera_msg_ret ret;
union prestera_msg_port_param param;
};
struct prestera_msg_port_stats_resp {
struct prestera_msg_ret ret;
__le64 stats[PRESTERA_PORT_CNT_MAX];
};
struct prestera_msg_port_info_req {
struct prestera_msg_cmd cmd;
__le32 port;
};
struct prestera_msg_port_info_resp {
struct prestera_msg_ret ret;
__le32 hw_id;
__le32 dev_id;
__le16 fp_id;
u8 pad[2];
};
struct prestera_msg_vlan_req {
struct prestera_msg_cmd cmd;
__le32 port;
__le32 dev;
__le16 vid;
u8 is_member;
u8 is_tagged;
};
struct prestera_msg_fdb_req {
struct prestera_msg_cmd cmd;
__le32 flush_mode;
union {
struct {
__le32 port;
__le32 dev;
};
__le16 lag_id;
} dest;
__le16 vid;
u8 dest_type;
u8 dynamic;
u8 mac[ETH_ALEN];
u8 __pad[2];
};
struct prestera_msg_bridge_req {
struct prestera_msg_cmd cmd;
__le32 port;
__le32 dev;
__le16 bridge;
u8 pad[2];
};
struct prestera_msg_bridge_resp {
struct prestera_msg_ret ret;
__le16 bridge;
u8 pad[2];
};
struct prestera_msg_vtcam_create_req {
struct prestera_msg_cmd cmd;
__le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
u8 direction;
u8 lookup;
u8 pad[2];
};
struct prestera_msg_vtcam_destroy_req {
struct prestera_msg_cmd cmd;
__le32 vtcam_id;
};
struct prestera_msg_vtcam_rule_del_req {
struct prestera_msg_cmd cmd;
__le32 vtcam_id;
__le32 id;
};
struct prestera_msg_vtcam_bind_req {
struct prestera_msg_cmd cmd;
union {
struct {
__le32 hw_id;
__le32 dev_id;
} port;
__le32 index;
};
__le32 vtcam_id;
__le16 pcl_id;
__le16 type;
};
struct prestera_msg_vtcam_resp {
struct prestera_msg_ret ret;
__le32 vtcam_id;
__le32 rule_id;
};
struct prestera_msg_acl_action {
__le32 id;
__le32 __reserved;
union {
struct {
__le32 index;
} jump;
struct {
__le32 id;
} police;
struct {
__le32 id;
} count;
__le32 reserved[6];
};
};
struct prestera_msg_vtcam_rule_add_req {
struct prestera_msg_cmd cmd;
__le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
__le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
__le32 vtcam_id;
__le32 prio;
__le32 n_act;
struct prestera_msg_acl_action actions_msg[] __counted_by_le(n_act);
};
struct prestera_msg_counter_req {
struct prestera_msg_cmd cmd;
__le32 client;
__le32 block_id;
__le32 num_counters;
};
struct prestera_msg_counter_stats {
__le64 packets;
__le64 bytes;
};
struct prestera_msg_counter_resp {
struct prestera_msg_ret ret;
__le32 block_id;
__le32 offset;
__le32 num_counters;
__le32 done;
struct prestera_msg_counter_stats stats[];
};
struct prestera_msg_span_req {
struct prestera_msg_cmd cmd;
__le32 port;
__le32 dev;
u8 id;
u8 pad[3];
};
struct prestera_msg_span_resp {
struct prestera_msg_ret ret;
u8 id;
u8 pad[3];
};
struct prestera_msg_stp_req {
struct prestera_msg_cmd cmd;
__le32 port;
__le32 dev;
__le16 vid;
u8 state;
u8 __pad;
};
struct prestera_msg_rxtx_req {
struct prestera_msg_cmd cmd;
u8 use_sdma;
u8 pad[3];
};
struct prestera_msg_rxtx_resp {
struct prestera_msg_ret ret;
__le32 map_addr;
};
struct prestera_msg_iface {
union {
struct {
__le32 dev;
__le32 port;
};
__le16 lag_id;
};
__le16 vr_id;
__le16 vid;
u8 type;
u8 __pad[3];
};
struct prestera_msg_ip_addr {
union {
__be32 ipv4;
__be32 ipv6[4];
} u;
u8 v; /* e.g. PRESTERA_IPV4 */
u8 __pad[3];
};
struct prestera_msg_nh {
struct prestera_msg_iface oif;
__le32 hw_id;
u8 mac[ETH_ALEN];
u8 is_active;
u8 pad;
};
struct prestera_msg_rif_req {
struct prestera_msg_cmd cmd;
struct prestera_msg_iface iif;
__le32 mtu;
__le16 rif_id;
__le16 __reserved;
u8 mac[ETH_ALEN];
u8 __pad[2];
};
struct prestera_msg_rif_resp {
struct prestera_msg_ret ret;
__le16 rif_id;
u8 __pad[2];
};
struct prestera_msg_lpm_req {
struct prestera_msg_cmd cmd;
struct prestera_msg_ip_addr dst;
__le32 grp_id;
__le32 dst_len;
__le16 vr_id;
u8 __pad[2];
};
struct prestera_msg_nh_req {
struct prestera_msg_cmd cmd;
struct prestera_msg_nh nh[PRESTERA_NHGR_SIZE_MAX];
__le32 size;
__le32 grp_id;
};
struct prestera_msg_nh_chunk_req {
struct prestera_msg_cmd cmd;
__le32 offset;
};
struct prestera_msg_nh_chunk_resp {
struct prestera_msg_ret ret;
u8 hw_state[PRESTERA_MSG_CHUNK_SIZE];
};
struct prestera_msg_nh_grp_req {
struct prestera_msg_cmd cmd;
__le32 grp_id;
__le32 size;
};
struct prestera_msg_nh_grp_resp {
struct prestera_msg_ret ret;
__le32 grp_id;
};
struct prestera_msg_vr_req {
struct prestera_msg_cmd cmd;
__le16 vr_id;
u8 __pad[2];
};
struct prestera_msg_vr_resp {
struct prestera_msg_ret ret;
__le16 vr_id;
u8 __pad[2];
};
struct prestera_msg_lag_req {
struct prestera_msg_cmd cmd;
__le32 port;
__le32 dev;
__le16 lag_id;
u8 pad[2];
};
struct prestera_msg_cpu_code_counter_req {
struct prestera_msg_cmd cmd;
u8 counter_type;
u8 code;
u8 pad[2];
};
struct mvsw_msg_cpu_code_counter_ret {
struct prestera_msg_ret ret;
__le64 packet_count;
};
struct prestera_msg_policer_req {
struct prestera_msg_cmd cmd;
__le32 id;
union {
struct {
__le64 cir;
__le32 cbs;
		} __packed sr_tcm; /* must always be 12 bytes in size */
__le32 reserved[6];
};
u8 mode;
u8 type;
u8 pad[2];
};
struct prestera_msg_policer_resp {
struct prestera_msg_ret ret;
__le32 id;
};
struct prestera_msg_event {
__le16 type;
__le16 id;
};
struct prestera_msg_event_port {
struct prestera_msg_event id;
__le32 port_id;
struct prestera_msg_event_port_param param;
};
union prestera_msg_event_fdb_param {
u8 mac[ETH_ALEN];
};
struct prestera_msg_event_fdb {
struct prestera_msg_event id;
__le32 vid;
union {
__le32 port_id;
__le16 lag_id;
} dest;
union prestera_msg_event_fdb_param param;
u8 dest_type;
};
struct prestera_msg_flood_domain_create_req {
struct prestera_msg_cmd cmd;
};
struct prestera_msg_flood_domain_create_resp {
struct prestera_msg_ret ret;
__le32 flood_domain_idx;
};
struct prestera_msg_flood_domain_destroy_req {
struct prestera_msg_cmd cmd;
__le32 flood_domain_idx;
};
struct prestera_msg_flood_domain_ports_reset_req {
struct prestera_msg_cmd cmd;
__le32 flood_domain_idx;
};
struct prestera_msg_flood_domain_port {
union {
struct {
__le32 port_num;
__le32 dev_num;
};
__le16 lag_id;
};
__le16 vid;
__le16 port_type;
};
struct prestera_msg_flood_domain_ports_set_req {
struct prestera_msg_cmd cmd;
__le32 flood_domain_idx;
__le32 ports_num;
struct prestera_msg_flood_domain_port ports[] __counted_by_le(ports_num);
};
struct prestera_msg_mdb_create_req {
struct prestera_msg_cmd cmd;
__le32 flood_domain_idx;
__le16 vid;
u8 mac[ETH_ALEN];
};
struct prestera_msg_mdb_destroy_req {
struct prestera_msg_cmd cmd;
__le32 flood_domain_idx;
__le16 vid;
u8 mac[ETH_ALEN];
};
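/*
 * Compile-time checks that the host-side message layouts match the sizes
 * expected by the firmware wire format; a mismatch breaks the build.
 */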
static void prestera_hw_build_tests(void)
{
/* check requests */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_req) != 4);
BUILD_BUG_ON(sizeof(struct prestera_msg_switch_attr_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_req) != 144);
BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_vlan_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_fdb_req) != 28);
BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_span_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_stp_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_lag_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_cpu_code_counter_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_create_req) != 84);
BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_destroy_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_add_req) != 168);
BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_del_req) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_bind_req) != 20);
BUILD_BUG_ON(sizeof(struct prestera_msg_acl_action) != 32);
BUILD_BUG_ON(sizeof(struct prestera_msg_counter_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36);
BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36);
BUILD_BUG_ON(sizeof(struct prestera_msg_policer_req) != 36);
BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_req) != 4);
BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_destroy_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_set_req) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_reset_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_create_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_destroy_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_nh_req) != 124);
BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_req) != 12);
	/* structures that are part of req/resp fw messages */
BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20);
BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_port) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_nh) != 28);
/* check responses */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_switch_init_resp) != 24);
BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_resp) != 136);
BUILD_BUG_ON(sizeof(struct prestera_msg_port_stats_resp) != 248);
BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_resp) != 20);
BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_span_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_resp) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_counter_resp) != 24);
BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_policer_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_resp) != 1032);
BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_resp) != 12);
/* check events */
BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20);
BUILD_BUG_ON(sizeof(struct prestera_msg_event_fdb) != 20);
}
static u8 prestera_hw_mdix_to_eth(u8 mode);
static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause);
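/*
 * Send a command to the firmware and validate the reply: the response must
 * carry an ACK type and an OK status, otherwise the call is treated as failed.
 */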
static int __prestera_cmd_ret(struct prestera_switch *sw,
enum prestera_cmd_type_t type,
struct prestera_msg_cmd *cmd, size_t clen,
struct prestera_msg_ret *ret, size_t rlen,
int waitms)
{
struct prestera_device *dev = sw->dev;
int err;
cmd->type = __cpu_to_le32(type);
err = dev->send_req(dev, 0, cmd, clen, ret, rlen, waitms);
if (err)
return err;
if (ret->cmd.type != __cpu_to_le32(PRESTERA_CMD_TYPE_ACK))
return -EBADE;
if (ret->status != __cpu_to_le32(PRESTERA_CMD_ACK_OK))
return -EINVAL;
return 0;
}
static int prestera_cmd_ret(struct prestera_switch *sw,
enum prestera_cmd_type_t type,
struct prestera_msg_cmd *cmd, size_t clen,
struct prestera_msg_ret *ret, size_t rlen)
{
return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, 0);
}
static int prestera_cmd_ret_wait(struct prestera_switch *sw,
enum prestera_cmd_type_t type,
struct prestera_msg_cmd *cmd, size_t clen,
struct prestera_msg_ret *ret, size_t rlen,
int waitms)
{
return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, waitms);
}
static int prestera_cmd(struct prestera_switch *sw,
enum prestera_cmd_type_t type,
struct prestera_msg_cmd *cmd, size_t clen)
{
struct prestera_msg_common_resp resp;
return prestera_cmd_ret(sw, type, cmd, clen, &resp.ret, sizeof(resp));
}
static int prestera_fw_parse_port_evt(void *msg, struct prestera_event *evt)
{
struct prestera_msg_event_port *hw_evt;
hw_evt = (struct prestera_msg_event_port *)msg;
evt->port_evt.port_id = __le32_to_cpu(hw_evt->port_id);
if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) {
evt->port_evt.data.mac.oper = hw_evt->param.mac.oper;
evt->port_evt.data.mac.mode =
__le32_to_cpu(hw_evt->param.mac.mode);
evt->port_evt.data.mac.speed =
__le32_to_cpu(hw_evt->param.mac.speed);
evt->port_evt.data.mac.duplex = hw_evt->param.mac.duplex;
evt->port_evt.data.mac.fc = hw_evt->param.mac.fc;
evt->port_evt.data.mac.fec = hw_evt->param.mac.fec;
} else {
return -EINVAL;
}
return 0;
}
static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt)
{
struct prestera_msg_event_fdb *hw_evt = msg;
switch (hw_evt->dest_type) {
case PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT:
evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_REG_PORT;
evt->fdb_evt.dest.port_id = __le32_to_cpu(hw_evt->dest.port_id);
break;
case PRESTERA_HW_FDB_ENTRY_TYPE_LAG:
evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_LAG;
evt->fdb_evt.dest.lag_id = __le16_to_cpu(hw_evt->dest.lag_id);
break;
default:
return -EINVAL;
}
evt->fdb_evt.vid = __le32_to_cpu(hw_evt->vid);
ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac);
return 0;
}
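/* Per-event-type parsers translating raw firmware messages into prestera_event */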
static struct prestera_fw_evt_parser {
int (*func)(void *msg, struct prestera_event *evt);
} fw_event_parsers[PRESTERA_EVENT_TYPE_MAX] = {
[PRESTERA_EVENT_TYPE_PORT] = { .func = prestera_fw_parse_port_evt },
[PRESTERA_EVENT_TYPE_FDB] = { .func = prestera_fw_parse_fdb_evt },
};
static struct prestera_fw_event_handler *
__find_event_handler(const struct prestera_switch *sw,
enum prestera_event_type type)
{
struct prestera_fw_event_handler *eh;
list_for_each_entry_rcu(eh, &sw->event_handlers, list) {
if (eh->type == type)
return eh;
}
return NULL;
}
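/*
 * Copy the registered handler under the RCU read lock so the caller can
 * invoke it after the lock is dropped.
 */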
static int prestera_find_event_handler(const struct prestera_switch *sw,
enum prestera_event_type type,
struct prestera_fw_event_handler *eh)
{
struct prestera_fw_event_handler *tmp;
int err = 0;
rcu_read_lock();
tmp = __find_event_handler(sw, type);
if (tmp)
*eh = *tmp;
else
err = -ENOENT;
rcu_read_unlock();
return err;
}
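/*
 * Entry point for firmware event messages: validate the event type, parse the
 * payload and dispatch it to the registered handler, if any.
 */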
static int prestera_evt_recv(struct prestera_device *dev, void *buf, size_t size)
{
struct prestera_switch *sw = dev->priv;
struct prestera_msg_event *msg = buf;
struct prestera_fw_event_handler eh;
struct prestera_event evt;
u16 msg_type;
int err;
msg_type = __le16_to_cpu(msg->type);
if (msg_type >= PRESTERA_EVENT_TYPE_MAX)
return -EINVAL;
if (!fw_event_parsers[msg_type].func)
return -ENOENT;
err = prestera_find_event_handler(sw, msg_type, &eh);
if (err)
return err;
evt.id = __le16_to_cpu(msg->id);
err = fw_event_parsers[msg_type].func(buf, &evt);
if (err)
return err;
eh.func(sw, &evt, eh.arg);
return 0;
}
static void prestera_pkt_recv(struct prestera_device *dev)
{
struct prestera_switch *sw = dev->priv;
struct prestera_fw_event_handler eh;
struct prestera_event ev;
int err;
ev.id = PRESTERA_RXTX_EVENT_RCV_PKT;
err = prestera_find_event_handler(sw, PRESTERA_EVENT_TYPE_RXTX, &eh);
if (err)
return;
eh.func(sw, &ev, eh.arg);
}
static u8 prestera_hw_mdix_to_eth(u8 mode)
{
switch (mode) {
case PRESTERA_PORT_TP_MDI:
return ETH_TP_MDI;
case PRESTERA_PORT_TP_MDIX:
return ETH_TP_MDI_X;
case PRESTERA_PORT_TP_AUTO:
return ETH_TP_MDI_AUTO;
default:
return ETH_TP_MDI_INVALID;
}
}
static u8 prestera_hw_mdix_from_eth(u8 mode)
{
switch (mode) {
case ETH_TP_MDI:
return PRESTERA_PORT_TP_MDI;
case ETH_TP_MDI_X:
return PRESTERA_PORT_TP_MDIX;
case ETH_TP_MDI_AUTO:
return PRESTERA_PORT_TP_AUTO;
default:
return PRESTERA_PORT_TP_NA;
}
}
int prestera_hw_port_info_get(const struct prestera_port *port,
u32 *dev_id, u32 *hw_id, u16 *fp_id)
{
struct prestera_msg_port_info_req req = {
.port = __cpu_to_le32(port->id),
};
struct prestera_msg_port_info_resp resp;
int err;
err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_INFO_GET,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
*dev_id = __le32_to_cpu(resp.dev_id);
*hw_id = __le32_to_cpu(resp.hw_id);
*fp_id = __le16_to_cpu(resp.fp_id);
return 0;
}
int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac)
{
struct prestera_msg_switch_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_MAC),
};
ether_addr_copy(req.param.mac, mac);
return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_switch_init(struct prestera_switch *sw)
{
struct prestera_msg_switch_init_resp resp;
struct prestera_msg_common_req req;
int err;
INIT_LIST_HEAD(&sw->event_handlers);
prestera_hw_build_tests();
err = prestera_cmd_ret_wait(sw, PRESTERA_CMD_TYPE_SWITCH_INIT,
&req.cmd, sizeof(req),
&resp.ret, sizeof(resp),
PRESTERA_SWITCH_INIT_TIMEOUT_MS);
if (err)
return err;
sw->dev->recv_msg = prestera_evt_recv;
sw->dev->recv_pkt = prestera_pkt_recv;
sw->port_count = __le32_to_cpu(resp.port_count);
sw->mtu_min = PRESTERA_MIN_MTU;
sw->mtu_max = __le32_to_cpu(resp.mtu_max);
sw->id = resp.switch_id;
sw->lag_member_max = resp.lag_member_max;
sw->lag_max = resp.lag_max;
sw->size_tbl_router_nexthop =
__le32_to_cpu(resp.size_tbl_router_nexthop);
return 0;
}
void prestera_hw_switch_fini(struct prestera_switch *sw)
{
WARN_ON(!list_empty(&sw->event_handlers));
}
int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms)
{
struct prestera_msg_switch_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_AGEING),
.param = {
.ageing_timeout_ms = __cpu_to_le32(ageing_ms),
},
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_port_mac_mode_get(const struct prestera_port *port,
u32 *mode, u32 *speed, u8 *duplex, u8 *fec)
{
struct prestera_msg_port_attr_resp resp;
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id)
};
int err;
err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
if (mode)
*mode = __le32_to_cpu(resp.param.link_evt.mac.mode);
if (speed)
*speed = __le32_to_cpu(resp.param.link_evt.mac.speed);
if (duplex)
*duplex = resp.param.link_evt.mac.duplex;
if (fec)
*fec = resp.param.link_evt.mac.fec;
return err;
}
int prestera_hw_port_mac_mode_set(const struct prestera_port *port,
bool admin, u32 mode, u8 inband,
u32 speed, u8 duplex, u8 fec)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.param = {
.link = {
.mac = {
.admin = admin,
.reg_mode.mode = __cpu_to_le32(mode),
.reg_mode.inband = inband,
.reg_mode.speed = __cpu_to_le32(speed),
.reg_mode.duplex = duplex,
.reg_mode.fec = fec
}
}
}
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_port_phy_mode_get(const struct prestera_port *port,
u8 *mdix, u64 *lmode_bmap,
bool *fc_pause, bool *fc_asym)
{
struct prestera_msg_port_attr_resp resp;
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id)
};
int err;
err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
if (mdix)
*mdix = prestera_hw_mdix_to_eth(resp.param.link_evt.phy.mdix);
if (lmode_bmap)
*lmode_bmap = __le64_to_cpu(resp.param.link_evt.phy.lmode_bmap);
if (fc_pause && fc_asym)
prestera_hw_remote_fc_to_eth(resp.param.link_evt.phy.fc,
fc_pause, fc_asym);
return err;
}
int prestera_hw_port_phy_mode_set(const struct prestera_port *port,
bool admin, bool adv, u32 mode, u64 modes,
u8 mdix)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.param = {
.link = {
.phy = {
.admin = admin,
.adv_enable = adv ? 1 : 0,
.mode = __cpu_to_le32(mode),
.modes = __cpu_to_le64(modes),
}
}
}
};
req.param.link.phy.mdix = prestera_hw_mdix_from_eth(mdix);
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MTU),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.param = {
.mtu = __cpu_to_le32(mtu),
}
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
ether_addr_copy(req.param.mac, mac);
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_port_accept_frm_type(struct prestera_port *port,
enum prestera_accept_frm_type type)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.param = {
.accept_frm_type = type,
}
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_port_cap_get(const struct prestera_port *port,
struct prestera_port_caps *caps)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_CAPABILITY),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
struct prestera_msg_port_attr_resp resp;
int err;
err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
caps->supp_link_modes = __le64_to_cpu(resp.param.cap.link_mode);
caps->transceiver = resp.param.cap.transceiver;
caps->supp_fec = resp.param.cap.fec;
caps->type = resp.param.cap.type;
return err;
}
static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause)
{
switch (fc) {
case PRESTERA_FC_SYMMETRIC:
*pause = true;
*asym_pause = false;
break;
case PRESTERA_FC_ASYMMETRIC:
*pause = false;
*asym_pause = true;
break;
case PRESTERA_FC_SYMM_ASYMM:
*pause = true;
*asym_pause = true;
break;
default:
*pause = false;
*asym_pause = false;
}
}
int prestera_hw_vtcam_create(struct prestera_switch *sw,
u8 lookup, const u32 *keymask, u32 *vtcam_id,
enum prestera_hw_vtcam_direction_t dir)
{
int err;
struct prestera_msg_vtcam_resp resp;
struct prestera_msg_vtcam_create_req req = {
.lookup = lookup,
.direction = dir,
};
if (keymask)
memcpy(req.keymask, keymask, sizeof(req.keymask));
else
memset(req.keymask, 0, sizeof(req.keymask));
err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_CREATE,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
*vtcam_id = __le32_to_cpu(resp.vtcam_id);
return 0;
}
int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id)
{
struct prestera_msg_vtcam_destroy_req req = {
.vtcam_id = __cpu_to_le32(vtcam_id),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_DESTROY,
&req.cmd, sizeof(req));
}
static int
prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action,
struct prestera_acl_hw_action_info *info)
{
action->id = __cpu_to_le32(info->id);
switch (info->id) {
case PRESTERA_ACL_RULE_ACTION_ACCEPT:
case PRESTERA_ACL_RULE_ACTION_DROP:
case PRESTERA_ACL_RULE_ACTION_TRAP:
/* just rule action id, no specific data */
break;
case PRESTERA_ACL_RULE_ACTION_JUMP:
action->jump.index = __cpu_to_le32(info->jump.index);
break;
case PRESTERA_ACL_RULE_ACTION_POLICE:
action->police.id = __cpu_to_le32(info->police.id);
break;
case PRESTERA_ACL_RULE_ACTION_COUNT:
action->count.id = __cpu_to_le32(info->count.id);
break;
default:
return -EINVAL;
}
return 0;
}
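/*
 * The rule-add message carries a variable-length array of actions, so it is
 * sized with struct_size() and allocated on the heap.
 */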
int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
u32 vtcam_id, u32 prio, void *key, void *keymask,
struct prestera_acl_hw_action_info *act,
u8 n_act, u32 *rule_id)
{
struct prestera_msg_vtcam_rule_add_req *req;
struct prestera_msg_vtcam_resp resp;
size_t size;
int err;
u8 i;
size = struct_size(req, actions_msg, n_act);
req = kzalloc(size, GFP_KERNEL);
if (!req)
return -ENOMEM;
req->n_act = __cpu_to_le32(n_act);
/* put acl matches into the message */
memcpy(req->key, key, sizeof(req->key));
memcpy(req->keymask, keymask, sizeof(req->keymask));
/* put acl actions into the message */
for (i = 0; i < n_act; i++) {
err = prestera_acl_rule_add_put_action(&req->actions_msg[i],
&act[i]);
if (err)
goto free_buff;
}
req->vtcam_id = __cpu_to_le32(vtcam_id);
req->prio = __cpu_to_le32(prio);
err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_ADD,
&req->cmd, size, &resp.ret, sizeof(resp));
if (err)
goto free_buff;
*rule_id = __le32_to_cpu(resp.rule_id);
free_buff:
kfree(req);
return err;
}
int prestera_hw_vtcam_rule_del(struct prestera_switch *sw,
u32 vtcam_id, u32 rule_id)
{
struct prestera_msg_vtcam_rule_del_req req = {
.vtcam_id = __cpu_to_le32(vtcam_id),
.id = __cpu_to_le32(rule_id)
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE,
&req.cmd, sizeof(req));
}
int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw,
struct prestera_acl_iface *iface,
u32 vtcam_id, u16 pcl_id)
{
struct prestera_msg_vtcam_bind_req req = {
.vtcam_id = __cpu_to_le32(vtcam_id),
.type = __cpu_to_le16(iface->type),
.pcl_id = __cpu_to_le16(pcl_id)
};
if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) {
req.port.dev_id = __cpu_to_le32(iface->port->dev_id);
req.port.hw_id = __cpu_to_le32(iface->port->hw_id);
} else {
req.index = __cpu_to_le32(iface->index);
}
return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND,
&req.cmd, sizeof(req));
}
int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw,
struct prestera_acl_iface *iface,
u32 vtcam_id)
{
struct prestera_msg_vtcam_bind_req req = {
.vtcam_id = __cpu_to_le32(vtcam_id),
.type = __cpu_to_le16(iface->type)
};
if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) {
req.port.dev_id = __cpu_to_le32(iface->port->dev_id);
req.port.hw_id = __cpu_to_le32(iface->port->hw_id);
} else {
req.index = __cpu_to_le32(iface->index);
}
return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND,
&req.cmd, sizeof(req));
}
int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id)
{
struct prestera_msg_span_resp resp;
struct prestera_msg_span_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
int err;
err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_SPAN_GET,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
*span_id = resp.id;
return 0;
}
int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id,
bool ingress)
{
struct prestera_msg_span_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.id = span_id,
};
enum prestera_cmd_type_t cmd_type;
if (ingress)
cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND;
else
cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND;
return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req));
}
int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress)
{
struct prestera_msg_span_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
enum prestera_cmd_type_t cmd_type;
if (ingress)
cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND;
else
cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND;
return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req));
}
int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id)
{
struct prestera_msg_span_req req = {
.id = span_id
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_SPAN_RELEASE,
&req.cmd, sizeof(req));
}
int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_TYPE),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
struct prestera_msg_port_attr_resp resp;
int err;
err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
*type = resp.param.type;
return 0;
}
int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_SPEED),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
struct prestera_msg_port_attr_resp resp;
int err;
err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
*speed = __le32_to_cpu(resp.param.speed);
return 0;
}
int prestera_hw_port_autoneg_restart(struct prestera_port *port)
{
struct prestera_msg_port_attr_req req = {
.attr =
__cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_port_stats_get(const struct prestera_port *port,
struct prestera_port_stats *st)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_STATS),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
struct prestera_msg_port_stats_resp resp;
__le64 *hw = resp.stats;
int err;
err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
st->good_octets_received =
__le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT]);
st->bad_octets_received =
__le64_to_cpu(hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT]);
st->mac_trans_error =
__le64_to_cpu(hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT]);
st->broadcast_frames_received =
__le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT]);
st->multicast_frames_received =
__le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_RCV_CNT]);
st->frames_64_octets = __le64_to_cpu(hw[PRESTERA_PORT_PKTS_64L_CNT]);
st->frames_65_to_127_octets =
__le64_to_cpu(hw[PRESTERA_PORT_PKTS_65TO127L_CNT]);
st->frames_128_to_255_octets =
__le64_to_cpu(hw[PRESTERA_PORT_PKTS_128TO255L_CNT]);
st->frames_256_to_511_octets =
__le64_to_cpu(hw[PRESTERA_PORT_PKTS_256TO511L_CNT]);
st->frames_512_to_1023_octets =
__le64_to_cpu(hw[PRESTERA_PORT_PKTS_512TO1023L_CNT]);
st->frames_1024_to_max_octets =
__le64_to_cpu(hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT]);
st->excessive_collision =
__le64_to_cpu(hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT]);
st->multicast_frames_sent =
__le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_SENT_CNT]);
st->broadcast_frames_sent =
__le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT]);
st->fc_sent = __le64_to_cpu(hw[PRESTERA_PORT_FC_SENT_CNT]);
st->fc_received = __le64_to_cpu(hw[PRESTERA_PORT_GOOD_FC_RCV_CNT]);
st->buffer_overrun = __le64_to_cpu(hw[PRESTERA_PORT_DROP_EVENTS_CNT]);
st->undersize = __le64_to_cpu(hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT]);
st->fragments = __le64_to_cpu(hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT]);
st->oversize = __le64_to_cpu(hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT]);
st->jabber = __le64_to_cpu(hw[PRESTERA_PORT_JABBER_PKTS_CNT]);
st->rx_error_frame_received =
__le64_to_cpu(hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT]);
st->bad_crc = __le64_to_cpu(hw[PRESTERA_PORT_BAD_CRC_CNT]);
st->collisions = __le64_to_cpu(hw[PRESTERA_PORT_COLLISIONS_CNT]);
st->late_collision =
__le64_to_cpu(hw[PRESTERA_PORT_LATE_COLLISIONS_CNT]);
st->unicast_frames_received =
__le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT]);
st->unicast_frames_sent =
__le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT]);
st->sent_multiple =
__le64_to_cpu(hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT]);
st->sent_deferred =
__le64_to_cpu(hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT]);
st->good_octets_sent =
__le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT]);
return 0;
}
int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LEARNING),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.param = {
.learning = enable,
}
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.param = {
.flood_ext = {
.type = PRESTERA_PORT_FLOOD_TYPE_UC,
.enable = flood,
}
}
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.param = {
.flood_ext = {
.type = PRESTERA_PORT_FLOOD_TYPE_MC,
.enable = flood,
}
}
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_port_br_locked_set(const struct prestera_port *port,
bool br_locked)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LOCKED),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.param = {
.br_locked = br_locked,
}
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
.vid = __cpu_to_le16(vid),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_CREATE,
&req.cmd, sizeof(req));
}
int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
.vid = __cpu_to_le16(vid),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_DELETE,
&req.cmd, sizeof(req));
}
int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
bool is_member, bool untagged)
{
struct prestera_msg_vlan_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.vid = __cpu_to_le16(vid),
.is_member = is_member,
.is_tagged = !untagged,
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PORT_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid)
{
struct prestera_msg_vlan_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.vid = __cpu_to_le16(vid),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PVID_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state)
{
struct prestera_msg_stp_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.vid = __cpu_to_le16(vid),
.state = state,
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_STP_PORT_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
u16 vid, bool dynamic)
{
struct prestera_msg_fdb_req req = {
.dest = {
.dev = __cpu_to_le32(port->dev_id),
.port = __cpu_to_le32(port->hw_id),
},
.vid = __cpu_to_le16(vid),
.dynamic = dynamic,
};
ether_addr_copy(req.mac, mac);
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_ADD,
&req.cmd, sizeof(req));
}
int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
u16 vid)
{
struct prestera_msg_fdb_req req = {
.dest = {
.dev = __cpu_to_le32(port->dev_id),
.port = __cpu_to_le32(port->hw_id),
},
.vid = __cpu_to_le16(vid),
};
ether_addr_copy(req.mac, mac);
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_DELETE,
&req.cmd, sizeof(req));
}
int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id,
const unsigned char *mac, u16 vid, bool dynamic)
{
struct prestera_msg_fdb_req req = {
.dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
.dest = {
.lag_id = __cpu_to_le16(lag_id),
},
.vid = __cpu_to_le16(vid),
.dynamic = dynamic,
};
ether_addr_copy(req.mac, mac);
return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_ADD,
&req.cmd, sizeof(req));
}
int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id,
const unsigned char *mac, u16 vid)
{
struct prestera_msg_fdb_req req = {
.dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
.dest = {
.lag_id = __cpu_to_le16(lag_id),
},
.vid = __cpu_to_le16(vid),
};
ether_addr_copy(req.mac, mac);
return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_DELETE,
&req.cmd, sizeof(req));
}
int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode)
{
struct prestera_msg_fdb_req req = {
.dest = {
.dev = __cpu_to_le32(port->dev_id),
.port = __cpu_to_le32(port->hw_id),
},
.flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
&req.cmd, sizeof(req));
}
int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode)
{
struct prestera_msg_fdb_req req = {
.vid = __cpu_to_le16(vid),
.flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN,
&req.cmd, sizeof(req));
}
int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
u32 mode)
{
struct prestera_msg_fdb_req req = {
.dest = {
.dev = __cpu_to_le32(port->dev_id),
.port = __cpu_to_le32(port->hw_id),
},
.vid = __cpu_to_le16(vid),
.flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
&req.cmd, sizeof(req));
}
int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id,
u32 mode)
{
struct prestera_msg_fdb_req req = {
.dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
.dest = {
.lag_id = __cpu_to_le16(lag_id),
},
.flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
&req.cmd, sizeof(req));
}
int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw,
u16 lag_id, u16 vid, u32 mode)
{
struct prestera_msg_fdb_req req = {
.dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
.dest = {
.lag_id = __cpu_to_le16(lag_id),
},
.vid = __cpu_to_le16(vid),
.flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
&req.cmd, sizeof(req));
}
int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id)
{
struct prestera_msg_bridge_resp resp;
struct prestera_msg_bridge_req req;
int err;
err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_BRIDGE_CREATE,
&req.cmd, sizeof(req),
&resp.ret, sizeof(resp));
if (err)
return err;
*bridge_id = __le16_to_cpu(resp.bridge);
return 0;
}
int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id)
{
struct prestera_msg_bridge_req req = {
.bridge = __cpu_to_le16(bridge_id),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_BRIDGE_DELETE,
&req.cmd, sizeof(req));
}
int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id)
{
struct prestera_msg_bridge_req req = {
.bridge = __cpu_to_le16(bridge_id),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD,
&req.cmd, sizeof(req));
}
int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id)
{
struct prestera_msg_bridge_req req = {
.bridge = __cpu_to_le16(bridge_id),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE,
&req.cmd, sizeof(req));
}
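/* Encode a driver-level interface description into the firmware iface format */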
static int prestera_iface_to_msg(struct prestera_iface *iface,
struct prestera_msg_iface *msg_if)
{
switch (iface->type) {
case PRESTERA_IF_PORT_E:
case PRESTERA_IF_VID_E:
msg_if->port = __cpu_to_le32(iface->dev_port.port_num);
msg_if->dev = __cpu_to_le32(iface->dev_port.hw_dev_num);
break;
case PRESTERA_IF_LAG_E:
msg_if->lag_id = __cpu_to_le16(iface->lag_id);
break;
default:
return -EOPNOTSUPP;
}
msg_if->vr_id = __cpu_to_le16(iface->vr_id);
msg_if->vid = __cpu_to_le16(iface->vlan_id);
msg_if->type = iface->type;
return 0;
}
int prestera_hw_rif_create(struct prestera_switch *sw,
struct prestera_iface *iif, u8 *mac, u16 *rif_id)
{
struct prestera_msg_rif_resp resp;
struct prestera_msg_rif_req req;
int err;
memcpy(req.mac, mac, ETH_ALEN);
err = prestera_iface_to_msg(iif, &req.iif);
if (err)
return err;
err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
*rif_id = __le16_to_cpu(resp.rif_id);
return err;
}
int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
struct prestera_iface *iif)
{
struct prestera_msg_rif_req req = {
.rif_id = __cpu_to_le16(rif_id),
};
int err;
err = prestera_iface_to_msg(iif, &req.iif);
if (err)
return err;
return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE, &req.cmd,
sizeof(req));
}
int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id)
{
struct prestera_msg_vr_resp resp;
struct prestera_msg_vr_req req;
int err;
err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
*vr_id = __le16_to_cpu(resp.vr_id);
return err;
}
int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id)
{
struct prestera_msg_vr_req req = {
.vr_id = __cpu_to_le16(vr_id),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_VR_DELETE, &req.cmd,
sizeof(req));
}
int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
__be32 dst, u32 dst_len, u32 grp_id)
{
struct prestera_msg_lpm_req req = {
.dst_len = __cpu_to_le32(dst_len),
.vr_id = __cpu_to_le16(vr_id),
.grp_id = __cpu_to_le32(grp_id),
.dst.u.ipv4 = dst
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_ADD, &req.cmd,
sizeof(req));
}
int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
__be32 dst, u32 dst_len)
{
struct prestera_msg_lpm_req req = {
.dst_len = __cpu_to_le32(dst_len),
.vr_id = __cpu_to_le16(vr_id),
.dst.u.ipv4 = dst
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE, &req.cmd,
sizeof(req));
}
int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count,
struct prestera_neigh_info *nhs, u32 grp_id)
{
struct prestera_msg_nh_req req = { .size = __cpu_to_le32((u32)count),
.grp_id = __cpu_to_le32(grp_id) };
int i, err;
for (i = 0; i < count; i++) {
req.nh[i].is_active = nhs[i].connected;
memcpy(&req.nh[i].mac, nhs[i].ha, ETH_ALEN);
err = prestera_iface_to_msg(&nhs[i].iface, &req.nh[i].oif);
if (err)
return err;
}
return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET, &req.cmd,
sizeof(req));
}
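/*
 * Fetch the nexthop-group activity bitmap from the firmware in
 * PRESTERA_MSG_CHUNK_SIZE pieces until buf_size bytes have been filled.
 * Note the ~1 KB response buffer is static, which keeps it off the stack.
 */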
int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw,
u8 *hw_state, u32 buf_size /* Buffer in bytes */)
{
static struct prestera_msg_nh_chunk_resp resp;
struct prestera_msg_nh_chunk_req req;
u32 buf_offset;
int err;
memset(&hw_state[0], 0, buf_size);
buf_offset = 0;
while (1) {
if (buf_offset >= buf_size)
break;
memset(&req, 0, sizeof(req));
req.offset = __cpu_to_le32(buf_offset * 8); /* 8 bits in u8 */
err = prestera_cmd_ret(sw,
PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET,
&req.cmd, sizeof(req), &resp.ret,
sizeof(resp));
if (err)
return err;
memcpy(&hw_state[buf_offset], &resp.hw_state[0],
buf_offset + PRESTERA_MSG_CHUNK_SIZE > buf_size ?
buf_size - buf_offset : PRESTERA_MSG_CHUNK_SIZE);
buf_offset += PRESTERA_MSG_CHUNK_SIZE;
}
return 0;
}
int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count,
u32 *grp_id)
{
struct prestera_msg_nh_grp_req req = { .size = __cpu_to_le32((u32)nh_count) };
struct prestera_msg_nh_grp_resp resp;
int err;
err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
*grp_id = __le32_to_cpu(resp.grp_id);
return err;
}
int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count,
u32 grp_id)
{
struct prestera_msg_nh_grp_req req = {
.grp_id = __cpu_to_le32(grp_id),
.size = __cpu_to_le32(nh_count)
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE,
&req.cmd, sizeof(req));
}
int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params)
{
struct prestera_msg_rxtx_resp resp;
struct prestera_msg_rxtx_req req;
int err;
req.use_sdma = params->use_sdma;
err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_RXTX_INIT,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
params->map_addr = __le32_to_cpu(resp.map_addr);
return 0;
}
int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id)
{
struct prestera_msg_lag_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.lag_id = __cpu_to_le16(lag_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_ADD,
&req.cmd, sizeof(req));
}
int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id)
{
struct prestera_msg_lag_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.lag_id = __cpu_to_le16(lag_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE,
&req.cmd, sizeof(req));
}
int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id,
bool enable)
{
struct prestera_msg_lag_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.lag_id = __cpu_to_le16(lag_id),
};
u32 cmd;
cmd = enable ? PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE :
PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE;
return prestera_cmd(port->sw, cmd, &req.cmd, sizeof(req));
}
int
prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code,
enum prestera_hw_cpu_code_cnt_t counter_type,
u64 *packet_count)
{
struct prestera_msg_cpu_code_counter_req req = {
.counter_type = counter_type,
.code = code,
};
struct mvsw_msg_cpu_code_counter_ret resp;
int err;
err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
*packet_count = __le64_to_cpu(resp.packet_count);
return 0;
}
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
prestera_event_cb_t fn,
void *arg)
{
struct prestera_fw_event_handler *eh;
eh = __find_event_handler(sw, type);
if (eh)
return -EEXIST;
eh = kmalloc(sizeof(*eh), GFP_KERNEL);
if (!eh)
return -ENOMEM;
eh->type = type;
eh->func = fn;
eh->arg = arg;
INIT_LIST_HEAD(&eh->list);
list_add_rcu(&eh->list, &sw->event_handlers);
return 0;
}
void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
enum prestera_event_type type,
prestera_event_cb_t fn)
{
struct prestera_fw_event_handler *eh;
eh = __find_event_handler(sw, type);
if (!eh)
return;
list_del_rcu(&eh->list);
kfree_rcu(eh, rcu);
}
int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id)
{
struct prestera_msg_counter_req req = {
.block_id = __cpu_to_le32(block_id)
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_TRIGGER,
&req.cmd, sizeof(req));
}
int prestera_hw_counter_abort(struct prestera_switch *sw)
{
struct prestera_msg_counter_req req;
return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_ABORT,
&req.cmd, sizeof(req));
}
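/*
 * Read up to *len counter entries for the given block and accumulate the
 * packet/byte values into stats; *len is updated to the number of entries
 * actually returned and *done mirrors the firmware's completion flag.
 */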
int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx,
u32 *len, bool *done,
struct prestera_counter_stats *stats)
{
struct prestera_msg_counter_resp *resp;
struct prestera_msg_counter_req req = {
.block_id = __cpu_to_le32(idx),
.num_counters = __cpu_to_le32(*len),
};
size_t size = struct_size(resp, stats, *len);
int err, i;
resp = kmalloc(size, GFP_KERNEL);
if (!resp)
return -ENOMEM;
err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_GET,
&req.cmd, sizeof(req), &resp->ret, size);
if (err)
goto free_buff;
for (i = 0; i < __le32_to_cpu(resp->num_counters); i++) {
stats[i].packets += __le64_to_cpu(resp->stats[i].packets);
stats[i].bytes += __le64_to_cpu(resp->stats[i].bytes);
}
*len = __le32_to_cpu(resp->num_counters);
*done = __le32_to_cpu(resp->done);
free_buff:
kfree(resp);
return err;
}
int prestera_hw_counter_block_get(struct prestera_switch *sw,
u32 client, u32 *block_id, u32 *offset,
u32 *num_counters)
{
struct prestera_msg_counter_resp resp;
struct prestera_msg_counter_req req = {
.client = __cpu_to_le32(client)
};
int err;
err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
*block_id = __le32_to_cpu(resp.block_id);
*offset = __le32_to_cpu(resp.offset);
*num_counters = __le32_to_cpu(resp.num_counters);
return 0;
}
int prestera_hw_counter_block_release(struct prestera_switch *sw,
u32 block_id)
{
struct prestera_msg_counter_req req = {
.block_id = __cpu_to_le32(block_id)
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE,
&req.cmd, sizeof(req));
}
int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
u32 counter_id)
{
struct prestera_msg_counter_req req = {
.block_id = __cpu_to_le32(block_id),
.num_counters = __cpu_to_le32(counter_id)
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_CLEAR,
&req.cmd, sizeof(req));
}
int prestera_hw_policer_create(struct prestera_switch *sw, u8 type,
u32 *policer_id)
{
struct prestera_msg_policer_resp resp;
struct prestera_msg_policer_req req = {
.type = type
};
int err;
err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_POLICER_CREATE,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
*policer_id = __le32_to_cpu(resp.id);
return 0;
}
int prestera_hw_policer_release(struct prestera_switch *sw,
u32 policer_id)
{
struct prestera_msg_policer_req req = {
.id = __cpu_to_le32(policer_id)
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_RELEASE,
&req.cmd, sizeof(req));
}
int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw,
u32 policer_id, u64 cir, u32 cbs)
{
struct prestera_msg_policer_req req = {
.mode = PRESTERA_POLICER_MODE_SR_TCM,
.id = __cpu_to_le32(policer_id),
.sr_tcm = {
.cir = __cpu_to_le64(cir),
.cbs = __cpu_to_le32(cbs)
}
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_SET,
&req.cmd, sizeof(req));
}
int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain)
{
struct prestera_msg_flood_domain_create_resp resp;
struct prestera_msg_flood_domain_create_req req;
int err;
err = prestera_cmd_ret(domain->sw,
PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE, &req.cmd,
sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
domain->idx = __le32_to_cpu(resp.flood_domain_idx);
return 0;
}
int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain)
{
struct prestera_msg_flood_domain_destroy_req req = {
.flood_domain_idx = __cpu_to_le32(domain->idx),
};
return prestera_cmd(domain->sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY,
&req.cmd, sizeof(req));
}
int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
{
struct prestera_flood_domain_port *flood_domain_port;
struct prestera_msg_flood_domain_ports_set_req *req;
struct prestera_switch *sw = domain->sw;
struct prestera_port *port;
u32 ports_num = 0;
size_t buf_size;
u16 lag_id;
int err;
int i = 0;
list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
flood_domain_port_node)
ports_num++;
if (!ports_num)
return -EINVAL;
buf_size = struct_size(req, ports, ports_num);
req = kmalloc(buf_size, GFP_KERNEL);
if (!req)
return -ENOMEM;
req->flood_domain_idx = __cpu_to_le32(domain->idx);
req->ports_num = __cpu_to_le32(ports_num);
list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
flood_domain_port_node) {
if (netif_is_lag_master(flood_domain_port->dev)) {
if (prestera_lag_id(sw, flood_domain_port->dev,
&lag_id)) {
kfree(req);
return -EINVAL;
}
req->ports[i].port_type =
__cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG);
req->ports[i].lag_id = __cpu_to_le16(lag_id);
} else {
port = prestera_port_dev_lower_find(flood_domain_port->dev);
req->ports[i].port_type =
__cpu_to_le16(PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT);
req->ports[i].dev_num = __cpu_to_le32(port->dev_id);
req->ports[i].port_num = __cpu_to_le32(port->hw_id);
}
req->ports[i].vid = __cpu_to_le16(flood_domain_port->vid);
i++;
}
err = prestera_cmd(sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET,
&req->cmd, buf_size);
kfree(req);
return err;
}
int prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain)
{
struct prestera_msg_flood_domain_ports_reset_req req = {
.flood_domain_idx = __cpu_to_le32(domain->idx),
};
return prestera_cmd(domain->sw,
PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET, &req.cmd,
sizeof(req));
}
int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb)
{
struct prestera_msg_mdb_create_req req = {
.flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
.vid = __cpu_to_le16(mdb->vid),
};
memcpy(req.mac, mdb->addr, ETH_ALEN);
return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_CREATE, &req.cmd,
sizeof(req));
}
int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb)
{
struct prestera_msg_mdb_destroy_req req = {
.flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
.vid = __cpu_to_le16(mdb->vid),
};
memcpy(req.mac, mdb->addr, ETH_ALEN);
return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_DESTROY, &req.cmd,
sizeof(req));
}
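/*
 * Note on ordering (a sketch inferred from the helpers above): an MDB entry
 * references a flood domain through flood_domain_idx, so the expected
 * sequence is prestera_hw_flood_domain_create(), then
 * prestera_hw_flood_domain_ports_set() to program the member ports/LAGs,
 * then prestera_hw_mdb_create() for the {MAC, VID} pair; teardown runs in
 * reverse with prestera_hw_mdb_destroy() and
 * prestera_hw_flood_domain_destroy().
 */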
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) BitBox Ltd 2010
*/
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/platform_data/asoc-imx-ssi.h>
#include "irq-common.h"
int mxc_set_irq_fiq(unsigned int irq, unsigned int type)
{
struct irq_chip_generic *gc;
struct mxc_extra_irq *exirq;
int ret;
ret = -ENOSYS;
gc = irq_get_chip_data(irq);
if (gc && gc->private) {
exirq = gc->private;
if (exirq->set_irq_fiq) {
struct irq_data *d = irq_get_irq_data(irq);
ret = exirq->set_irq_fiq(irqd_to_hwirq(d), type);
}
}
return ret;
}
EXPORT_SYMBOL(mxc_set_irq_fiq);
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
* Copyright (c) 2024 David Vernet <[email protected]>
* Copyright (c) 2024 Tejun Heo <[email protected]>
*/
#include <stdio.h>
#include <unistd.h>
#include <signal.h>
#include <libgen.h>
#include <bpf/bpf.h>
#include "scx_test.h"
const char help_fmt[] =
"The runner for sched_ext tests.\n"
"\n"
"The runner is statically linked against all testcases, and runs them all serially.\n"
"It's required for the testcases to be serial, as only a single host-wide sched_ext\n"
"scheduler may be loaded at any given time."
"\n"
"Usage: %s [-t TEST] [-h]\n"
"\n"
" -t TEST Only run tests whose name includes this string\n"
" -s Include print output for skipped tests\n"
" -q Don't print the test descriptions during run\n"
" -h Display this help and exit\n";
static volatile int exit_req;
static bool quiet, print_skipped;
#define MAX_SCX_TESTS 2048
static struct scx_test __scx_tests[MAX_SCX_TESTS];
static unsigned __scx_num_tests = 0;
static void sigint_handler(int simple)
{
exit_req = 1;
}
static void print_test_preamble(const struct scx_test *test, bool quiet)
{
printf("===== START =====\n");
printf("TEST: %s\n", test->name);
if (!quiet)
printf("DESCRIPTION: %s\n", test->description);
printf("OUTPUT:\n");
}
static const char *status_to_result(enum scx_test_status status)
{
switch (status) {
case SCX_TEST_PASS:
case SCX_TEST_SKIP:
return "ok";
case SCX_TEST_FAIL:
return "not ok";
default:
return "<UNKNOWN>";
}
}
static void print_test_result(const struct scx_test *test,
enum scx_test_status status,
unsigned int testnum)
{
const char *result = status_to_result(status);
const char *directive = status == SCX_TEST_SKIP ? "SKIP " : "";
printf("%s %u %s # %s\n", result, testnum, test->name, directive);
printf("===== END =====\n");
}
static bool should_skip_test(const struct scx_test *test, const char *filter)
{
return !strstr(test->name, filter);
}
static enum scx_test_status run_test(const struct scx_test *test)
{
enum scx_test_status status;
void *context = NULL;
if (test->setup) {
status = test->setup(&context);
if (status != SCX_TEST_PASS)
return status;
}
status = test->run(context);
if (test->cleanup)
test->cleanup(context);
return status;
}
static bool test_valid(const struct scx_test *test)
{
if (!test) {
fprintf(stderr, "NULL test detected\n");
return false;
}
if (!test->name) {
fprintf(stderr,
"Test with no name found. Must specify test name.\n");
return false;
}
if (!test->description) {
fprintf(stderr, "Test %s requires description.\n", test->name);
return false;
}
if (!test->run) {
fprintf(stderr, "Test %s has no run() callback\n", test->name);
return false;
}
return true;
}
int main(int argc, char **argv)
{
const char *filter = NULL;
unsigned testnum = 0, i;
unsigned passed = 0, skipped = 0, failed = 0;
int opt;
signal(SIGINT, sigint_handler);
signal(SIGTERM, sigint_handler);
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
while ((opt = getopt(argc, argv, "qst:h")) != -1) {
switch (opt) {
case 'q':
quiet = true;
break;
case 's':
print_skipped = true;
break;
case 't':
filter = optarg;
break;
default:
fprintf(stderr, help_fmt, basename(argv[0]));
return opt != 'h';
}
}
for (i = 0; i < __scx_num_tests; i++) {
enum scx_test_status status;
struct scx_test *test = &__scx_tests[i];
if (filter && should_skip_test(test, filter)) {
/*
* Printing the skipped tests and their preambles can
* add a lot of noise to the runner output. Printing
* this is only really useful for CI, so let's skip it
* by default.
*/
if (print_skipped) {
print_test_preamble(test, quiet);
print_test_result(test, SCX_TEST_SKIP, ++testnum);
}
continue;
}
print_test_preamble(test, quiet);
status = run_test(test);
print_test_result(test, status, ++testnum);
switch (status) {
case SCX_TEST_PASS:
passed++;
break;
case SCX_TEST_SKIP:
skipped++;
break;
case SCX_TEST_FAIL:
failed++;
break;
}
}
printf("\n\n=============================\n\n");
printf("RESULTS:\n\n");
printf("PASSED: %u\n", passed);
printf("SKIPPED: %u\n", skipped);
printf("FAILED: %u\n", failed);
return 0;
}
void scx_test_register(struct scx_test *test)
{
SCX_BUG_ON(!test_valid(test), "Invalid test found");
SCX_BUG_ON(__scx_num_tests >= MAX_SCX_TESTS, "Maximum tests exceeded");
__scx_tests[__scx_num_tests++] = *test;
}
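/*
 * Testcases add themselves to __scx_tests through scx_test_register(),
 * typically from a constructor generated by a registration macro in
 * scx_test.h. Illustrative sketch only (the exact macro is not shown in
 * this file):
 *
 *	static struct scx_test example_test = {
 *		.name = "example",
 *		.description = "Minimal example testcase",
 *		.run = example_run,
 *	};
 *	__attribute__((constructor))
 *	static void example_init(void) { scx_test_register(&example_test); }
 */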
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for MPC52xx processor BestComm peripheral controller
*
* Copyright (C) 2006-2007 Sylvain Munaut <[email protected]>
* Copyright (C) 2005 Varma Electronics Oy,
* ( by Andrey Volkov <[email protected]> )
* Copyright (C) 2003-2004 MontaVista, Software, Inc.
* ( by Dale Farnsworth <[email protected]> )
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mpc52xx.h>
#include <linux/fsl/bestcomm/sram.h>
#include <linux/fsl/bestcomm/bestcomm_priv.h>
#include "linux/fsl/bestcomm/bestcomm.h"
#define DRIVER_NAME "bestcomm-core"
/* MPC5200 device tree match tables */
static const struct of_device_id mpc52xx_sram_ids[] = {
{ .compatible = "fsl,mpc5200-sram", },
{ .compatible = "mpc5200-sram", },
{}
};
struct bcom_engine *bcom_eng = NULL;
EXPORT_SYMBOL_GPL(bcom_eng); /* needed for inline functions */
/* ======================================================================== */
/* Public and private API */
/* ======================================================================== */
/* Private API */
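/*
 * Task allocation is a two step process: a free task number is first
 * reserved under the engine lock by writing a dummy value into that
 * task's TDT stop pointer, then the bcom_task structure (plus optional
 * private area and BD ring in SRAM) is allocated outside the lock.
 */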
struct bcom_task *
bcom_task_alloc(int bd_count, int bd_size, int priv_size)
{
int i, tasknum = -1;
struct bcom_task *tsk;
/* Don't try to do anything if bestcomm init failed */
if (!bcom_eng)
return NULL;
/* Get and reserve a task num */
spin_lock(&bcom_eng->lock);
for (i=0; i<BCOM_MAX_TASKS; i++)
if (!bcom_eng->tdt[i].stop) { /* we use stop as a marker */
bcom_eng->tdt[i].stop = 0xfffffffful; /* dummy addr */
tasknum = i;
break;
}
spin_unlock(&bcom_eng->lock);
if (tasknum < 0)
return NULL;
/* Allocate our structure */
tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL);
if (!tsk)
goto error;
tsk->tasknum = tasknum;
if (priv_size)
tsk->priv = (void*)tsk + sizeof(struct bcom_task);
/* Get IRQ of that task */
tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum);
if (!tsk->irq)
goto error;
/* Init the BDs, if needed */
if (bd_count) {
tsk->cookie = kmalloc_array(bd_count, sizeof(void *),
GFP_KERNEL);
if (!tsk->cookie)
goto error;
tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa);
if (!tsk->bd)
goto error;
memset_io(tsk->bd, 0x00, bd_count * bd_size);
tsk->num_bd = bd_count;
tsk->bd_size = bd_size;
}
return tsk;
error:
if (tsk) {
if (tsk->irq)
irq_dispose_mapping(tsk->irq);
bcom_sram_free(tsk->bd);
kfree(tsk->cookie);
kfree(tsk);
}
bcom_eng->tdt[tasknum].stop = 0;
return NULL;
}
EXPORT_SYMBOL_GPL(bcom_task_alloc);
void
bcom_task_free(struct bcom_task *tsk)
{
/* Stop the task */
bcom_disable_task(tsk->tasknum);
/* Clear TDT */
bcom_eng->tdt[tsk->tasknum].start = 0;
bcom_eng->tdt[tsk->tasknum].stop = 0;
/* Free everything */
irq_dispose_mapping(tsk->irq);
bcom_sram_free(tsk->bd);
kfree(tsk->cookie);
kfree(tsk);
}
EXPORT_SYMBOL_GPL(bcom_task_free);
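/*
 * A task image is a struct bcom_task_header immediately followed by three
 * u32 arrays: desc_size descriptor words, var_size variable words and
 * inc_size increment words, in that order (see the *_src pointers
 * computed below).
 */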
int
bcom_load_image(int task, u32 *task_image)
{
struct bcom_task_header *hdr = (struct bcom_task_header *)task_image;
struct bcom_tdt *tdt;
u32 *desc, *var, *inc;
u32 *desc_src, *var_src, *inc_src;
/* Safety checks */
if (hdr->magic != BCOM_TASK_MAGIC) {
printk(KERN_ERR DRIVER_NAME
": Trying to load invalid microcode\n");
return -EINVAL;
}
if ((task < 0) || (task >= BCOM_MAX_TASKS)) {
printk(KERN_ERR DRIVER_NAME
": Trying to load invalid task %d\n", task);
return -EINVAL;
}
/* Initial load or reload */
tdt = &bcom_eng->tdt[task];
if (tdt->start) {
desc = bcom_task_desc(task);
if (hdr->desc_size != bcom_task_num_descs(task)) {
printk(KERN_ERR DRIVER_NAME
": Trying to reload wrong task image "
"(%d size %d/%d)!\n",
task,
hdr->desc_size,
bcom_task_num_descs(task));
return -EINVAL;
}
} else {
phys_addr_t start_pa;
desc = bcom_sram_alloc(hdr->desc_size * sizeof(u32), 4, &start_pa);
if (!desc)
return -ENOMEM;
tdt->start = start_pa;
tdt->stop = start_pa + ((hdr->desc_size-1) * sizeof(u32));
}
var = bcom_task_var(task);
inc = bcom_task_inc(task);
/* Clear & copy */
memset_io(var, 0x00, BCOM_VAR_SIZE);
memset_io(inc, 0x00, BCOM_INC_SIZE);
desc_src = (u32 *)(hdr + 1);
var_src = desc_src + hdr->desc_size;
inc_src = var_src + hdr->var_size;
memcpy_toio(desc, desc_src, hdr->desc_size * sizeof(u32));
memcpy_toio(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32));
memcpy_toio(inc, inc_src, hdr->inc_size * sizeof(u32));
return 0;
}
EXPORT_SYMBOL_GPL(bcom_load_image);
void
bcom_set_initiator(int task, int initiator)
{
int i;
int num_descs;
u32 *desc;
int next_drd_has_initiator;
bcom_set_tcr_initiator(task, initiator);
/*
 * Just setting tcr is apparently not enough due to some problem
 * with it, so we just go through all the microcode and replace the
 * initiator in the DRD directly.
 */
desc = bcom_task_desc(task);
next_drd_has_initiator = 1;
num_descs = bcom_task_num_descs(task);
for (i=0; i<num_descs; i++, desc++) {
if (!bcom_desc_is_drd(*desc))
continue;
if (next_drd_has_initiator)
if (bcom_desc_initiator(*desc) != BCOM_INITIATOR_ALWAYS)
bcom_set_desc_initiator(desc, initiator);
next_drd_has_initiator = !bcom_drd_is_extended(*desc);
}
}
EXPORT_SYMBOL_GPL(bcom_set_initiator);
/* Public API */
void
bcom_enable(struct bcom_task *tsk)
{
bcom_enable_task(tsk->tasknum);
}
EXPORT_SYMBOL_GPL(bcom_enable);
void
bcom_disable(struct bcom_task *tsk)
{
bcom_disable_task(tsk->tasknum);
}
EXPORT_SYMBOL_GPL(bcom_disable);
/* ======================================================================== */
/* Engine init/cleanup */
/* ======================================================================== */
/* Function Descriptor table */
/* this will need to be updated if Freescale changes their task code FDT */
static u32 fdt_ops[] = {
0xa0045670, /* FDT[48] - load_acc() */
0x80045670, /* FDT[49] - unload_acc() */
0x21800000, /* FDT[50] - and() */
0x21e00000, /* FDT[51] - or() */
0x21500000, /* FDT[52] - xor() */
0x21400000, /* FDT[53] - andn() */
0x21500000, /* FDT[54] - not() */
0x20400000, /* FDT[55] - add() */
0x20500000, /* FDT[56] - sub() */
0x20800000, /* FDT[57] - lsh() */
0x20a00000, /* FDT[58] - rsh() */
0xc0170000, /* FDT[59] - crc8() */
0xc0145670, /* FDT[60] - crc16() */
0xc0345670, /* FDT[61] - crc32() */
0xa0076540, /* FDT[62] - endian32() */
0xa0000760, /* FDT[63] - endian16() */
};
static int bcom_engine_init(void)
{
int task;
phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa;
unsigned int tdt_size, ctx_size, var_size, fdt_size;
/* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */
tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt);
ctx_size = BCOM_MAX_TASKS * BCOM_CTX_SIZE;
var_size = BCOM_MAX_TASKS * (BCOM_VAR_SIZE + BCOM_INC_SIZE);
fdt_size = BCOM_FDT_SIZE;
bcom_eng->tdt = bcom_sram_alloc(tdt_size, sizeof(u32), &tdt_pa);
bcom_eng->ctx = bcom_sram_alloc(ctx_size, BCOM_CTX_ALIGN, &ctx_pa);
bcom_eng->var = bcom_sram_alloc(var_size, BCOM_VAR_ALIGN, &var_pa);
bcom_eng->fdt = bcom_sram_alloc(fdt_size, BCOM_FDT_ALIGN, &fdt_pa);
if (!bcom_eng->tdt || !bcom_eng->ctx || !bcom_eng->var || !bcom_eng->fdt) {
printk(KERN_ERR "DMA: SRAM alloc failed in engine init !\n");
bcom_sram_free(bcom_eng->tdt);
bcom_sram_free(bcom_eng->ctx);
bcom_sram_free(bcom_eng->var);
bcom_sram_free(bcom_eng->fdt);
return -ENOMEM;
}
memset_io(bcom_eng->tdt, 0x00, tdt_size);
memset_io(bcom_eng->ctx, 0x00, ctx_size);
memset_io(bcom_eng->var, 0x00, var_size);
memset_io(bcom_eng->fdt, 0x00, fdt_size);
/* Copy the FDT for the EU#3 */
memcpy_toio(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));
/* Initialize Task base structure */
for (task=0; task<BCOM_MAX_TASKS; task++)
{
out_be16(&bcom_eng->regs->tcr[task], 0);
out_8(&bcom_eng->regs->ipr[task], 0);
bcom_eng->tdt[task].context = ctx_pa;
bcom_eng->tdt[task].var = var_pa;
bcom_eng->tdt[task].fdt = fdt_pa;
var_pa += BCOM_VAR_SIZE + BCOM_INC_SIZE;
ctx_pa += BCOM_CTX_SIZE;
}
out_be32(&bcom_eng->regs->taskBar, tdt_pa);
/* Init 'always' initiator */
out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS);
/* Disable COMM Bus Prefetch on the original 5200; it's broken */
if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
bcom_disable_prefetch();
/* Init lock */
spin_lock_init(&bcom_eng->lock);
return 0;
}
static void
bcom_engine_cleanup(void)
{
int task;
/* Stop all tasks */
for (task=0; task<BCOM_MAX_TASKS; task++)
{
out_be16(&bcom_eng->regs->tcr[task], 0);
out_8(&bcom_eng->regs->ipr[task], 0);
}
out_be32(&bcom_eng->regs->taskBar, 0ul);
/* Release the SRAM zones */
bcom_sram_free(bcom_eng->tdt);
bcom_sram_free(bcom_eng->ctx);
bcom_sram_free(bcom_eng->var);
bcom_sram_free(bcom_eng->fdt);
}
/* ======================================================================== */
/* OF platform driver */
/* ======================================================================== */
static int mpc52xx_bcom_probe(struct platform_device *op)
{
struct device_node *ofn_sram;
struct resource res_bcom;
int rv;
/* Inform user we're ok so far */
printk(KERN_INFO "DMA: MPC52xx BestComm driver\n");
/* Get the bestcomm node */
of_node_get(op->dev.of_node);
/* Prepare SRAM */
ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids);
if (!ofn_sram) {
printk(KERN_ERR DRIVER_NAME ": "
"No SRAM found in device tree\n");
rv = -ENODEV;
goto error_ofput;
}
rv = bcom_sram_init(ofn_sram, DRIVER_NAME);
of_node_put(ofn_sram);
if (rv) {
printk(KERN_ERR DRIVER_NAME ": "
"Error in SRAM init\n");
goto error_ofput;
}
/* Get a clean struct */
bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL);
if (!bcom_eng) {
rv = -ENOMEM;
goto error_sramclean;
}
/* Save the node */
bcom_eng->ofnode = op->dev.of_node;
/* Get, reserve & map io */
if (of_address_to_resource(op->dev.of_node, 0, &res_bcom)) {
printk(KERN_ERR DRIVER_NAME ": "
"Can't get resource\n");
rv = -EINVAL;
goto error_sramclean;
}
if (!request_mem_region(res_bcom.start, resource_size(&res_bcom),
DRIVER_NAME)) {
printk(KERN_ERR DRIVER_NAME ": "
"Can't request registers region\n");
rv = -EBUSY;
goto error_sramclean;
}
bcom_eng->regs_base = res_bcom.start;
bcom_eng->regs = ioremap(res_bcom.start, sizeof(struct mpc52xx_sdma));
if (!bcom_eng->regs) {
printk(KERN_ERR DRIVER_NAME ": "
"Can't map registers\n");
rv = -ENOMEM;
goto error_release;
}
/* Now, do the real init */
rv = bcom_engine_init();
if (rv)
goto error_unmap;
/* Done ! */
printk(KERN_INFO "DMA: MPC52xx BestComm engine @%08lx ok !\n",
(long)bcom_eng->regs_base);
return 0;
/* Error path */
error_unmap:
iounmap(bcom_eng->regs);
error_release:
release_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma));
error_sramclean:
kfree(bcom_eng);
bcom_sram_cleanup();
error_ofput:
of_node_put(op->dev.of_node);
printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n");
return rv;
}
static void mpc52xx_bcom_remove(struct platform_device *op)
{
/* Clean up the engine */
bcom_engine_cleanup();
/* Cleanup SRAM */
bcom_sram_cleanup();
/* Release regs */
iounmap(bcom_eng->regs);
release_mem_region(bcom_eng->regs_base, sizeof(struct mpc52xx_sdma));
/* Release the node */
of_node_put(bcom_eng->ofnode);
/* Release memory */
kfree(bcom_eng);
bcom_eng = NULL;
}
static const struct of_device_id mpc52xx_bcom_of_match[] = {
{ .compatible = "fsl,mpc5200-bestcomm", },
{ .compatible = "mpc5200-bestcomm", },
{},
};
MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
static struct platform_driver mpc52xx_bcom_of_platform_driver = {
.probe = mpc52xx_bcom_probe,
.remove = mpc52xx_bcom_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = mpc52xx_bcom_of_match,
},
};
/* ======================================================================== */
/* Module */
/* ======================================================================== */
static int __init
mpc52xx_bcom_init(void)
{
return platform_driver_register(&mpc52xx_bcom_of_platform_driver);
}
static void __exit
mpc52xx_bcom_exit(void)
{
platform_driver_unregister(&mpc52xx_bcom_of_platform_driver);
}
/*
 * If we're not a module, we must make sure everything is set up before
 * anyone tries to use us ... that's why we use subsys_initcall instead
 * of module_init.
 */
subsys_initcall(mpc52xx_bcom_init);
module_exit(mpc52xx_bcom_exit);
MODULE_DESCRIPTION("Freescale MPC52xx BestComm DMA");
MODULE_AUTHOR("Sylvain Munaut <[email protected]>");
MODULE_AUTHOR("Andrey Volkov <[email protected]>");
MODULE_AUTHOR("Dale Farnsworth <[email protected]>");
MODULE_LICENSE("GPL v2");
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2016 Andreas Färber
*/
/dts-v1/;
#include "meson-gxbb-vega-s95.dtsi"
/ {
compatible = "tronsmart,vega-s95-pro", "tronsmart,vega-s95", "amlogic,meson-gxbb";
model = "Tronsmart Vega S95 Pro";
memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x0 0x40000000>;
};
};
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* ADS1100 - Texas Instruments Analog-to-Digital Converter
*
* Copyright (c) 2023, Topic Embedded Products
*
* Datasheet: https://www.ti.com/lit/gpn/ads1100
* IIO driver for ADS1100 and ADS1000 ADC 16-bit I2C
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/units.h>
#include <linux/iio/iio.h>
#include <linux/iio/types.h>
/* The ADS1100 has a single byte config register */
/* Conversion in progress bit */
#define ADS1100_CFG_ST_BSY BIT(7)
/* Single conversion bit */
#define ADS1100_CFG_SC BIT(4)
/* Data rate */
#define ADS1100_DR_MASK GENMASK(3, 2)
/* Gain */
#define ADS1100_PGA_MASK GENMASK(1, 0)
#define ADS1100_CONTINUOUS 0
#define ADS1100_SINGLESHOT ADS1100_CFG_SC
#define ADS1100_SLEEP_DELAY_MS 2000
static const int ads1100_data_rate[] = { 128, 32, 16, 8 };
static const int ads1100_data_rate_bits[] = { 12, 14, 15, 16 };
struct ads1100_data {
struct i2c_client *client;
struct regulator *reg_vdd;
struct mutex lock;
int scale_avail[2 * 4]; /* 4 gain settings */
u8 config;
bool supports_data_rate; /* Only the ADS1100 can select the rate */
};
static const struct iio_chan_spec ads1100_channel = {
.type = IIO_VOLTAGE,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_all =
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.info_mask_shared_by_all_available =
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_type = {
.sign = 's',
.realbits = 16,
.storagebits = 16,
.endianness = IIO_CPU,
},
.datasheet_name = "AIN",
};
static int ads1100_set_config_bits(struct ads1100_data *data, u8 mask, u8 value)
{
int ret;
u8 config = (data->config & ~mask) | (value & mask);
if (data->config == config)
return 0; /* Already done */
ret = i2c_master_send(data->client, &config, 1);
if (ret < 0)
return ret;
data->config = config;
return 0;
};
static int ads1100_data_bits(struct ads1100_data *data)
{
return ads1100_data_rate_bits[FIELD_GET(ADS1100_DR_MASK, data->config)];
}
static int ads1100_get_adc_result(struct ads1100_data *data, int chan, int *val)
{
int ret;
__be16 buffer;
s16 value;
if (chan != 0)
return -EINVAL;
ret = pm_runtime_resume_and_get(&data->client->dev);
if (ret < 0)
return ret;
ret = i2c_master_recv(data->client, (char *)&buffer, sizeof(buffer));
pm_runtime_mark_last_busy(&data->client->dev);
pm_runtime_put_autosuspend(&data->client->dev);
if (ret < 0) {
dev_err(&data->client->dev, "I2C read fail: %d\n", ret);
return ret;
}
/* Value is always 16-bit 2's complement */
value = be16_to_cpu(buffer);
/* Shift result to compensate for bit resolution vs. sample rate */
value <<= 16 - ads1100_data_bits(data);
*val = sign_extend32(value, 15);
return 0;
}
static int ads1100_set_scale(struct ads1100_data *data, int val, int val2)
{
int microvolts;
int gain;
/* With Vdd between 2.7 and 5V, the scale is always below 1 */
if (val)
return -EINVAL;
if (!val2)
return -EINVAL;
microvolts = regulator_get_voltage(data->reg_vdd);
/*
* val2 is in 'micro' units, n = val2 / 1000000
* result must be millivolts, d = microvolts / 1000
* the full-scale value is d/n, corresponds to 2^15,
* hence the gain = (d / n) >> 15, factoring out the 1000 and moving the
* bitshift so everything fits in 32-bits yields this formula.
*/
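/*
 * Worked example (illustrative, assuming Vdd = 3.3 V, i.e. 3300000 uV):
 * writing a scale of 0.100708 (val2 = 100708, i.e. 3300 mV / 2^15) gives
 * gain = round(3300000 / 32768) * 1000 / 100708 = 101000 / 100708 = 1,
 * so PGA = ffs(1) - 1 = 0; writing 0.025177 (3300 mV / 2^17) gives
 * 101000 / 25177 = 4, so PGA = ffs(4) - 1 = 2.
 */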
gain = DIV_ROUND_CLOSEST(microvolts, BIT(15)) * MILLI / val2;
if (gain < BIT(0) || gain > BIT(3))
return -EINVAL;
ads1100_set_config_bits(data, ADS1100_PGA_MASK, ffs(gain) - 1);
return 0;
}
static int ads1100_set_data_rate(struct ads1100_data *data, int chan, int rate)
{
unsigned int i;
unsigned int size;
size = data->supports_data_rate ? ARRAY_SIZE(ads1100_data_rate) : 1;
for (i = 0; i < size; i++) {
if (ads1100_data_rate[i] == rate)
return ads1100_set_config_bits(data, ADS1100_DR_MASK,
FIELD_PREP(ADS1100_DR_MASK, i));
}
return -EINVAL;
}
static int ads1100_get_vdd_millivolts(struct ads1100_data *data)
{
return regulator_get_voltage(data->reg_vdd) / (MICRO / MILLI);
}
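/*
 * The available scales are reported as IIO_VAL_FRACTIONAL_LOG2 pairs of
 * {Vdd in millivolts, 15 + PGA}, i.e. scale = Vdd_mV / 2^(15 + PGA) for
 * the four supported gain settings (1, 2, 4 and 8).
 */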
static void ads1100_calc_scale_avail(struct ads1100_data *data)
{
int millivolts = ads1100_get_vdd_millivolts(data);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(data->scale_avail) / 2; i++) {
data->scale_avail[i * 2 + 0] = millivolts;
data->scale_avail[i * 2 + 1] = 15 + i;
}
}
static int ads1100_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals, int *type, int *length,
long mask)
{
struct ads1100_data *data = iio_priv(indio_dev);
if (chan->type != IIO_VOLTAGE)
return -EINVAL;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
*type = IIO_VAL_INT;
*vals = ads1100_data_rate;
if (data->supports_data_rate)
*length = ARRAY_SIZE(ads1100_data_rate);
else
*length = 1;
return IIO_AVAIL_LIST;
case IIO_CHAN_INFO_SCALE:
*type = IIO_VAL_FRACTIONAL_LOG2;
*vals = data->scale_avail;
*length = ARRAY_SIZE(data->scale_avail);
return IIO_AVAIL_LIST;
default:
return -EINVAL;
}
}
static int ads1100_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
{
int ret;
struct ads1100_data *data = iio_priv(indio_dev);
mutex_lock(&data->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
break;
ret = ads1100_get_adc_result(data, chan->address, val);
if (ret >= 0)
ret = IIO_VAL_INT;
iio_device_release_direct_mode(indio_dev);
break;
case IIO_CHAN_INFO_SCALE:
/* full-scale is the supply voltage in millivolts */
*val = ads1100_get_vdd_millivolts(data);
*val2 = 15 + FIELD_GET(ADS1100_PGA_MASK, data->config);
ret = IIO_VAL_FRACTIONAL_LOG2;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = ads1100_data_rate[FIELD_GET(ADS1100_DR_MASK,
data->config)];
ret = IIO_VAL_INT;
break;
default:
ret = -EINVAL;
break;
}
mutex_unlock(&data->lock);
return ret;
}
static int ads1100_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val,
int val2, long mask)
{
struct ads1100_data *data = iio_priv(indio_dev);
int ret;
mutex_lock(&data->lock);
switch (mask) {
case IIO_CHAN_INFO_SCALE:
ret = ads1100_set_scale(data, val, val2);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = ads1100_set_data_rate(data, chan->address, val);
break;
default:
ret = -EINVAL;
break;
}
mutex_unlock(&data->lock);
return ret;
}
static const struct iio_info ads1100_info = {
.read_avail = ads1100_read_avail,
.read_raw = ads1100_read_raw,
.write_raw = ads1100_write_raw,
};
static int ads1100_setup(struct ads1100_data *data)
{
int ret;
u8 buffer[3];
/* Set up continuous sampling mode at 8 SPS */
buffer[0] = ADS1100_DR_MASK | ADS1100_CONTINUOUS;
ret = i2c_master_send(data->client, buffer, 1);
if (ret < 0)
return ret;
ret = i2c_master_recv(data->client, buffer, sizeof(buffer));
if (ret < 0)
return ret;
/* Config register returned in third byte, strip away the busy status */
data->config = buffer[2] & ~ADS1100_CFG_ST_BSY;
/* Detect the sample rate capability by checking the DR bits */
data->supports_data_rate = FIELD_GET(ADS1100_DR_MASK, buffer[2]) != 0;
return 0;
}
static void ads1100_reg_disable(void *reg)
{
regulator_disable(reg);
}
static void ads1100_disable_continuous(void *data)
{
ads1100_set_config_bits(data, ADS1100_CFG_SC, ADS1100_SINGLESHOT);
}
static int ads1100_probe(struct i2c_client *client)
{
struct iio_dev *indio_dev;
struct ads1100_data *data;
struct device *dev = &client->dev;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
dev_set_drvdata(dev, data);
data->client = client;
mutex_init(&data->lock);
indio_dev->name = "ads1100";
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = &ads1100_channel;
indio_dev->num_channels = 1;
indio_dev->info = &ads1100_info;
data->reg_vdd = devm_regulator_get(dev, "vdd");
if (IS_ERR(data->reg_vdd))
return dev_err_probe(dev, PTR_ERR(data->reg_vdd),
"Failed to get vdd regulator\n");
ret = regulator_enable(data->reg_vdd);
if (ret < 0)
return dev_err_probe(dev, ret,
"Failed to enable vdd regulator\n");
ret = devm_add_action_or_reset(dev, ads1100_reg_disable, data->reg_vdd);
if (ret)
return ret;
ret = ads1100_setup(data);
if (ret)
return dev_err_probe(dev, ret,
"Failed to communicate with device\n");
ret = devm_add_action_or_reset(dev, ads1100_disable_continuous, data);
if (ret)
return ret;
ads1100_calc_scale_avail(data);
pm_runtime_set_autosuspend_delay(dev, ADS1100_SLEEP_DELAY_MS);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_active(dev);
ret = devm_pm_runtime_enable(dev);
if (ret)
return dev_err_probe(dev, ret, "Failed to enable pm_runtime\n");
ret = devm_iio_device_register(dev, indio_dev);
if (ret)
return dev_err_probe(dev, ret,
"Failed to register IIO device\n");
return 0;
}
static int ads1100_runtime_suspend(struct device *dev)
{
struct ads1100_data *data = dev_get_drvdata(dev);
ads1100_set_config_bits(data, ADS1100_CFG_SC, ADS1100_SINGLESHOT);
regulator_disable(data->reg_vdd);
return 0;
}
static int ads1100_runtime_resume(struct device *dev)
{
struct ads1100_data *data = dev_get_drvdata(dev);
int ret;
ret = regulator_enable(data->reg_vdd);
if (ret) {
dev_err(&data->client->dev, "Failed to enable Vdd\n");
return ret;
}
/*
* We'll always change the mode bit in the config register, so there is
* no need here to "force" a write to the config register. If the device
* has been power-cycled, we'll re-write its config register now.
*/
return ads1100_set_config_bits(data, ADS1100_CFG_SC,
ADS1100_CONTINUOUS);
}
static DEFINE_RUNTIME_DEV_PM_OPS(ads1100_pm_ops,
ads1100_runtime_suspend,
ads1100_runtime_resume,
NULL);
static const struct i2c_device_id ads1100_id[] = {
{ "ads1100" },
{ "ads1000" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ads1100_id);
static const struct of_device_id ads1100_of_match[] = {
{.compatible = "ti,ads1100" },
{.compatible = "ti,ads1000" },
{ }
};
MODULE_DEVICE_TABLE(of, ads1100_of_match);
static struct i2c_driver ads1100_driver = {
.driver = {
.name = "ads1100",
.of_match_table = ads1100_of_match,
.pm = pm_ptr(&ads1100_pm_ops),
},
.probe = ads1100_probe,
.id_table = ads1100_id,
};
module_i2c_driver(ads1100_driver);
MODULE_AUTHOR("Mike Looijmans <[email protected]>");
MODULE_DESCRIPTION("Texas Instruments ADS1100 ADC driver");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Intel Corporation
* Copyright 2018 Google LLC.
*
* Author: Tomasz Figa <[email protected]>
* Author: Yong Zhi <[email protected]>
*/
#include <linux/vmalloc.h>
#include "ipu3.h"
#include "ipu3-css-pool.h"
#include "ipu3-mmu.h"
#include "ipu3-dmamap.h"
/*
* Free a buffer allocated by imgu_dmamap_alloc_buffer()
*/
static void imgu_dmamap_free_buffer(struct page **pages,
size_t size)
{
int count = size >> PAGE_SHIFT;
while (count--)
__free_page(pages[count]);
kvfree(pages);
}
/*
* Based on the implementation of __iommu_dma_alloc_pages()
* defined in drivers/iommu/dma-iommu.c
*/
static struct page **imgu_dmamap_alloc_buffer(size_t size, gfp_t gfp)
{
struct page **pages;
unsigned int i = 0, count = size >> PAGE_SHIFT;
unsigned int order_mask = 1;
const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;
/* Allocate mem for array of page ptrs */
pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
if (!pages)
return NULL;
gfp |= __GFP_HIGHMEM | __GFP_ZERO;
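/*
 * This loop is inherited from __iommu_dma_alloc_pages() and can fall back
 * across allocation orders, but with order_mask fixed to 1 it effectively
 * allocates the buffer one order-0 page at a time, filling pages[] with
 * single-page entries.
 */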
while (count) {
struct page *page = NULL;
unsigned int order_size;
for (order_mask &= (2U << __fls(count)) - 1;
order_mask; order_mask &= ~order_size) {
unsigned int order = __fls(order_mask);
order_size = 1U << order;
page = alloc_pages((order_mask - order_size) ?
gfp | high_order_gfp : gfp, order);
if (!page)
continue;
if (!order)
break;
if (!PageCompound(page)) {
split_page(page, order);
break;
}
__free_pages(page, order);
}
if (!page) {
imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);
return NULL;
}
count -= order_size;
while (order_size--)
pages[i++] = page++;
}
return pages;
}
/**
* imgu_dmamap_alloc - allocate and map a buffer into KVA
* @imgu: struct device pointer
* @map: struct to store mapping variables
* @len: size required
*
* Returns:
* KVA on success
* %NULL on failure
*/
void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
size_t len)
{
unsigned long shift = iova_shift(&imgu->iova_domain);
struct device *dev = &imgu->pci_dev->dev;
size_t size = PAGE_ALIGN(len);
int count = size >> PAGE_SHIFT;
struct page **pages;
dma_addr_t iovaddr;
struct iova *iova;
int i, rval;
dev_dbg(dev, "%s: allocating %zu\n", __func__, size);
iova = alloc_iova(&imgu->iova_domain, size >> shift,
imgu->mmu->aperture_end >> shift, 0);
if (!iova)
return NULL;
pages = imgu_dmamap_alloc_buffer(size, GFP_KERNEL);
if (!pages)
goto out_free_iova;
/* Call IOMMU driver to setup pgt */
iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
for (i = 0; i < count; ++i) {
rval = imgu_mmu_map(imgu->mmu, iovaddr,
page_to_phys(pages[i]), PAGE_SIZE);
if (rval)
goto out_unmap;
iovaddr += PAGE_SIZE;
}
map->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
if (!map->vaddr)
goto out_unmap;
map->pages = pages;
map->size = size;
map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__,
size, &map->daddr, map->vaddr);
return map->vaddr;
out_unmap:
imgu_dmamap_free_buffer(pages, size);
imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
i * PAGE_SIZE);
out_free_iova:
__free_iova(&imgu->iova_domain, iova);
return NULL;
}
void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map)
{
struct iova *iova;
iova = find_iova(&imgu->iova_domain,
iova_pfn(&imgu->iova_domain, map->daddr));
if (WARN_ON(!iova))
return;
imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
iova_size(iova) << iova_shift(&imgu->iova_domain));
__free_iova(&imgu->iova_domain, iova);
}
/*
* Counterpart of imgu_dmamap_alloc
*/
void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
{
dev_dbg(&imgu->pci_dev->dev, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
__func__, map->size, &map->daddr, map->vaddr);
if (!map->vaddr)
return;
imgu_dmamap_unmap(imgu, map);
vunmap(map->vaddr);
imgu_dmamap_free_buffer(map->pages, map->size);
map->vaddr = NULL;
}
int imgu_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
int nents, struct imgu_css_map *map)
{
unsigned long shift = iova_shift(&imgu->iova_domain);
struct scatterlist *sg;
struct iova *iova;
size_t size = 0;
int i;
for_each_sg(sglist, sg, nents, i) {
if (sg->offset)
return -EINVAL;
if (i != nents - 1 && !PAGE_ALIGNED(sg->length))
return -EINVAL;
size += sg->length;
}
size = iova_align(&imgu->iova_domain, size);
dev_dbg(&imgu->pci_dev->dev, "dmamap: mapping sg %d entries, %zu pages\n",
nents, size >> shift);
iova = alloc_iova(&imgu->iova_domain, size >> shift,
imgu->mmu->aperture_end >> shift, 0);
if (!iova)
return -ENOMEM;
dev_dbg(&imgu->pci_dev->dev, "dmamap: iova low pfn %lu, high pfn %lu\n",
iova->pfn_lo, iova->pfn_hi);
if (imgu_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
sglist, nents) < size)
goto out_fail;
memset(map, 0, sizeof(*map));
map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
map->size = size;
return 0;
out_fail:
__free_iova(&imgu->iova_domain, iova);
return -EFAULT;
}
int imgu_dmamap_init(struct imgu_device *imgu)
{
unsigned long order, base_pfn;
int ret = iova_cache_get();
if (ret)
return ret;
order = __ffs(IPU3_PAGE_SIZE);
base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);
return 0;
}
void imgu_dmamap_exit(struct imgu_device *imgu)
{
put_iova_domain(&imgu->iova_domain);
iova_cache_put();
}
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/eventpoll.h ( Efficient event polling implementation )
* Copyright (C) 2001,...,2006 Davide Libenzi
*
* Davide Libenzi <[email protected]>
*/
#ifndef _LINUX_EVENTPOLL_H
#define _LINUX_EVENTPOLL_H
#include <uapi/linux/eventpoll.h>
#include <uapi/linux/kcmp.h>
/* Forward declarations to avoid compiler errors */
struct file;
#ifdef CONFIG_EPOLL
#ifdef CONFIG_KCMP
struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff);
#endif
/* Used to release the epoll bits inside the "struct file" */
void eventpoll_release_file(struct file *file);
/*
* This is called from inside fs/file_table.c:__fput() to unlink files
* from the eventpoll interface. We need this facility to correctly clean up
* files that are closed without being removed from the eventpoll
* interface.
*/
static inline void eventpoll_release(struct file *file)
{
/*
* Fast check to avoid the get/release of the semaphore. Since
* we're doing this outside the semaphore lock, it might return
* false negatives, but we don't care. It'll help in 99.99% of cases
* to avoid the semaphore lock. False positives simply cannot happen
* because the file is on the way to be removed and nobody (but
* eventpoll) still has a reference to this file.
*/
if (likely(!READ_ONCE(file->f_ep)))
return;
/*
* The file is being closed while it is still linked to an epoll
* descriptor. We need to handle this by correctly unlinking it
* from its containers.
*/
eventpoll_release_file(file);
}
int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
bool nonblock);
/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
return op != EPOLL_CTL_DEL;
}
#else
static inline void eventpoll_release(struct file *file) {}
#endif
#if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT)
/* ARM OABI has an incompatible struct layout and needs a special handler */
extern struct epoll_event __user *
epoll_put_uevent(__poll_t revents, __u64 data,
struct epoll_event __user *uevent);
#else
static inline struct epoll_event __user *
epoll_put_uevent(__poll_t revents, __u64 data,
struct epoll_event __user *uevent)
{
if (__put_user(revents, &uevent->events) ||
__put_user(data, &uevent->data))
return NULL;
return uevent+1;
}
#endif
#endif /* #ifndef _LINUX_EVENTPOLL_H */
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Quick & dirty crypto testing module.
*
* This will only exist until we have a better testing mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <[email protected]>
* Copyright (c) 2002 Jean-Francois Dive <[email protected]>
* Copyright (c) 2007 Nokia Siemens Networks
*/
#ifndef _CRYPTO_TCRYPT_H
#define _CRYPTO_TCRYPT_H
struct cipher_speed_template {
const char *key;
unsigned int klen;
};
struct aead_speed_template {
const char *key;
unsigned int klen;
};
struct hash_speed {
unsigned int blen; /* buffer length */
unsigned int plen; /* per-update length */
};
/*
* DES test vectors.
*/
#define DES3_SPEED_VECTORS 1
static struct cipher_speed_template des3_speed_template[] = {
{
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
"\x55\x55\x55\x55\x55\x55\x55\x55"
"\xfe\xdc\xba\x98\x76\x54\x32\x10",
.klen = 24,
}
};
/*
* Cipher speed tests
*/
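/*
 * Each speed_template_* array below appears to list the key lengths (in
 * bytes) to benchmark for a cipher, terminated by a 0 entry.
 */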
static u8 speed_template_8[] = {8, 0};
static u8 speed_template_16[] = {16, 0};
static u8 speed_template_24[] = {24, 0};
static u8 speed_template_8_16[] = {8, 16, 0};
static u8 speed_template_8_32[] = {8, 32, 0};
static u8 speed_template_16_32[] = {16, 32, 0};
static u8 speed_template_16_24_32[] = {16, 24, 32, 0};
static u8 speed_template_20_28_36[] = {20, 28, 36, 0};
static u8 speed_template_32_40_48[] = {32, 40, 48, 0};
static u8 speed_template_32_48[] = {32, 48, 0};
static u8 speed_template_32_48_64[] = {32, 48, 64, 0};
static u8 speed_template_32_64[] = {32, 64, 0};
static u8 speed_template_32[] = {32, 0};
/*
* AEAD speed tests
*/
static u8 aead_speed_template_19[] = {19, 0};
static u8 aead_speed_template_20_28_36[] = {20, 28, 36, 0};
static u8 aead_speed_template_36[] = {36, 0};
/*
* Digest speed tests
*/
static struct hash_speed generic_hash_speed_template[] = {
{ .blen = 16, .plen = 16, },
{ .blen = 64, .plen = 16, },
{ .blen = 64, .plen = 64, },
{ .blen = 256, .plen = 16, },
{ .blen = 256, .plen = 64, },
{ .blen = 256, .plen = 256, },
{ .blen = 1024, .plen = 16, },
{ .blen = 1024, .plen = 256, },
{ .blen = 1024, .plen = 1024, },
{ .blen = 2048, .plen = 16, },
{ .blen = 2048, .plen = 256, },
{ .blen = 2048, .plen = 1024, },
{ .blen = 2048, .plen = 2048, },
{ .blen = 4096, .plen = 16, },
{ .blen = 4096, .plen = 256, },
{ .blen = 4096, .plen = 1024, },
{ .blen = 4096, .plen = 4096, },
{ .blen = 8192, .plen = 16, },
{ .blen = 8192, .plen = 256, },
{ .blen = 8192, .plen = 1024, },
{ .blen = 8192, .plen = 4096, },
{ .blen = 8192, .plen = 8192, },
/* End marker */
{ .blen = 0, .plen = 0, }
};
static struct hash_speed poly1305_speed_template[] = {
{ .blen = 96, .plen = 16, },
{ .blen = 96, .plen = 32, },
{ .blen = 96, .plen = 96, },
{ .blen = 288, .plen = 16, },
{ .blen = 288, .plen = 32, },
{ .blen = 288, .plen = 288, },
{ .blen = 1056, .plen = 32, },
{ .blen = 1056, .plen = 1056, },
{ .blen = 2080, .plen = 32, },
{ .blen = 2080, .plen = 2080, },
{ .blen = 4128, .plen = 4128, },
{ .blen = 8224, .plen = 8224, },
/* End marker */
{ .blen = 0, .plen = 0, }
};
#endif /* _CRYPTO_TCRYPT_H */
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* w1_ds2413.c - w1 family 3a (DS2413) driver
* based on w1_ds2408.c by Jean-Francois Dagenais <[email protected]>
*
* Copyright (c) 2013 Mariusz Bialonczyk <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/w1.h>
#define W1_FAMILY_DS2413 0x3A
#define W1_F3A_RETRIES 3
#define W1_F3A_FUNC_PIO_ACCESS_READ 0xF5
#define W1_F3A_FUNC_PIO_ACCESS_WRITE 0x5A
#define W1_F3A_SUCCESS_CONFIRM_BYTE 0xAA
#define W1_F3A_INVALID_PIO_STATE 0xFF
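/*
 * In the byte returned by PIO_ACCESS_READ the lower nibble holds the
 * sampled PIO state and the upper nibble holds its bitwise complement,
 * which is how state_read() below validates the answer.
 */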
static ssize_t state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off,
size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
unsigned int retries = W1_F3A_RETRIES;
ssize_t bytes_read = -EIO;
u8 state;
dev_dbg(&sl->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
if (off != 0)
return 0;
if (!buf)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "mutex locked");
next:
if (w1_reset_select_slave(sl))
goto out;
while (retries--) {
w1_write_8(sl->master, W1_F3A_FUNC_PIO_ACCESS_READ);
state = w1_read_8(sl->master);
if ((state & 0x0F) == ((~state >> 4) & 0x0F)) {
/* complement is correct */
*buf = state;
bytes_read = 1;
goto out;
} else if (state == W1_F3A_INVALID_PIO_STATE) {
/* slave didn't respond, try to select it again */
dev_warn(&sl->dev, "slave device did not respond to PIO_ACCESS_READ, " \
"reselecting, retries left: %d\n", retries);
goto next;
}
if (w1_reset_resume_command(sl->master))
goto out; /* unrecoverable error */
dev_warn(&sl->dev, "PIO_ACCESS_READ error, retries left: %d\n", retries);
}
out:
mutex_unlock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "%s, mutex unlocked, retries: %d\n",
(bytes_read > 0) ? "succeeded" : "error", retries);
return bytes_read;
}
static BIN_ATTR_RO(state, 1);
static ssize_t output_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[3];
unsigned int retries = W1_F3A_RETRIES;
ssize_t bytes_written = -EIO;
if (count != 1 || off != 0)
return -EFAULT;
dev_dbg(&sl->dev, "locking mutex for write_output");
mutex_lock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "mutex locked");
if (w1_reset_select_slave(sl))
goto out;
/*
* according to the DS2413 datasheet the most significant 6 bits
* should be set to "1"s, so do it now
*/
*buf = *buf | 0xFC;
while (retries--) {
w1_buf[0] = W1_F3A_FUNC_PIO_ACCESS_WRITE;
w1_buf[1] = *buf;
w1_buf[2] = ~(*buf);
w1_write_block(sl->master, w1_buf, 3);
if (w1_read_8(sl->master) == W1_F3A_SUCCESS_CONFIRM_BYTE) {
bytes_written = 1;
goto out;
}
if (w1_reset_resume_command(sl->master))
goto out; /* unrecoverable error */
dev_warn(&sl->dev, "PIO_ACCESS_WRITE error, retries left: %d\n", retries);
}
out:
mutex_unlock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "%s, mutex unlocked, retries: %d\n",
(bytes_written > 0) ? "succeeded" : "error", retries);
return bytes_written;
}
static BIN_ATTR(output, 0664, NULL, output_write, 1);
static struct bin_attribute *w1_f3a_bin_attrs[] = {
&bin_attr_state,
&bin_attr_output,
NULL,
};
static const struct attribute_group w1_f3a_group = {
.bin_attrs = w1_f3a_bin_attrs,
};
static const struct attribute_group *w1_f3a_groups[] = {
&w1_f3a_group,
NULL,
};
static const struct w1_family_ops w1_f3a_fops = {
.groups = w1_f3a_groups,
};
static struct w1_family w1_family_3a = {
.fid = W1_FAMILY_DS2413,
.fops = &w1_f3a_fops,
};
module_w1_family(w1_family_3a);
MODULE_AUTHOR("Mariusz Bialonczyk <[email protected]>");
MODULE_DESCRIPTION("w1 family 3a driver for DS2413 2 Pin IO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2413));
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_BE_TYPES_H
#define _ASM_POWERPC_PGTABLE_BE_TYPES_H
#include <asm/cmpxchg.h>
/* PTE level */
typedef struct { __be64 pte; } pte_t;
#define __pte(x) ((pte_t) { cpu_to_be64(x) })
#define __pte_raw(x) ((pte_t) { (x) })
static inline unsigned long pte_val(pte_t x)
{
return be64_to_cpu(x.pte);
}
static inline __be64 pte_raw(pte_t x)
{
return x.pte;
}
/* PMD level */
#ifdef CONFIG_PPC64
typedef struct { __be64 pmd; } pmd_t;
#define __pmd(x) ((pmd_t) { cpu_to_be64(x) })
#define __pmd_raw(x) ((pmd_t) { (x) })
static inline unsigned long pmd_val(pmd_t x)
{
return be64_to_cpu(x.pmd);
}
static inline __be64 pmd_raw(pmd_t x)
{
return x.pmd;
}
/* 64 bit always use 4 level table. */
typedef struct { __be64 pud; } pud_t;
#define __pud(x) ((pud_t) { cpu_to_be64(x) })
#define __pud_raw(x) ((pud_t) { (x) })
static inline unsigned long pud_val(pud_t x)
{
return be64_to_cpu(x.pud);
}
static inline __be64 pud_raw(pud_t x)
{
return x.pud;
}
#endif /* CONFIG_PPC64 */
/* PGD level */
typedef struct { __be64 pgd; } pgd_t;
#define __pgd(x) ((pgd_t) { cpu_to_be64(x) })
#define __pgd_raw(x) ((pgd_t) { (x) })
static inline unsigned long pgd_val(pgd_t x)
{
return be64_to_cpu(x.pgd);
}
static inline __be64 pgd_raw(pgd_t x)
{
return x.pgd;
}
/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) })
/*
* With the hash MMU config and 64k pages, additionally define a bigger
* "real PTE" type that gathers the "second half" of the PTE for pseudo 64k pages.
*/
#ifdef CONFIG_PPC_64K_PAGES
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
#endif
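/*
 * Atomically replace a PTE only if it still holds the expected old value.
 * The compare-and-exchange operates on the raw big-endian representation,
 * so no byte swapping is needed.
 */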
static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
{
unsigned long *p = (unsigned long *)ptep;
__be64 prev;
/* See comment in switch_mm_irqs_off() */
prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
(__force unsigned long)pte_raw(new));
return pte_raw(old) == prev;
}
static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
{
unsigned long *p = (unsigned long *)pmdp;
__be64 prev;
prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pmd_raw(old),
(__force unsigned long)pmd_raw(new));
return pmd_raw(old) == prev;
}
#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Allegro A8293 SEC driver
*
* Copyright (C) 2011 Antti Palosaari <[email protected]>
*/
#ifndef A8293_H
#define A8293_H
#include <media/dvb_frontend.h>
/*
* I2C address
* 0x08, 0x09, 0x0a, 0x0b
*/
/**
* struct a8293_platform_data - Platform data for the a8293 driver
* @dvb_frontend: DVB frontend.
* @volt_slew_nanos_per_mv: Slew rate when increasing LNB voltage,
* in nanoseconds per millivolt.
*/
struct a8293_platform_data {
struct dvb_frontend *dvb_frontend;
int volt_slew_nanos_per_mv;
};
#endif /* A8293_H */
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_RES_H
#define __SNIC_RES_H
#include "snic_io.h"
#include "wq_enet_desc.h"
#include "vnic_wq.h"
#include "snic_fwint.h"
#include "vnic_cq_fw.h"
static inline void
snic_icmnd_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, u64 ctx,
u16 flags, u64 tgt_id, u8 *lun, u8 *scsi_cdb, u8 cdb_len,
u32 data_len, u16 sg_cnt, ulong sgl_addr,
dma_addr_t sns_addr_pa, u32 sense_len)
{
snic_io_hdr_enc(&req->hdr, SNIC_REQ_ICMND, 0, cmnd_id, host_id, sg_cnt,
ctx);
req->u.icmnd.flags = cpu_to_le16(flags);
req->u.icmnd.tgt_id = cpu_to_le64(tgt_id);
memcpy(&req->u.icmnd.lun_id, lun, LUN_ADDR_LEN);
req->u.icmnd.cdb_len = cdb_len;
memset(req->u.icmnd.cdb, 0, SNIC_CDB_LEN);
memcpy(req->u.icmnd.cdb, scsi_cdb, cdb_len);
req->u.icmnd.data_len = cpu_to_le32(data_len);
req->u.icmnd.sg_addr = cpu_to_le64(sgl_addr);
req->u.icmnd.sense_len = cpu_to_le32(sense_len);
req->u.icmnd.sense_addr = cpu_to_le64(sns_addr_pa);
}
static inline void
snic_itmf_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, ulong ctx,
u16 flags, u32 req_id, u64 tgt_id, u8 *lun, u8 tm_type)
{
snic_io_hdr_enc(&req->hdr, SNIC_REQ_ITMF, 0, cmnd_id, host_id, 0, ctx);
req->u.itmf.tm_type = tm_type;
req->u.itmf.flags = cpu_to_le16(flags);
/* req_id valid only in abort, clear task */
req->u.itmf.req_id = cpu_to_le32(req_id);
req->u.itmf.tgt_id = cpu_to_le64(tgt_id);
memcpy(&req->u.itmf.lun_id, lun, LUN_ADDR_LEN);
}
static inline void
snic_queue_wq_eth_desc(struct vnic_wq *wq,
void *os_buf,
dma_addr_t dma_addr,
unsigned int len,
int vlan_tag_insert,
unsigned int vlan_tag,
int cq_entry)
{
struct wq_enet_desc *desc = svnic_wq_next_desc(wq);
wq_enet_desc_enc(desc,
(u64)dma_addr | VNIC_PADDR_TARGET,
(u16)len,
0, /* mss_or_csum_offset */
0, /* fc_eof */
0, /* offload mode */
1, /* eop */
(u8)cq_entry,
0, /* fcoe_encap */
(u8)vlan_tag_insert,
(u16)vlan_tag,
0 /* loopback */);
svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
}
struct snic;
int snic_get_vnic_config(struct snic *);
int snic_alloc_vnic_res(struct snic *);
void snic_free_vnic_res(struct snic *);
void snic_get_res_counts(struct snic *);
void snic_log_q_error(struct snic *);
int snic_get_vnic_resources_size(struct snic *);
#endif /* __SNIC_RES_H */
|
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __SMU_V14_0_0_PPSMC_H__
#define __SMU_V14_0_0_PPSMC_H__
/*! @mainpage PMFW-PPS (PPLib) Message Interface
This documentation contains the subsections:\n\n
@ref ResponseCodes\n
@ref definitions\n
@ref enums\n
*/
/** @def PPS_PMFW_IF_VER
* PPS (PPLib) to PMFW IF version 1.0
*/
#define PPS_PMFW_IF_VER "1.0" ///< Major.Minor
/** @defgroup ResponseCodes PMFW Response Codes
* @{
*/
// SMU Response Codes:
#define PPSMC_Result_OK 0x1 ///< Message Response OK
#define PPSMC_Result_Failed 0xFF ///< Message Response Failed
#define PPSMC_Result_UnknownCmd 0xFE ///< Message Response Unknown Command
#define PPSMC_Result_CmdRejectedPrereq 0xFD ///< Message Response Command Failed Prerequisite
#define PPSMC_Result_CmdRejectedBusy 0xFC ///< Message Response Command Rejected due to PMFW is busy. Sender should retry sending this message
/** @}*/
/** @defgroup definitions Message definitions
* @{
*/
// Message Definitions:
#define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version
#define PPSMC_MSG_PowerDownVcn1 0x04 ///< Power down VCN1
#define PPSMC_MSG_PowerUpVcn1 0x05 ///< Power up VCN1; VCN1 is power gated by default
#define PPSMC_MSG_PowerDownVcn0 0x06 ///< Power down VCN0
#define PPSMC_MSG_PowerUpVcn0 0x07 ///< Power up VCN0; VCN0 is power gated by default
#define PPSMC_MSG_SetHardMinVcn0 0x08 ///< For wireless display
#define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz
#define PPSMC_MSG_SetHardMinVcn1 0x0A ///< For wireless display
#define PPSMC_MSG_SetSoftMinVcn1 0x0B ///< Set soft min for VCN1 clocks (VCLK1 and DCLK1)
#define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload
#define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer
#define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer
#define PPSMC_MSG_TransferTableSmu2Dram 0x0F ///< Transfer driver interface table from PMFW SRAM to DRAM
#define PPSMC_MSG_TransferTableDram2Smu 0x10 ///< Transfer driver interface table from DRAM to PMFW SRAM
#define PPSMC_MSG_GfxDeviceDriverReset 0x11 ///< Request GFX mode 2 reset
#define PPSMC_MSG_GetEnabledSmuFeatures 0x12 ///< Get enabled features in PMFW
#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set soft min for FCLK
#define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0)
#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU
#define PPSMC_MSG_spare_0x17 0x17 ///< Get GFX clock frequency
#define PPSMC_MSG_spare_0x18 0x18 ///< Get FCLK frequency
#define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry
#define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry
#define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK
#define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK
#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK
#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK
#define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)
#define PPSMC_MSG_spare_0x20 0x20 ///< Set power limit percentage
#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0
#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default
#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK
#define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFW of allowing Zstate entry, i.e. no Miracast activity
#define PPSMC_MSG_PowerDownJpeg1 0x26 ///< Power down Jpeg of VCN1
#define PPSMC_MSG_PowerUpJpeg1 0x27 ///< Power up Jpeg of VCN1; VCN1 is power gated by default
#define PPSMC_MSG_SetSoftMaxVcn1 0x28 ///< Set soft max for VCN1 clocks (VCLK1 and DCLK1)
#define PPSMC_MSG_PowerDownIspByTile 0x29 ///< ISP is power gated by default
#define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM
#define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
#define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN0.UMSCH (aka VSCH) scheduler
#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN0.UMSCH (aka VSCH) scheduler
#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
#define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StutterOff mmHub PgEn
#define PPSMC_MSG_PowerUpVpe 0x31 ///< Power up VPE
#define PPSMC_MSG_PowerDownVpe 0x32 ///< Power down VPE
#define PPSMC_MSG_GetVpeDpmTable 0x33 ///< Get VPE DPM table
#define PPSMC_MSG_EnableLSdma 0x34 ///< Enable LSDMA
#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
#define PPSMC_MSG_SetSoftMaxVpe 0x36 ///<
#define PPSMC_MSG_SetSoftMinVpe 0x37 ///<
#define PPSMC_MSG_MALLPowerController 0x38 ///< Set MALL control
#define PPSMC_MSG_MALLPowerState 0x39 ///< Enter/Exit MALL PG
#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
/** @}*/
/**
* @defgroup enums Enum Definitions
* @{
*/
/** @enum Mode_Reset_e
* Mode reset type, argument for PPSMC_MSG_GfxDeviceDriverReset
*/
typedef enum {
MODE1_RESET = 1, ///< Mode reset type 1
MODE2_RESET = 2 ///< Mode reset type 2
} Mode_Reset_e;
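// Example: a driver requesting a mode-2 GFX reset sends
// PPSMC_MSG_GfxDeviceDriverReset with MODE2_RESET as the message argument
// (the message transport itself is outside this header).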
/** @enum ZStates_e
* Zstate types, argument for PPSMC_MSG_AllowZstates
*/
typedef enum {
DISALLOW_ZSTATES = 0, ///< Disallow Zstates
ALLOW_ZSTATES_Z8 = 8, ///< Allows Z8 only
ALLOW_ZSTATES_Z9 = 9, ///< Allows Z9 and Z8
} ZStates_e;
/** @}*/
#endif
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Synopsys HSDK SDP Generic PLL clock driver
*
* Copyright (C) 2017 Synopsys
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define CGU_PLL_CTRL 0x000 /* ARC PLL control register */
#define CGU_PLL_STATUS 0x004 /* ARC PLL status register */
#define CGU_PLL_FMEAS 0x008 /* ARC PLL frequency measurement register */
#define CGU_PLL_MON 0x00C /* ARC PLL monitor register */
#define CGU_PLL_CTRL_ODIV_SHIFT 2
#define CGU_PLL_CTRL_IDIV_SHIFT 4
#define CGU_PLL_CTRL_FBDIV_SHIFT 9
#define CGU_PLL_CTRL_BAND_SHIFT 20
#define CGU_PLL_CTRL_ODIV_MASK GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT)
#define CGU_PLL_CTRL_IDIV_MASK GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT)
#define CGU_PLL_CTRL_FBDIV_MASK GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT)
#define CGU_PLL_CTRL_PD BIT(0)
#define CGU_PLL_CTRL_BYPASS BIT(1)
#define CGU_PLL_STATUS_LOCK BIT(0)
#define CGU_PLL_STATUS_ERR BIT(1)
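/*
 * CGU_PLL_CTRL bit layout (derived from the definitions above):
 *   [0]     PD     - PLL power down
 *   [1]     BYPASS - bypass the PLL
 *   [3:2]   ODIV   - output divider
 *   [8:4]   IDIV   - input divider
 *   [15:9]  FBDIV  - feedback divider
 *   [20+]   BAND   - band select (no mask defined here)
 */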
#define HSDK_PLL_MAX_LOCK_TIME 100 /* 100 us */
#define CGU_PLL_SOURCE_MAX 1
#define CORE_IF_CLK_THRESHOLD_HZ 500000000
#define CREG_CORE_IF_CLK_DIV_1 0x0
#define CREG_CORE_IF_CLK_DIV_2 0x1
struct hsdk_pll_cfg {
u32 rate;
u32 idiv;
u32 fbdiv;
u32 odiv;
u32 band;
u32 bypass;
};
static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
{ 100000000, 0, 11, 3, 0, 0 },
{ 133000000, 0, 15, 3, 0, 0 },
{ 200000000, 1, 47, 3, 0, 0 },
{ 233000000, 1, 27, 2, 0, 0 },
{ 300000000, 1, 35, 2, 0, 0 },
{ 333000000, 1, 39, 2, 0, 0 },
{ 400000000, 1, 47, 2, 0, 0 },
{ 500000000, 0, 14, 1, 0, 0 },
{ 600000000, 0, 17, 1, 0, 0 },
{ 700000000, 0, 20, 1, 0, 0 },
{ 800000000, 0, 23, 1, 0, 0 },
{ 900000000, 1, 26, 0, 0, 0 },
{ 1000000000, 1, 29, 0, 0, 0 },
{ 1100000000, 1, 32, 0, 0, 0 },
{ 1200000000, 1, 35, 0, 0, 0 },
{ 1300000000, 1, 38, 0, 0, 0 },
{ 1400000000, 1, 41, 0, 0, 0 },
{ 1500000000, 1, 44, 0, 0, 0 },
{ 1600000000, 1, 47, 0, 0, 0 },
{}
};
static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
{ 27000000, 0, 0, 0, 0, 1 },
{ 148500000, 0, 21, 3, 0, 0 },
{ 297000000, 0, 21, 2, 0, 0 },
{ 540000000, 0, 19, 1, 0, 0 },
{ 594000000, 0, 21, 1, 0, 0 },
{}
};
struct hsdk_pll_clk {
struct clk_hw hw;
void __iomem *regs;
void __iomem *spec_regs;
const struct hsdk_pll_devdata *pll_devdata;
struct device *dev;
};
struct hsdk_pll_devdata {
const struct hsdk_pll_cfg *pll_cfg;
int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate,
const struct hsdk_pll_cfg *cfg);
};
static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long,
const struct hsdk_pll_cfg *);
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long,
const struct hsdk_pll_cfg *);
static const struct hsdk_pll_devdata core_pll_devdata = {
.pll_cfg = asdt_pll_cfg,
.update_rate = hsdk_pll_core_update_rate,
};
static const struct hsdk_pll_devdata sdt_pll_devdata = {
.pll_cfg = asdt_pll_cfg,
.update_rate = hsdk_pll_comm_update_rate,
};
static const struct hsdk_pll_devdata hdmi_pll_devdata = {
.pll_cfg = hdmi_pll_cfg,
.update_rate = hsdk_pll_comm_update_rate,
};
static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val)
{
iowrite32(val, clk->regs + reg);
}
static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg)
{
return ioread32(clk->regs + reg);
}
static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
const struct hsdk_pll_cfg *cfg)
{
u32 val = 0;
if (cfg->bypass) {
val = hsdk_pll_read(clk, CGU_PLL_CTRL);
val |= CGU_PLL_CTRL_BYPASS;
} else {
/* Powerdown and Bypass bits should be cleared */
val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
}
dev_dbg(clk->dev, "write configuration: %#x\n", val);
hsdk_pll_write(clk, CGU_PLL_CTRL, val);
}
static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk)
{
return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK);
}
static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk)
{
return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR);
}
static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw)
{
return container_of(hw, struct hsdk_pll_clk, hw);
}
static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
u32 val;
u64 rate;
u32 idiv, fbdiv, odiv;
struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
val = hsdk_pll_read(clk, CGU_PLL_CTRL);
dev_dbg(clk->dev, "current configuration: %#x\n", val);
/* Check if PLL is bypassed */
if (val & CGU_PLL_CTRL_BYPASS)
return parent_rate;
/* Check if PLL is disabled */
if (val & CGU_PLL_CTRL_PD)
return 0;
/* input divider = reg.idiv + 1 */
idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
/* fb divider = 2*(reg.fbdiv + 1) */
fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT));
/* output divider = 2^(reg.odiv) */
odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT);
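/*
 * Worked example (assuming the 33.33 MHz reference clock used on the HSDK
 * board, which is not stated in this file): table entry
 * { 1000000000, 1, 29, 0, 0, 0 } gives idiv = 2, fbdiv = 2 * (29 + 1) = 60,
 * odiv = 2^0 = 1, so rate = 33.33 MHz * 60 / (2 * 1) = 1 GHz.
 */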
rate = (u64)parent_rate * fbdiv;
do_div(rate, idiv * odiv);
return rate;
}
static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
int i;
unsigned long best_rate;
struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;
if (pll_cfg[0].rate == 0)
return -EINVAL;
best_rate = pll_cfg[0].rate;
for (i = 1; pll_cfg[i].rate != 0; i++) {
if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
best_rate = pll_cfg[i].rate;
}
dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);
return best_rate;
}
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
unsigned long rate,
const struct hsdk_pll_cfg *cfg)
{
hsdk_pll_set_cfg(clk, cfg);
/*
 * Wait for the CGU to relock and then check the error status.
 * If the CGU is still unlocked after the timeout, return an error.
 */
udelay(HSDK_PLL_MAX_LOCK_TIME);
if (!hsdk_pll_is_locked(clk))
return -ETIMEDOUT;
if (hsdk_pll_is_err(clk))
return -EINVAL;
return 0;
}
static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
unsigned long rate,
const struct hsdk_pll_cfg *cfg)
{
/*
* When core clock exceeds 500MHz, the divider for the interface
* clock must be programmed to div-by-2.
*/
if (rate > CORE_IF_CLK_THRESHOLD_HZ)
iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs);
hsdk_pll_set_cfg(clk, cfg);
/*
 * Wait for the CGU to relock and then check the error status.
 * If the CGU is still unlocked after the timeout, return an error.
 */
udelay(HSDK_PLL_MAX_LOCK_TIME);
if (!hsdk_pll_is_locked(clk))
return -ETIMEDOUT;
if (hsdk_pll_is_err(clk))
return -EINVAL;
/*
 * Program the interface clock divider back to div-by-1 if we successfully
 * set the core clock below the 500 MHz threshold.
 */
if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs);
return 0;
}
static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
int i;
struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;
for (i = 0; pll_cfg[i].rate != 0; i++) {
if (pll_cfg[i].rate == rate) {
return clk->pll_devdata->update_rate(clk, rate,
&pll_cfg[i]);
}
}
dev_err(clk->dev, "invalid rate=%ld, parent_rate=%ld\n", rate,
parent_rate);
return -EINVAL;
}
static const struct clk_ops hsdk_pll_ops = {
.recalc_rate = hsdk_pll_recalc_rate,
.round_rate = hsdk_pll_round_rate,
.set_rate = hsdk_pll_set_rate,
};
static int hsdk_pll_clk_probe(struct platform_device *pdev)
{
int ret;
const char *parent_name;
unsigned int num_parents;
struct hsdk_pll_clk *pll_clk;
struct clk_init_data init = { };
struct device *dev = &pdev->dev;
pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
if (!pll_clk)
return -ENOMEM;
pll_clk->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pll_clk->regs))
return PTR_ERR(pll_clk->regs);
init.name = dev->of_node->name;
init.ops = &hsdk_pll_ops;
parent_name = of_clk_get_parent_name(dev->of_node, 0);
init.parent_names = &parent_name;
num_parents = of_clk_get_parent_count(dev->of_node);
if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) {
dev_err(dev, "wrong clock parents number: %u\n", num_parents);
return -EINVAL;
}
init.num_parents = num_parents;
pll_clk->hw.init = &init;
pll_clk->dev = dev;
pll_clk->pll_devdata = of_device_get_match_data(dev);
if (!pll_clk->pll_devdata) {
dev_err(dev, "No OF match data provided\n");
return -EINVAL;
}
ret = devm_clk_hw_register(dev, &pll_clk->hw);
if (ret) {
dev_err(dev, "failed to register %s clock\n", init.name);
return ret;
}
return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
&pll_clk->hw);
}
static void __init of_hsdk_pll_clk_setup(struct device_node *node)
{
int ret;
const char *parent_name;
unsigned int num_parents;
struct hsdk_pll_clk *pll_clk;
struct clk_init_data init = { };
pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
if (!pll_clk)
return;
pll_clk->regs = of_iomap(node, 0);
if (!pll_clk->regs) {
pr_err("failed to map pll registers\n");
goto err_free_pll_clk;
}
pll_clk->spec_regs = of_iomap(node, 1);
if (!pll_clk->spec_regs) {
pr_err("failed to map pll registers\n");
goto err_unmap_comm_regs;
}
init.name = node->name;
init.ops = &hsdk_pll_ops;
parent_name = of_clk_get_parent_name(node, 0);
init.parent_names = &parent_name;
num_parents = of_clk_get_parent_count(node);
if (num_parents > CGU_PLL_SOURCE_MAX) {
pr_err("too much clock parents: %u\n", num_parents);
goto err_unmap_spec_regs;
}
init.num_parents = num_parents;
pll_clk->hw.init = &init;
pll_clk->pll_devdata = &core_pll_devdata;
ret = clk_hw_register(NULL, &pll_clk->hw);
if (ret) {
pr_err("failed to register %pOFn clock\n", node);
goto err_unmap_spec_regs;
}
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
if (ret) {
pr_err("failed to add hw provider for %pOFn clock\n", node);
goto err_unmap_spec_regs;
}
return;
err_unmap_spec_regs:
iounmap(pll_clk->spec_regs);
err_unmap_comm_regs:
iounmap(pll_clk->regs);
err_free_pll_clk:
kfree(pll_clk);
}
/* Core PLL needed early for ARC cpus timers */
CLK_OF_DECLARE(hsdk_pll_clock, "snps,hsdk-core-pll-clock",
of_hsdk_pll_clk_setup);
static const struct of_device_id hsdk_pll_clk_id[] = {
{ .compatible = "snps,hsdk-gp-pll-clock", .data = &sdt_pll_devdata},
{ .compatible = "snps,hsdk-hdmi-pll-clock", .data = &hdmi_pll_devdata},
{ }
};
static struct platform_driver hsdk_pll_clk_driver = {
.driver = {
.name = "hsdk-gp-pll-clock",
.of_match_table = hsdk_pll_clk_id,
},
.probe = hsdk_pll_clk_probe,
};
builtin_platform_driver(hsdk_pll_clk_driver);
|
// SPDX-License-Identifier: GPL-2.0
/*
* MPC5200 PSC serial console support.
*
* Author: Grant Likely <[email protected]>
*
* Copyright (c) 2007 Secret Lab Technologies Ltd.
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* It is assumed that the firmware (or the platform file) has already set
* up the port.
*/
#include "types.h"
#include "io.h"
#include "ops.h"
/* Programmable Serial Controller (PSC) status register bits */
#define MPC52xx_PSC_SR 0x04
#define MPC52xx_PSC_SR_RXRDY 0x0100
#define MPC52xx_PSC_SR_RXFULL 0x0200
#define MPC52xx_PSC_SR_TXRDY 0x0400
#define MPC52xx_PSC_SR_TXEMP 0x0800
#define MPC52xx_PSC_BUFFER 0x0C
static void *psc;
static int psc_open(void)
{
/* Assume the firmware has already configured the PSC into
* uart mode */
return 0;
}
static void psc_putc(unsigned char c)
{
while (!(in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_TXRDY)) ;
out_8(psc + MPC52xx_PSC_BUFFER, c);
}
static unsigned char psc_tstc(void)
{
return (in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_RXRDY) != 0;
}
static unsigned char psc_getc(void)
{
while (!(in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_RXRDY)) ;
return in_8(psc + MPC52xx_PSC_BUFFER);
}
int mpc5200_psc_console_init(void *devp, struct serial_console_data *scdp)
{
/* Get the base address of the psc registers */
if (dt_get_virtual_reg(devp, &psc, 1) < 1)
return -1;
scdp->open = psc_open;
scdp->putc = psc_putc;
scdp->getc = psc_getc;
scdp->tstc = psc_tstc;
return 0;
}
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH73A0_H__
#define __ASM_SH73A0_H__
extern const struct smp_operations sh73a0_smp_ops;
#endif /* __ASM_SH73A0_H__ */
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Glibc independent futex library for testing kernel functionality.
* Shamelessly stolen from Darren Hart <[email protected]>
* http://git.kernel.org/cgit/linux/kernel/git/dvhart/futextest.git/
*/
#ifndef _FUTEX_H
#define _FUTEX_H
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/futex.h>
struct bench_futex_parameters {
bool silent;
bool fshared;
bool mlockall;
bool multi; /* lock-pi */
bool pi; /* requeue-pi */
bool broadcast; /* requeue */
unsigned int runtime; /* seconds */
unsigned int nthreads;
unsigned int nfutexes;
unsigned int nwakes;
unsigned int nrequeue;
};
/**
* futex_syscall() - SYS_futex syscall wrapper
* @uaddr: address of first futex
* @op: futex op code
* @val: typically expected value of uaddr, but varies by op
* @timeout: typically an absolute struct timespec (except where noted
* otherwise). Overloaded by some ops
* @uaddr2: address of second futex for some ops
* @val3: varies by op
* @opflags: flags to be bitwise OR'd with op, such as FUTEX_PRIVATE_FLAG
*
* futex_syscall() is used by all the following futex op wrappers. It can also be
* used for misuse and abuse testing. Generally, the specific op wrappers
* should be used instead.
*
* These argument descriptions are the defaults for all
* like-named arguments in the following wrappers except where noted below.
*/
static inline int
futex_syscall(volatile u_int32_t *uaddr, int op, u_int32_t val, struct timespec *timeout,
volatile u_int32_t *uaddr2, int val3, int opflags)
{
return syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3);
}
static inline int
futex_syscall_nr_requeue(volatile u_int32_t *uaddr, int op, u_int32_t val, int nr_requeue,
volatile u_int32_t *uaddr2, int val3, int opflags)
{
return syscall(SYS_futex, uaddr, op | opflags, val, nr_requeue, uaddr2, val3);
}
/**
* futex_wait() - block on uaddr with optional timeout
* @timeout: relative timeout
*/
static inline int
futex_wait(u_int32_t *uaddr, u_int32_t val, struct timespec *timeout, int opflags)
{
return futex_syscall(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags);
}
/**
* futex_wake() - wake one or more tasks blocked on uaddr
* @nr_wake: wake up to this many tasks
*/
static inline int
futex_wake(u_int32_t *uaddr, int nr_wake, int opflags)
{
return futex_syscall(uaddr, FUTEX_WAKE, nr_wake, NULL, NULL, 0, opflags);
}
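/*
 * Illustrative usage sketch (not part of the original header): a waiter
 * blocks only while *futex still holds the expected value, and a waker
 * later wakes it after publishing the new value.
 *
 *   u_int32_t futex = 0;
 *
 *   // waiter: sleeps while futex == 0 (fails with EAGAIN if it changed)
 *   futex_wait(&futex, 0, NULL, FUTEX_PRIVATE_FLAG);
 *
 *   // waker: publish the new value, then wake up to one waiter
 *   futex = 1;
 *   futex_wake(&futex, 1, FUTEX_PRIVATE_FLAG);
 */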
/**
* futex_lock_pi() - block on uaddr as a PI mutex
*/
static inline int
futex_lock_pi(u_int32_t *uaddr, struct timespec *timeout, int opflags)
{
return futex_syscall(uaddr, FUTEX_LOCK_PI, 0, timeout, NULL, 0, opflags);
}
/**
* futex_unlock_pi() - release uaddr as a PI mutex, waking the top waiter
*/
static inline int
futex_unlock_pi(u_int32_t *uaddr, int opflags)
{
return futex_syscall(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags);
}
/**
* futex_cmp_requeue() - requeue tasks from uaddr to uaddr2
* @nr_wake: wake up to this many tasks
* @nr_requeue: requeue up to this many tasks
*/
static inline int
futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wake,
int nr_requeue, int opflags)
{
return futex_syscall_nr_requeue(uaddr, FUTEX_CMP_REQUEUE, nr_wake, nr_requeue, uaddr2,
val, opflags);
}
/**
* futex_wait_requeue_pi() - block on uaddr and prepare to requeue to uaddr2
* @uaddr: non-PI futex source
* @uaddr2: PI futex target
*
* This is the first half of the requeue_pi mechanism. It shall always be
* paired with futex_cmp_requeue_pi().
*/
static inline int
futex_wait_requeue_pi(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2,
struct timespec *timeout, int opflags)
{
return futex_syscall(uaddr, FUTEX_WAIT_REQUEUE_PI, val, timeout, uaddr2, 0,
opflags);
}
/**
* futex_cmp_requeue_pi() - requeue tasks from uaddr to uaddr2
* @uaddr: non-PI futex source
* @uaddr2: PI futex target
* @nr_requeue: requeue up to this many tasks
*
* This is the second half of the requeue_pi mechanism. It shall always be
* paired with futex_wait_requeue_pi(). The first waiter is always awoken.
*/
static inline int
futex_cmp_requeue_pi(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2,
int nr_requeue, int opflags)
{
return futex_syscall_nr_requeue(uaddr, FUTEX_CMP_REQUEUE_PI, 1, nr_requeue, uaddr2,
val, opflags);
}
#endif /* _FUTEX_H */
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* cxd2880_dvbt2.h
* Sony CXD2880 DVB-T2/T tuner + demodulator driver
* DVB-T2 related definitions
*
* Copyright (C) 2016, 2017, 2018 Sony Semiconductor Solutions Corporation
*/
#ifndef CXD2880_DVBT2_H
#define CXD2880_DVBT2_H
#include "cxd2880_common.h"
enum cxd2880_dvbt2_profile {
CXD2880_DVBT2_PROFILE_BASE,
CXD2880_DVBT2_PROFILE_LITE,
CXD2880_DVBT2_PROFILE_ANY
};
enum cxd2880_dvbt2_version {
CXD2880_DVBT2_V111,
CXD2880_DVBT2_V121,
CXD2880_DVBT2_V131
};
enum cxd2880_dvbt2_s1 {
CXD2880_DVBT2_S1_BASE_SISO = 0x00,
CXD2880_DVBT2_S1_BASE_MISO = 0x01,
CXD2880_DVBT2_S1_NON_DVBT2 = 0x02,
CXD2880_DVBT2_S1_LITE_SISO = 0x03,
CXD2880_DVBT2_S1_LITE_MISO = 0x04,
CXD2880_DVBT2_S1_RSVD3 = 0x05,
CXD2880_DVBT2_S1_RSVD4 = 0x06,
CXD2880_DVBT2_S1_RSVD5 = 0x07,
CXD2880_DVBT2_S1_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_base_s2 {
CXD2880_DVBT2_BASE_S2_M2K_G_ANY = 0x00,
CXD2880_DVBT2_BASE_S2_M8K_G_DVBT = 0x01,
CXD2880_DVBT2_BASE_S2_M4K_G_ANY = 0x02,
CXD2880_DVBT2_BASE_S2_M1K_G_ANY = 0x03,
CXD2880_DVBT2_BASE_S2_M16K_G_ANY = 0x04,
CXD2880_DVBT2_BASE_S2_M32K_G_DVBT = 0x05,
CXD2880_DVBT2_BASE_S2_M8K_G_DVBT2 = 0x06,
CXD2880_DVBT2_BASE_S2_M32K_G_DVBT2 = 0x07,
CXD2880_DVBT2_BASE_S2_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_lite_s2 {
CXD2880_DVBT2_LITE_S2_M2K_G_ANY = 0x00,
CXD2880_DVBT2_LITE_S2_M8K_G_DVBT = 0x01,
CXD2880_DVBT2_LITE_S2_M4K_G_ANY = 0x02,
CXD2880_DVBT2_LITE_S2_M16K_G_DVBT2 = 0x03,
CXD2880_DVBT2_LITE_S2_M16K_G_DVBT = 0x04,
CXD2880_DVBT2_LITE_S2_RSVD1 = 0x05,
CXD2880_DVBT2_LITE_S2_M8K_G_DVBT2 = 0x06,
CXD2880_DVBT2_LITE_S2_RSVD2 = 0x07,
CXD2880_DVBT2_LITE_S2_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_guard {
CXD2880_DVBT2_G1_32 = 0x00,
CXD2880_DVBT2_G1_16 = 0x01,
CXD2880_DVBT2_G1_8 = 0x02,
CXD2880_DVBT2_G1_4 = 0x03,
CXD2880_DVBT2_G1_128 = 0x04,
CXD2880_DVBT2_G19_128 = 0x05,
CXD2880_DVBT2_G19_256 = 0x06,
CXD2880_DVBT2_G_RSVD1 = 0x07,
CXD2880_DVBT2_G_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_mode {
CXD2880_DVBT2_M2K = 0x00,
CXD2880_DVBT2_M8K = 0x01,
CXD2880_DVBT2_M4K = 0x02,
CXD2880_DVBT2_M1K = 0x03,
CXD2880_DVBT2_M16K = 0x04,
CXD2880_DVBT2_M32K = 0x05,
CXD2880_DVBT2_M_RSVD1 = 0x06,
CXD2880_DVBT2_M_RSVD2 = 0x07
};
enum cxd2880_dvbt2_bw {
CXD2880_DVBT2_BW_8 = 0x00,
CXD2880_DVBT2_BW_7 = 0x01,
CXD2880_DVBT2_BW_6 = 0x02,
CXD2880_DVBT2_BW_5 = 0x03,
CXD2880_DVBT2_BW_10 = 0x04,
CXD2880_DVBT2_BW_1_7 = 0x05,
CXD2880_DVBT2_BW_RSVD1 = 0x06,
CXD2880_DVBT2_BW_RSVD2 = 0x07,
CXD2880_DVBT2_BW_RSVD3 = 0x08,
CXD2880_DVBT2_BW_RSVD4 = 0x09,
CXD2880_DVBT2_BW_RSVD5 = 0x0a,
CXD2880_DVBT2_BW_RSVD6 = 0x0b,
CXD2880_DVBT2_BW_RSVD7 = 0x0c,
CXD2880_DVBT2_BW_RSVD8 = 0x0d,
CXD2880_DVBT2_BW_RSVD9 = 0x0e,
CXD2880_DVBT2_BW_RSVD10 = 0x0f,
CXD2880_DVBT2_BW_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_l1pre_type {
CXD2880_DVBT2_L1PRE_TYPE_TS = 0x00,
CXD2880_DVBT2_L1PRE_TYPE_GS = 0x01,
CXD2880_DVBT2_L1PRE_TYPE_TS_GS = 0x02,
CXD2880_DVBT2_L1PRE_TYPE_RESERVED = 0x03,
CXD2880_DVBT2_L1PRE_TYPE_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_papr {
CXD2880_DVBT2_PAPR_0 = 0x00,
CXD2880_DVBT2_PAPR_1 = 0x01,
CXD2880_DVBT2_PAPR_2 = 0x02,
CXD2880_DVBT2_PAPR_3 = 0x03,
CXD2880_DVBT2_PAPR_RSVD1 = 0x04,
CXD2880_DVBT2_PAPR_RSVD2 = 0x05,
CXD2880_DVBT2_PAPR_RSVD3 = 0x06,
CXD2880_DVBT2_PAPR_RSVD4 = 0x07,
CXD2880_DVBT2_PAPR_RSVD5 = 0x08,
CXD2880_DVBT2_PAPR_RSVD6 = 0x09,
CXD2880_DVBT2_PAPR_RSVD7 = 0x0a,
CXD2880_DVBT2_PAPR_RSVD8 = 0x0b,
CXD2880_DVBT2_PAPR_RSVD9 = 0x0c,
CXD2880_DVBT2_PAPR_RSVD10 = 0x0d,
CXD2880_DVBT2_PAPR_RSVD11 = 0x0e,
CXD2880_DVBT2_PAPR_RSVD12 = 0x0f,
CXD2880_DVBT2_PAPR_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_l1post_constell {
CXD2880_DVBT2_L1POST_BPSK = 0x00,
CXD2880_DVBT2_L1POST_QPSK = 0x01,
CXD2880_DVBT2_L1POST_QAM16 = 0x02,
CXD2880_DVBT2_L1POST_QAM64 = 0x03,
CXD2880_DVBT2_L1POST_C_RSVD1 = 0x04,
CXD2880_DVBT2_L1POST_C_RSVD2 = 0x05,
CXD2880_DVBT2_L1POST_C_RSVD3 = 0x06,
CXD2880_DVBT2_L1POST_C_RSVD4 = 0x07,
CXD2880_DVBT2_L1POST_C_RSVD5 = 0x08,
CXD2880_DVBT2_L1POST_C_RSVD6 = 0x09,
CXD2880_DVBT2_L1POST_C_RSVD7 = 0x0a,
CXD2880_DVBT2_L1POST_C_RSVD8 = 0x0b,
CXD2880_DVBT2_L1POST_C_RSVD9 = 0x0c,
CXD2880_DVBT2_L1POST_C_RSVD10 = 0x0d,
CXD2880_DVBT2_L1POST_C_RSVD11 = 0x0e,
CXD2880_DVBT2_L1POST_C_RSVD12 = 0x0f,
CXD2880_DVBT2_L1POST_CONSTELL_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_l1post_cr {
CXD2880_DVBT2_L1POST_R1_2 = 0x00,
CXD2880_DVBT2_L1POST_R_RSVD1 = 0x01,
CXD2880_DVBT2_L1POST_R_RSVD2 = 0x02,
CXD2880_DVBT2_L1POST_R_RSVD3 = 0x03,
CXD2880_DVBT2_L1POST_R_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_l1post_fec_type {
CXD2880_DVBT2_L1POST_FEC_LDPC16K = 0x00,
CXD2880_DVBT2_L1POST_FEC_RSVD1 = 0x01,
CXD2880_DVBT2_L1POST_FEC_RSVD2 = 0x02,
CXD2880_DVBT2_L1POST_FEC_RSVD3 = 0x03,
CXD2880_DVBT2_L1POST_FEC_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_pp {
CXD2880_DVBT2_PP1 = 0x00,
CXD2880_DVBT2_PP2 = 0x01,
CXD2880_DVBT2_PP3 = 0x02,
CXD2880_DVBT2_PP4 = 0x03,
CXD2880_DVBT2_PP5 = 0x04,
CXD2880_DVBT2_PP6 = 0x05,
CXD2880_DVBT2_PP7 = 0x06,
CXD2880_DVBT2_PP8 = 0x07,
CXD2880_DVBT2_PP_RSVD1 = 0x08,
CXD2880_DVBT2_PP_RSVD2 = 0x09,
CXD2880_DVBT2_PP_RSVD3 = 0x0a,
CXD2880_DVBT2_PP_RSVD4 = 0x0b,
CXD2880_DVBT2_PP_RSVD5 = 0x0c,
CXD2880_DVBT2_PP_RSVD6 = 0x0d,
CXD2880_DVBT2_PP_RSVD7 = 0x0e,
CXD2880_DVBT2_PP_RSVD8 = 0x0f,
CXD2880_DVBT2_PP_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_plp_code_rate {
CXD2880_DVBT2_R1_2 = 0x00,
CXD2880_DVBT2_R3_5 = 0x01,
CXD2880_DVBT2_R2_3 = 0x02,
CXD2880_DVBT2_R3_4 = 0x03,
CXD2880_DVBT2_R4_5 = 0x04,
CXD2880_DVBT2_R5_6 = 0x05,
CXD2880_DVBT2_R1_3 = 0x06,
CXD2880_DVBT2_R2_5 = 0x07,
CXD2880_DVBT2_PLP_CR_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_plp_constell {
CXD2880_DVBT2_QPSK = 0x00,
CXD2880_DVBT2_QAM16 = 0x01,
CXD2880_DVBT2_QAM64 = 0x02,
CXD2880_DVBT2_QAM256 = 0x03,
CXD2880_DVBT2_CON_RSVD1 = 0x04,
CXD2880_DVBT2_CON_RSVD2 = 0x05,
CXD2880_DVBT2_CON_RSVD3 = 0x06,
CXD2880_DVBT2_CON_RSVD4 = 0x07,
CXD2880_DVBT2_CONSTELL_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_plp_type {
CXD2880_DVBT2_PLP_TYPE_COMMON = 0x00,
CXD2880_DVBT2_PLP_TYPE_DATA1 = 0x01,
CXD2880_DVBT2_PLP_TYPE_DATA2 = 0x02,
CXD2880_DVBT2_PLP_TYPE_RSVD1 = 0x03,
CXD2880_DVBT2_PLP_TYPE_RSVD2 = 0x04,
CXD2880_DVBT2_PLP_TYPE_RSVD3 = 0x05,
CXD2880_DVBT2_PLP_TYPE_RSVD4 = 0x06,
CXD2880_DVBT2_PLP_TYPE_RSVD5 = 0x07,
CXD2880_DVBT2_PLP_TYPE_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_plp_payload {
CXD2880_DVBT2_PLP_PAYLOAD_GFPS = 0x00,
CXD2880_DVBT2_PLP_PAYLOAD_GCS = 0x01,
CXD2880_DVBT2_PLP_PAYLOAD_GSE = 0x02,
CXD2880_DVBT2_PLP_PAYLOAD_TS = 0x03,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD1 = 0x04,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD2 = 0x05,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD3 = 0x06,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD4 = 0x07,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD5 = 0x08,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD6 = 0x09,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD7 = 0x0a,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD8 = 0x0b,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD9 = 0x0c,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD10 = 0x0d,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD11 = 0x0e,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD12 = 0x0f,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD13 = 0x10,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD14 = 0x11,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD15 = 0x12,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD16 = 0x13,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD17 = 0x14,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD18 = 0x15,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD19 = 0x16,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD20 = 0x17,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD21 = 0x18,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD22 = 0x19,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD23 = 0x1a,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD24 = 0x1b,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD25 = 0x1c,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD26 = 0x1d,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD27 = 0x1e,
CXD2880_DVBT2_PLP_PAYLOAD_RSVD28 = 0x1f,
CXD2880_DVBT2_PLP_PAYLOAD_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_plp_fec {
CXD2880_DVBT2_FEC_LDPC_16K = 0x00,
CXD2880_DVBT2_FEC_LDPC_64K = 0x01,
CXD2880_DVBT2_FEC_RSVD1 = 0x02,
CXD2880_DVBT2_FEC_RSVD2 = 0x03,
CXD2880_DVBT2_FEC_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_plp_mode {
CXD2880_DVBT2_PLP_MODE_NOTSPECIFIED = 0x00,
CXD2880_DVBT2_PLP_MODE_NM = 0x01,
CXD2880_DVBT2_PLP_MODE_HEM = 0x02,
CXD2880_DVBT2_PLP_MODE_RESERVED = 0x03,
CXD2880_DVBT2_PLP_MODE_UNKNOWN = 0xff
};
enum cxd2880_dvbt2_plp_btype {
CXD2880_DVBT2_PLP_COMMON,
CXD2880_DVBT2_PLP_DATA
};
enum cxd2880_dvbt2_stream {
CXD2880_DVBT2_STREAM_GENERIC_PACKETIZED = 0x00,
CXD2880_DVBT2_STREAM_GENERIC_CONTINUOUS = 0x01,
CXD2880_DVBT2_STREAM_GENERIC_ENCAPSULATED = 0x02,
CXD2880_DVBT2_STREAM_TRANSPORT = 0x03,
CXD2880_DVBT2_STREAM_UNKNOWN = 0xff
};
struct cxd2880_dvbt2_l1pre {
enum cxd2880_dvbt2_l1pre_type type;
u8 bw_ext;
enum cxd2880_dvbt2_s1 s1;
u8 s2;
u8 mixed;
enum cxd2880_dvbt2_mode fft_mode;
u8 l1_rep;
enum cxd2880_dvbt2_guard gi;
enum cxd2880_dvbt2_papr papr;
enum cxd2880_dvbt2_l1post_constell mod;
enum cxd2880_dvbt2_l1post_cr cr;
enum cxd2880_dvbt2_l1post_fec_type fec;
u32 l1_post_size;
u32 l1_post_info_size;
enum cxd2880_dvbt2_pp pp;
u8 tx_id_availability;
u16 cell_id;
u16 network_id;
u16 sys_id;
u8 num_frames;
u16 num_symbols;
u8 regen;
u8 post_ext;
u8 num_rf_freqs;
u8 rf_idx;
enum cxd2880_dvbt2_version t2_version;
u8 l1_post_scrambled;
u8 t2_base_lite;
u32 crc32;
};
struct cxd2880_dvbt2_plp {
u8 id;
enum cxd2880_dvbt2_plp_type type;
enum cxd2880_dvbt2_plp_payload payload;
u8 ff;
u8 first_rf_idx;
u8 first_frm_idx;
u8 group_id;
enum cxd2880_dvbt2_plp_constell constell;
enum cxd2880_dvbt2_plp_code_rate plp_cr;
u8 rot;
enum cxd2880_dvbt2_plp_fec fec;
u16 num_blocks_max;
u8 frm_int;
u8 til_len;
u8 til_type;
u8 in_band_a_flag;
u8 in_band_b_flag;
u16 rsvd;
enum cxd2880_dvbt2_plp_mode plp_mode;
u8 static_flag;
u8 static_padding_flag;
};
struct cxd2880_dvbt2_l1post {
u16 sub_slices_per_frame;
u8 num_plps;
u8 num_aux;
u8 aux_cfg_rfu;
u8 rf_idx;
u32 freq;
u8 fef_type;
u32 fef_length;
u8 fef_intvl;
};
struct cxd2880_dvbt2_ofdm {
u8 mixed;
u8 is_miso;
enum cxd2880_dvbt2_mode mode;
enum cxd2880_dvbt2_guard gi;
enum cxd2880_dvbt2_pp pp;
u8 bw_ext;
enum cxd2880_dvbt2_papr papr;
u16 num_symbols;
};
struct cxd2880_dvbt2_bbheader {
enum cxd2880_dvbt2_stream stream_input;
u8 is_single_input_stream;
u8 is_constant_coding_modulation;
u8 issy_indicator;
u8 null_packet_deletion;
u8 ext;
u8 input_stream_identifier;
u16 user_packet_length;
u16 data_field_length;
u8 sync_byte;
u32 issy;
enum cxd2880_dvbt2_plp_mode plp_mode;
};
#endif
|
/*
* Copyright (C) 2017 Jernej Skrabec <[email protected]>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#ifndef _SUN8I_UI_SCALER_H_
#define _SUN8I_UI_SCALER_H_
#include "sun8i_mixer.h"
#define DE2_UI_SCALER_UNIT_SIZE 0x10000
#define DE3_UI_SCALER_UNIT_SIZE 0x08000
/* these two macros assume 16 fractional bits, which is standard in DRM */
#define SUN8I_UI_SCALER_SCALE_MIN 1
#define SUN8I_UI_SCALER_SCALE_MAX ((1UL << 20) - 1)
#define SUN8I_UI_SCALER_SCALE_FRAC 20
#define SUN8I_UI_SCALER_PHASE_FRAC 20
#define SUN8I_UI_SCALER_COEFF_COUNT 16
#define SUN8I_UI_SCALER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
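/*
 * For illustration (not part of the original header):
 * SUN8I_UI_SCALER_SIZE(1920, 1080) packs (1080 - 1) << 16 | (1920 - 1),
 * i.e. 0x0437077f, encoding the height in the upper half-word and the
 * width in the lower half-word.
 */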
#define SUN8I_SCALER_GSU_CTRL(base) ((base) + 0x0)
#define SUN8I_SCALER_GSU_OUTSIZE(base) ((base) + 0x40)
#define SUN8I_SCALER_GSU_INSIZE(base) ((base) + 0x80)
#define SUN8I_SCALER_GSU_HSTEP(base) ((base) + 0x88)
#define SUN8I_SCALER_GSU_VSTEP(base) ((base) + 0x8c)
#define SUN8I_SCALER_GSU_HPHASE(base) ((base) + 0x90)
#define SUN8I_SCALER_GSU_VPHASE(base) ((base) + 0x98)
#define SUN8I_SCALER_GSU_HCOEFF(base, index) ((base) + 0x200 + 0x4 * (index))
#define SUN8I_SCALER_GSU_CTRL_EN BIT(0)
#define SUN8I_SCALER_GSU_CTRL_COEFF_RDY BIT(4)
void sun8i_ui_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable);
void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
u32 hscale, u32 vscale, u32 hphase, u32 vphase);
#endif
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Oracle. All rights reserved.
*/
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "accessors.h"
#include "tree-checker.h"
#include "volumes.h"
#include "raid-stripe-tree.h"
struct root_name_map {
u64 id;
const char *name;
};
static const struct root_name_map root_map[] = {
{ BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" },
{ BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" },
{ BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" },
{ BTRFS_DEV_TREE_OBJECTID, "DEV_TREE" },
{ BTRFS_FS_TREE_OBJECTID, "FS_TREE" },
{ BTRFS_CSUM_TREE_OBJECTID, "CSUM_TREE" },
{ BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" },
{ BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" },
{ BTRFS_UUID_TREE_OBJECTID, "UUID_TREE" },
{ BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" },
{ BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" },
{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" },
{ BTRFS_RAID_STRIPE_TREE_OBJECTID, "RAID_STRIPE_TREE" },
};
const char *btrfs_root_name(const struct btrfs_key *key, char *buf)
{
int i;
if (key->objectid == BTRFS_TREE_RELOC_OBJECTID) {
snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN,
"TREE_RELOC offset=%llu", key->offset);
return buf;
}
for (i = 0; i < ARRAY_SIZE(root_map); i++) {
if (root_map[i].id == key->objectid)
return root_map[i].name;
}
snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN, "%llu", key->objectid);
return buf;
}
static void print_chunk(const struct extent_buffer *eb, struct btrfs_chunk *chunk)
{
int num_stripes = btrfs_chunk_num_stripes(eb, chunk);
int i;
pr_info("\t\tchunk length %llu owner %llu type %llu num_stripes %d\n",
btrfs_chunk_length(eb, chunk), btrfs_chunk_owner(eb, chunk),
btrfs_chunk_type(eb, chunk), num_stripes);
for (i = 0 ; i < num_stripes ; i++) {
pr_info("\t\t\tstripe %d devid %llu offset %llu\n", i,
btrfs_stripe_devid_nr(eb, chunk, i),
btrfs_stripe_offset_nr(eb, chunk, i));
}
}
static void print_dev_item(const struct extent_buffer *eb,
struct btrfs_dev_item *dev_item)
{
pr_info("\t\tdev item devid %llu total_bytes %llu bytes used %llu\n",
btrfs_device_id(eb, dev_item),
btrfs_device_total_bytes(eb, dev_item),
btrfs_device_bytes_used(eb, dev_item));
}
static void print_extent_data_ref(const struct extent_buffer *eb,
struct btrfs_extent_data_ref *ref)
{
pr_cont("extent data backref root %llu objectid %llu offset %llu count %u\n",
btrfs_extent_data_ref_root(eb, ref),
btrfs_extent_data_ref_objectid(eb, ref),
btrfs_extent_data_ref_offset(eb, ref),
btrfs_extent_data_ref_count(eb, ref));
}
static void print_extent_owner_ref(const struct extent_buffer *eb,
const struct btrfs_extent_owner_ref *ref)
{
ASSERT(btrfs_fs_incompat(eb->fs_info, SIMPLE_QUOTA));
pr_cont("extent data owner root %llu\n", btrfs_extent_owner_ref_root_id(eb, ref));
}
static void print_extent_item(const struct extent_buffer *eb, int slot, int type)
{
struct btrfs_extent_item *ei;
struct btrfs_extent_inline_ref *iref;
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
struct btrfs_extent_owner_ref *oref;
struct btrfs_disk_key key;
unsigned long end;
unsigned long ptr;
u32 item_size = btrfs_item_size(eb, slot);
u64 flags;
u64 offset;
int ref_index = 0;
if (unlikely(item_size < sizeof(*ei))) {
btrfs_err(eb->fs_info,
"unexpected extent item size, has %u expect >= %zu",
item_size, sizeof(*ei));
return;
}
ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
flags = btrfs_extent_flags(eb, ei);
pr_info("\t\textent refs %llu gen %llu flags %llu\n",
btrfs_extent_refs(eb, ei), btrfs_extent_generation(eb, ei),
flags);
if ((type == BTRFS_EXTENT_ITEM_KEY) &&
flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
struct btrfs_tree_block_info *info;
info = (struct btrfs_tree_block_info *)(ei + 1);
btrfs_tree_block_key(eb, info, &key);
pr_info("\t\ttree block key (%llu %u %llu) level %d\n",
btrfs_disk_key_objectid(&key), key.type,
btrfs_disk_key_offset(&key),
btrfs_tree_block_level(eb, info));
iref = (struct btrfs_extent_inline_ref *)(info + 1);
} else {
iref = (struct btrfs_extent_inline_ref *)(ei + 1);
}
ptr = (unsigned long)iref;
end = (unsigned long)ei + item_size;
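/* Walk every inline reference packed after the extent item, up to the item end. */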
while (ptr < end) {
iref = (struct btrfs_extent_inline_ref *)ptr;
type = btrfs_extent_inline_ref_type(eb, iref);
offset = btrfs_extent_inline_ref_offset(eb, iref);
pr_info("\t\tref#%d: ", ref_index++);
switch (type) {
case BTRFS_TREE_BLOCK_REF_KEY:
pr_cont("tree block backref root %llu\n", offset);
break;
case BTRFS_SHARED_BLOCK_REF_KEY:
pr_cont("shared block backref parent %llu\n", offset);
/*
 * offset is supposed to be a tree block, which is nodesize aligned;
 * nodesize is a multiple of sectorsize, so check sectorsize alignment here.
 */
if (!IS_ALIGNED(offset, eb->fs_info->sectorsize))
pr_info(
"\t\t\t(parent %llu not aligned to sectorsize %u)\n",
offset, eb->fs_info->sectorsize);
break;
case BTRFS_EXTENT_DATA_REF_KEY:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
print_extent_data_ref(eb, dref);
break;
case BTRFS_SHARED_DATA_REF_KEY:
sref = (struct btrfs_shared_data_ref *)(iref + 1);
pr_cont("shared data backref parent %llu count %u\n",
offset, btrfs_shared_data_ref_count(eb, sref));
/*
* Offset is supposed to be a tree block which must be
* aligned to sectorsize.
*/
if (!IS_ALIGNED(offset, eb->fs_info->sectorsize))
pr_info(
"\t\t\t(parent %llu not aligned to sectorsize %u)\n",
offset, eb->fs_info->sectorsize);
break;
case BTRFS_EXTENT_OWNER_REF_KEY:
oref = (struct btrfs_extent_owner_ref *)(&iref->offset);
print_extent_owner_ref(eb, oref);
break;
default:
pr_cont("(extent %llu has INVALID ref type %d)\n",
eb->start, type);
return;
}
ptr += btrfs_extent_inline_ref_size(type);
}
WARN_ON(ptr > end);
}
static void print_uuid_item(const struct extent_buffer *l, unsigned long offset,
u32 item_size)
{
if (!IS_ALIGNED(item_size, sizeof(u64))) {
pr_warn("BTRFS: uuid item with illegal size %lu!\n",
(unsigned long)item_size);
return;
}
while (item_size) {
__le64 subvol_id;
read_extent_buffer(l, &subvol_id, offset, sizeof(subvol_id));
pr_info("\t\tsubvol_id %llu\n", le64_to_cpu(subvol_id));
item_size -= sizeof(u64);
offset += sizeof(u64);
}
}
static void print_raid_stripe_key(const struct extent_buffer *eb, u32 item_size,
struct btrfs_stripe_extent *stripe)
{
const int num_stripes = btrfs_num_raid_stripes(item_size);
for (int i = 0; i < num_stripes; i++)
pr_info("\t\t\tstride %d devid %llu physical %llu\n",
i, btrfs_raid_stride_devid(eb, &stripe->strides[i]),
btrfs_raid_stride_physical(eb, &stripe->strides[i]));
}
/*
* Helper to output refs and locking status of extent buffer. Useful to debug
* race condition related problems.
*/
static void print_eb_refs_lock(const struct extent_buffer *eb)
{
#ifdef CONFIG_BTRFS_DEBUG
btrfs_info(eb->fs_info, "refs %u lock_owner %u current %u",
atomic_read(&eb->refs), eb->lock_owner, current->pid);
#endif
}
void btrfs_print_leaf(const struct extent_buffer *l)
{
struct btrfs_fs_info *fs_info;
int i;
u32 type, nr;
struct btrfs_root_item *ri;
struct btrfs_dir_item *di;
struct btrfs_inode_item *ii;
struct btrfs_block_group_item *bi;
struct btrfs_file_extent_item *fi;
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
struct btrfs_dev_extent *dev_extent;
struct btrfs_key key;
struct btrfs_key found_key;
if (!l)
return;
fs_info = l->fs_info;
nr = btrfs_header_nritems(l);
btrfs_info(fs_info,
"leaf %llu gen %llu total ptrs %d free space %d owner %llu",
btrfs_header_bytenr(l), btrfs_header_generation(l), nr,
btrfs_leaf_free_space(l), btrfs_header_owner(l));
print_eb_refs_lock(l);
for (i = 0 ; i < nr ; i++) {
btrfs_item_key_to_cpu(l, &key, i);
type = key.type;
pr_info("\titem %d key (%llu %u %llu) itemoff %d itemsize %d\n",
i, key.objectid, type, key.offset,
btrfs_item_offset(l, i), btrfs_item_size(l, i));
switch (type) {
case BTRFS_INODE_ITEM_KEY:
ii = btrfs_item_ptr(l, i, struct btrfs_inode_item);
pr_info("\t\tinode generation %llu size %llu mode %o\n",
btrfs_inode_generation(l, ii),
btrfs_inode_size(l, ii),
btrfs_inode_mode(l, ii));
break;
case BTRFS_DIR_ITEM_KEY:
di = btrfs_item_ptr(l, i, struct btrfs_dir_item);
btrfs_dir_item_key_to_cpu(l, di, &found_key);
pr_info("\t\tdir oid %llu flags %u\n",
found_key.objectid,
btrfs_dir_flags(l, di));
break;
case BTRFS_ROOT_ITEM_KEY:
ri = btrfs_item_ptr(l, i, struct btrfs_root_item);
pr_info("\t\troot data bytenr %llu refs %u\n",
btrfs_disk_root_bytenr(l, ri),
btrfs_disk_root_refs(l, ri));
break;
case BTRFS_EXTENT_ITEM_KEY:
case BTRFS_METADATA_ITEM_KEY:
print_extent_item(l, i, type);
break;
case BTRFS_TREE_BLOCK_REF_KEY:
pr_info("\t\ttree block backref\n");
break;
case BTRFS_SHARED_BLOCK_REF_KEY:
pr_info("\t\tshared block backref\n");
break;
case BTRFS_EXTENT_DATA_REF_KEY:
dref = btrfs_item_ptr(l, i,
struct btrfs_extent_data_ref);
print_extent_data_ref(l, dref);
break;
case BTRFS_SHARED_DATA_REF_KEY:
sref = btrfs_item_ptr(l, i,
struct btrfs_shared_data_ref);
pr_info("\t\tshared data backref count %u\n",
btrfs_shared_data_ref_count(l, sref));
break;
case BTRFS_EXTENT_DATA_KEY:
fi = btrfs_item_ptr(l, i,
struct btrfs_file_extent_item);
pr_info("\t\tgeneration %llu type %hhu\n",
btrfs_file_extent_generation(l, fi),
btrfs_file_extent_type(l, fi));
if (btrfs_file_extent_type(l, fi) ==
BTRFS_FILE_EXTENT_INLINE) {
pr_info("\t\tinline extent data size %llu\n",
btrfs_file_extent_ram_bytes(l, fi));
break;
}
pr_info("\t\textent data disk bytenr %llu nr %llu\n",
btrfs_file_extent_disk_bytenr(l, fi),
btrfs_file_extent_disk_num_bytes(l, fi));
pr_info("\t\textent data offset %llu nr %llu ram %llu\n",
btrfs_file_extent_offset(l, fi),
btrfs_file_extent_num_bytes(l, fi),
btrfs_file_extent_ram_bytes(l, fi));
break;
case BTRFS_BLOCK_GROUP_ITEM_KEY:
bi = btrfs_item_ptr(l, i,
struct btrfs_block_group_item);
pr_info(
"\t\tblock group used %llu chunk_objectid %llu flags %llu\n",
btrfs_block_group_used(l, bi),
btrfs_block_group_chunk_objectid(l, bi),
btrfs_block_group_flags(l, bi));
break;
case BTRFS_CHUNK_ITEM_KEY:
print_chunk(l, btrfs_item_ptr(l, i,
struct btrfs_chunk));
break;
case BTRFS_DEV_ITEM_KEY:
print_dev_item(l, btrfs_item_ptr(l, i,
struct btrfs_dev_item));
break;
case BTRFS_DEV_EXTENT_KEY:
dev_extent = btrfs_item_ptr(l, i,
struct btrfs_dev_extent);
pr_info("\t\tdev extent chunk_tree %llu\n\t\tchunk objectid %llu chunk offset %llu length %llu\n",
btrfs_dev_extent_chunk_tree(l, dev_extent),
btrfs_dev_extent_chunk_objectid(l, dev_extent),
btrfs_dev_extent_chunk_offset(l, dev_extent),
btrfs_dev_extent_length(l, dev_extent));
break;
case BTRFS_PERSISTENT_ITEM_KEY:
pr_info("\t\tpersistent item objectid %llu offset %llu\n",
key.objectid, key.offset);
switch (key.objectid) {
case BTRFS_DEV_STATS_OBJECTID:
pr_info("\t\tdevice stats\n");
break;
default:
pr_info("\t\tunknown persistent item\n");
}
break;
case BTRFS_TEMPORARY_ITEM_KEY:
pr_info("\t\ttemporary item objectid %llu offset %llu\n",
key.objectid, key.offset);
switch (key.objectid) {
case BTRFS_BALANCE_OBJECTID:
pr_info("\t\tbalance status\n");
break;
default:
pr_info("\t\tunknown temporary item\n");
}
break;
case BTRFS_DEV_REPLACE_KEY:
pr_info("\t\tdev replace\n");
break;
case BTRFS_UUID_KEY_SUBVOL:
case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
print_uuid_item(l, btrfs_item_ptr_offset(l, i),
btrfs_item_size(l, i));
break;
case BTRFS_RAID_STRIPE_KEY:
print_raid_stripe_key(l, btrfs_item_size(l, i),
btrfs_item_ptr(l, i, struct btrfs_stripe_extent));
break;
}
}
}
void btrfs_print_tree(const struct extent_buffer *c, bool follow)
{
struct btrfs_fs_info *fs_info;
int i; u32 nr;
struct btrfs_key key;
int level;
if (!c)
return;
fs_info = c->fs_info;
nr = btrfs_header_nritems(c);
level = btrfs_header_level(c);
if (level == 0) {
btrfs_print_leaf(c);
return;
}
btrfs_info(fs_info,
"node %llu level %d gen %llu total ptrs %d free spc %u owner %llu",
btrfs_header_bytenr(c), level, btrfs_header_generation(c),
nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr,
btrfs_header_owner(c));
print_eb_refs_lock(c);
for (i = 0; i < nr; i++) {
btrfs_node_key_to_cpu(c, &key, i);
pr_info("\tkey %d (%llu %u %llu) block %llu gen %llu\n",
i, key.objectid, key.type, key.offset,
btrfs_node_blockptr(c, i),
btrfs_node_ptr_generation(c, i));
}
if (!follow)
return;
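/* 'follow' was requested: read each child block and print it recursively. */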
for (i = 0; i < nr; i++) {
struct btrfs_tree_parent_check check = {
.level = level - 1,
.transid = btrfs_node_ptr_generation(c, i),
.owner_root = btrfs_header_owner(c),
.has_first_key = true
};
struct extent_buffer *next;
btrfs_node_key_to_cpu(c, &check.first_key, i);
next = read_tree_block(fs_info, btrfs_node_blockptr(c, i), &check);
if (IS_ERR(next))
continue;
if (!extent_buffer_uptodate(next)) {
free_extent_buffer(next);
continue;
}
if (btrfs_is_leaf(next) && level != 1)
BUG();
if (btrfs_header_level(next) != level - 1)
BUG();
btrfs_print_tree(next, follow);
free_extent_buffer(next);
}
}
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Error log support on PowerNV.
*
* Copyright 2013,2014 IBM Corp.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/fcntl.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <asm/opal.h>
struct elog_obj {
struct kobject kobj;
struct bin_attribute raw_attr;
uint64_t id;
uint64_t type;
size_t size;
char *buffer;
};
#define to_elog_obj(x) container_of(x, struct elog_obj, kobj)
struct elog_attribute {
struct attribute attr;
ssize_t (*show)(struct elog_obj *elog, struct elog_attribute *attr,
char *buf);
ssize_t (*store)(struct elog_obj *elog, struct elog_attribute *attr,
const char *buf, size_t count);
};
#define to_elog_attr(x) container_of(x, struct elog_attribute, attr)
static ssize_t elog_id_show(struct elog_obj *elog_obj,
struct elog_attribute *attr,
char *buf)
{
return sprintf(buf, "0x%llx\n", elog_obj->id);
}
static const char *elog_type_to_string(uint64_t type)
{
switch (type) {
case 0: return "PEL";
default: return "unknown";
}
}
static ssize_t elog_type_show(struct elog_obj *elog_obj,
struct elog_attribute *attr,
char *buf)
{
return sprintf(buf, "0x%llx %s\n",
elog_obj->type,
elog_type_to_string(elog_obj->type));
}
static ssize_t elog_ack_show(struct elog_obj *elog_obj,
struct elog_attribute *attr,
char *buf)
{
return sprintf(buf, "ack - acknowledge log message\n");
}
static ssize_t elog_ack_store(struct elog_obj *elog_obj,
struct elog_attribute *attr,
const char *buf,
size_t count)
{
/*
* Try to self remove this attribute. If we are successful,
* delete the kobject itself.
*/
if (sysfs_remove_file_self(&elog_obj->kobj, &attr->attr)) {
opal_send_ack_elog(elog_obj->id);
kobject_put(&elog_obj->kobj);
}
return count;
}
static struct elog_attribute id_attribute =
__ATTR(id, 0444, elog_id_show, NULL);
static struct elog_attribute type_attribute =
__ATTR(type, 0444, elog_type_show, NULL);
static struct elog_attribute ack_attribute =
__ATTR(acknowledge, 0660, elog_ack_show, elog_ack_store);
static struct kset *elog_kset;
static ssize_t elog_attr_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
struct elog_attribute *attribute;
struct elog_obj *elog;
attribute = to_elog_attr(attr);
elog = to_elog_obj(kobj);
if (!attribute->show)
return -EIO;
return attribute->show(elog, attribute, buf);
}
static ssize_t elog_attr_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t len)
{
struct elog_attribute *attribute;
struct elog_obj *elog;
attribute = to_elog_attr(attr);
elog = to_elog_obj(kobj);
if (!attribute->store)
return -EIO;
return attribute->store(elog, attribute, buf, len);
}
static const struct sysfs_ops elog_sysfs_ops = {
.show = elog_attr_show,
.store = elog_attr_store,
};
static void elog_release(struct kobject *kobj)
{
struct elog_obj *elog;
elog = to_elog_obj(kobj);
kfree(elog->buffer);
kfree(elog);
}
static struct attribute *elog_default_attrs[] = {
&id_attribute.attr,
&type_attribute.attr,
&ack_attribute.attr,
NULL,
};
ATTRIBUTE_GROUPS(elog_default);
static const struct kobj_type elog_ktype = {
.sysfs_ops = &elog_sysfs_ops,
.release = &elog_release,
.default_groups = elog_default_groups,
};
/* Maximum size of a single log on FSP is 16KB */
#define OPAL_MAX_ERRLOG_SIZE 16384
static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
int opal_rc;
struct elog_obj *elog = to_elog_obj(kobj);
/* We may have had an error reading before, so let's retry */
if (!elog->buffer) {
elog->buffer = kzalloc(elog->size, GFP_KERNEL);
if (!elog->buffer)
return -EIO;
opal_rc = opal_read_elog(__pa(elog->buffer),
elog->size, elog->id);
if (opal_rc != OPAL_SUCCESS) {
pr_err_ratelimited("ELOG: log read failed for log-id=%llx\n",
elog->id);
kfree(elog->buffer);
elog->buffer = NULL;
return -EIO;
}
}
memcpy(buffer, elog->buffer + pos, count);
return count;
}
static void create_elog_obj(uint64_t id, size_t size, uint64_t type)
{
struct elog_obj *elog;
int rc;
elog = kzalloc(sizeof(*elog), GFP_KERNEL);
if (!elog)
return;
elog->kobj.kset = elog_kset;
kobject_init(&elog->kobj, &elog_ktype);
sysfs_bin_attr_init(&elog->raw_attr);
elog->raw_attr.attr.name = "raw";
elog->raw_attr.attr.mode = 0400;
elog->raw_attr.size = size;
elog->raw_attr.read = raw_attr_read;
elog->id = id;
elog->size = size;
elog->type = type;
elog->buffer = kzalloc(elog->size, GFP_KERNEL);
if (elog->buffer) {
rc = opal_read_elog(__pa(elog->buffer),
elog->size, elog->id);
if (rc != OPAL_SUCCESS) {
pr_err("ELOG: log read failed for log-id=%llx\n",
elog->id);
kfree(elog->buffer);
elog->buffer = NULL;
}
}
rc = kobject_add(&elog->kobj, NULL, "0x%llx", id);
if (rc) {
kobject_put(&elog->kobj);
return;
}
/*
* As soon as the sysfs file for this elog is created/activated there is
* a chance the opal_errd daemon (or any userspace) might read and
* acknowledge the elog before kobject_uevent() is called. If that
* happens then there is a potential race between
* elog_ack_store->kobject_put() and kobject_uevent() which leads to a
* use-after-free of a kernfs object resulting in a kernel crash.
*
* To avoid that, we need to take a reference on behalf of the bin file,
* so that our reference remains valid while we call kobject_uevent().
* We then drop our reference before exiting the function, leaving the
* bin file to drop the last reference (if it hasn't already).
*/
/* Take a reference for the bin file */
kobject_get(&elog->kobj);
rc = sysfs_create_bin_file(&elog->kobj, &elog->raw_attr);
if (rc == 0) {
kobject_uevent(&elog->kobj, KOBJ_ADD);
} else {
/* Drop the reference taken for the bin file */
kobject_put(&elog->kobj);
}
/* Drop our reference */
kobject_put(&elog->kobj);
return;
}
static irqreturn_t elog_event(int irq, void *data)
{
__be64 size;
__be64 id;
__be64 type;
uint64_t elog_size;
uint64_t log_id;
uint64_t elog_type;
int rc;
char name[2+16+1];
struct kobject *kobj;
rc = opal_get_elog_size(&id, &size, &type);
if (rc != OPAL_SUCCESS) {
pr_err("ELOG: OPAL log info read failed\n");
return IRQ_HANDLED;
}
elog_size = be64_to_cpu(size);
log_id = be64_to_cpu(id);
elog_type = be64_to_cpu(type);
WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
elog_size = OPAL_MAX_ERRLOG_SIZE;
sprintf(name, "0x%llx", log_id);
/* we may get notified twice, let's handle
* that gracefully and not create two conflicting
* entries.
*/
kobj = kset_find_obj(elog_kset, name);
if (kobj) {
/* Drop reference added by kset_find_obj() */
kobject_put(kobj);
return IRQ_HANDLED;
}
create_elog_obj(log_id, elog_size, elog_type);
return IRQ_HANDLED;
}
int __init opal_elog_init(void)
{
int rc = 0, irq;
/* ELOG not supported by firmware */
if (!opal_check_token(OPAL_ELOG_READ))
return -1;
elog_kset = kset_create_and_add("elog", NULL, opal_kobj);
if (!elog_kset) {
pr_warn("%s: failed to create elog kset\n", __func__);
return -1;
}
irq = opal_event_request(ilog2(OPAL_EVENT_ERROR_LOG_AVAIL));
if (!irq) {
pr_err("%s: Can't register OPAL event irq (%d)\n",
__func__, irq);
return -ENODEV;
}
rc = request_threaded_irq(irq, NULL, elog_event,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "opal-elog", NULL);
if (rc) {
pr_err("%s: Can't request OPAL event irq (%d)\n",
__func__, rc);
return rc;
}
/* We are now ready to pull error logs from opal. */
if (opal_check_token(OPAL_ELOG_RESEND))
opal_resend_pending_logs();
return 0;
}
|
/*
* Copyright (c) 2006 Tensilica, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2.1 of the GNU Lesser General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
* USA.
*/
#ifndef _XTENSA_REGS_H
#define _XTENSA_REGS_H
/* Special registers. */
#define SREG_MR 32
#define SREG_IBREAKENABLE 96
#define SREG_IBREAKA 128
#define SREG_DBREAKA 144
#define SREG_DBREAKC 160
#define SREG_EPC 176
#define SREG_EPS 192
#define SREG_EXCSAVE 208
#define SREG_CCOMPARE 240
#define SREG_MISC 244
/* EXCCAUSE register fields */
#define EXCCAUSE_EXCCAUSE_SHIFT 0
#define EXCCAUSE_EXCCAUSE_MASK 0x3F
#define EXCCAUSE_ILLEGAL_INSTRUCTION 0
#define EXCCAUSE_SYSTEM_CALL 1
#define EXCCAUSE_INSTRUCTION_FETCH_ERROR 2
#define EXCCAUSE_LOAD_STORE_ERROR 3
#define EXCCAUSE_LEVEL1_INTERRUPT 4
#define EXCCAUSE_ALLOCA 5
#define EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6
#define EXCCAUSE_SPECULATION 7
#define EXCCAUSE_PRIVILEGED 8
#define EXCCAUSE_UNALIGNED 9
#define EXCCAUSE_INSTR_DATA_ERROR 12
#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13
#define EXCCAUSE_INSTR_ADDR_ERROR 14
#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15
#define EXCCAUSE_ITLB_MISS 16
#define EXCCAUSE_ITLB_MULTIHIT 17
#define EXCCAUSE_ITLB_PRIVILEGE 18
#define EXCCAUSE_ITLB_SIZE_RESTRICTION 19
#define EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20
#define EXCCAUSE_DTLB_MISS 24
#define EXCCAUSE_DTLB_MULTIHIT 25
#define EXCCAUSE_DTLB_PRIVILEGE 26
#define EXCCAUSE_DTLB_SIZE_RESTRICTION 27
#define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28
#define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29
#define EXCCAUSE_COPROCESSOR0_DISABLED 32
#define EXCCAUSE_COPROCESSOR1_DISABLED 33
#define EXCCAUSE_COPROCESSOR2_DISABLED 34
#define EXCCAUSE_COPROCESSOR3_DISABLED 35
#define EXCCAUSE_COPROCESSOR4_DISABLED 36
#define EXCCAUSE_COPROCESSOR5_DISABLED 37
#define EXCCAUSE_COPROCESSOR6_DISABLED 38
#define EXCCAUSE_COPROCESSOR7_DISABLED 39
#define EXCCAUSE_N 64
/* PS register fields. */
#define PS_WOE_BIT 18
#define PS_WOE_MASK 0x00040000
#define PS_CALLINC_SHIFT 16
#define PS_CALLINC_MASK 0x00030000
#define PS_OWB_SHIFT 8
#define PS_OWB_WIDTH 4
#define PS_OWB_MASK 0x00000F00
#define PS_RING_SHIFT 6
#define PS_RING_MASK 0x000000C0
#define PS_UM_BIT 5
#define PS_EXCM_BIT 4
#define PS_INTLEVEL_SHIFT 0
#define PS_INTLEVEL_WIDTH 4
#define PS_INTLEVEL_MASK 0x0000000F
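/*
 * Illustrative example (not part of the original header): fields are
 * extracted from a saved PS value with the masks and shifts above, e.g.
 *   ring     = (ps & PS_RING_MASK) >> PS_RING_SHIFT;
 *   intlevel = ps & PS_INTLEVEL_MASK;
 */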
/* DBREAKCn register fields. */
#define DBREAKC_MASK_BIT 0
#define DBREAKC_MASK_MASK 0x0000003F
#define DBREAKC_LOAD_BIT 30
#define DBREAKC_LOAD_MASK 0x40000000
#define DBREAKC_STOR_BIT 31
#define DBREAKC_STOR_MASK 0x80000000
/* DEBUGCAUSE register fields. */
#define DEBUGCAUSE_DBNUM_MASK 0xf00
#define DEBUGCAUSE_DBNUM_SHIFT 8 /* First bit of DBNUM field */
#define DEBUGCAUSE_DEBUGINT_BIT 5 /* External debug interrupt */
#define DEBUGCAUSE_BREAKN_BIT 4 /* BREAK.N instruction */
#define DEBUGCAUSE_BREAK_BIT 3 /* BREAK instruction */
#define DEBUGCAUSE_DBREAK_BIT 2 /* DBREAK match */
#define DEBUGCAUSE_IBREAK_BIT 1 /* IBREAK match */
#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
#endif /* _XTENSA_REGS_H */
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */
#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"
/**
* ice_repr_inc_tx_stats - increment Tx statistic by one packet
* @repr: repr to increment stats on
* @len: length of the packet
* @xmit_status: value returned by xmit function
*/
void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
int xmit_status)
{
struct ice_repr_pcpu_stats *stats;
if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
xmit_status != NET_XMIT_CN)) {
this_cpu_inc(repr->stats->tx_drops);
return;
}
stats = this_cpu_ptr(repr->stats);
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += len;
u64_stats_update_end(&stats->syncp);
}
/**
* ice_repr_inc_rx_stats - increment Rx statistic by one packet
* @netdev: repr netdev to increment stats on
* @len: length of the packet
*/
void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
struct ice_repr_pcpu_stats *stats;
stats = this_cpu_ptr(repr->stats);
u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
stats->rx_bytes += len;
u64_stats_update_end(&stats->syncp);
}
/**
* ice_repr_get_stats64 - get VF stats for VFPR use
* @netdev: pointer to port representor netdev
* @stats: pointer to struct where stats can be stored
*/
static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_repr *repr = np->repr;
struct ice_eth_stats *eth_stats;
struct ice_vsi *vsi;
if (repr->ops.ready(repr))
return;
vsi = repr->src_vsi;
ice_update_vsi_stats(vsi);
eth_stats = &vsi->eth_stats;
stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
eth_stats->tx_multicast;
stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
eth_stats->rx_multicast;
stats->tx_bytes = eth_stats->tx_bytes;
stats->rx_bytes = eth_stats->rx_bytes;
stats->multicast = eth_stats->rx_multicast;
stats->tx_errors = eth_stats->tx_errors;
stats->tx_dropped = eth_stats->tx_discards;
stats->rx_dropped = eth_stats->rx_discards;
}
/**
* ice_netdev_to_repr - Get port representor for given netdevice
* @netdev: pointer to port representor netdev
*/
struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
return np->repr;
}
/**
* ice_repr_vf_open - Enable port representor's network interface
* @netdev: network interface device structure
*
* The open entry point is called when a port representor's network
* interface is made active by the system (IFF_UP). Corresponding
* VF is notified about link status change.
*
* Returns 0 on success
*/
static int ice_repr_vf_open(struct net_device *netdev)
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
struct ice_vf *vf;
vf = repr->vf;
vf->link_forced = true;
vf->link_up = true;
ice_vc_notify_vf_link_state(vf);
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
return 0;
}
static int ice_repr_sf_open(struct net_device *netdev)
{
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
return 0;
}
/**
* ice_repr_vf_stop - Disable port representor's network interface
* @netdev: network interface device structure
*
* The stop entry point is called when a port representor's network
* interface is de-activated by the system. Corresponding
* VF is notified about link status change.
*
* Returns 0 on success
*/
static int ice_repr_vf_stop(struct net_device *netdev)
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
struct ice_vf *vf;
vf = repr->vf;
vf->link_forced = true;
vf->link_up = false;
ice_vc_notify_vf_link_state(vf);
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
return 0;
}
static int ice_repr_sf_stop(struct net_device *netdev)
{
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
return 0;
}
/**
* ice_repr_sp_stats64 - get slow path stats for port representor
* @dev: network interface device structure
* @stats: netlink stats structure
*/
static int
ice_repr_sp_stats64(const struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct ice_repr *repr = ice_netdev_to_repr(dev);
int i;
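/* fold the per-CPU slow-path counters into the aggregate netlink stats */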
for_each_possible_cpu(i) {
u64 tbytes, tpkts, tdrops, rbytes, rpkts;
struct ice_repr_pcpu_stats *repr_stats;
unsigned int start;
repr_stats = per_cpu_ptr(repr->stats, i);
do {
start = u64_stats_fetch_begin(&repr_stats->syncp);
tbytes = repr_stats->tx_bytes;
tpkts = repr_stats->tx_packets;
tdrops = repr_stats->tx_drops;
rbytes = repr_stats->rx_bytes;
rpkts = repr_stats->rx_packets;
} while (u64_stats_fetch_retry(&repr_stats->syncp, start));
stats->tx_bytes += tbytes;
stats->tx_packets += tpkts;
stats->tx_dropped += tdrops;
stats->rx_bytes += rbytes;
stats->rx_packets += rpkts;
}
return 0;
}
static bool
ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
{
return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}
static int
ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
void *sp)
{
if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
return -EINVAL;
}
static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
struct flow_cls_offload *flower)
{
switch (flower->command) {
case FLOW_CLS_REPLACE:
return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
case FLOW_CLS_DESTROY:
return ice_del_cls_flower(repr->src_vsi, flower);
default:
return -EINVAL;
}
}
static int
ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
return ice_repr_setup_tc_cls_flower(np->repr, flower);
default:
return -EOPNOTSUPP;
}
}
static LIST_HEAD(ice_repr_block_cb_list);
static int
ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
switch (type) {
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple((struct flow_block_offload *)
type_data,
&ice_repr_block_cb_list,
ice_repr_setup_tc_block_cb,
np, np, true);
default:
return -EOPNOTSUPP;
}
}
static const struct net_device_ops ice_repr_vf_netdev_ops = {
.ndo_get_stats64 = ice_repr_get_stats64,
.ndo_open = ice_repr_vf_open,
.ndo_stop = ice_repr_vf_stop,
.ndo_start_xmit = ice_eswitch_port_start_xmit,
.ndo_setup_tc = ice_repr_setup_tc,
.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};
static const struct net_device_ops ice_repr_sf_netdev_ops = {
.ndo_get_stats64 = ice_repr_get_stats64,
.ndo_open = ice_repr_sf_open,
.ndo_stop = ice_repr_sf_stop,
.ndo_start_xmit = ice_eswitch_port_start_xmit,
.ndo_setup_tc = ice_repr_setup_tc,
.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};
/**
* ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
* @netdev: pointer to netdev
*/
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops ||
netdev->netdev_ops == &ice_repr_sf_netdev_ops);
}
/**
* ice_repr_reg_netdev - register port representor netdev
* @netdev: pointer to port representor netdev
* @ops: new ops for netdev
*/
static int
ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops)
{
eth_hw_addr_random(netdev);
netdev->netdev_ops = ops;
ice_set_ethtool_repr_ops(netdev);
netdev->hw_features |= NETIF_F_HW_TC;
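/* register in a quiesced state; queues are started later via ice_repr_start_tx_queues() */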
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
return register_netdev(netdev);
}
static int ice_repr_ready_vf(struct ice_repr *repr)
{
return !ice_check_vf_ready_for_cfg(repr->vf);
}
static int ice_repr_ready_sf(struct ice_repr *repr)
{
return !repr->sf->active;
}
/**
* ice_repr_destroy - remove representor from VF
* @repr: pointer to representor structure
*/
void ice_repr_destroy(struct ice_repr *repr)
{
free_percpu(repr->stats);
free_netdev(repr->netdev);
kfree(repr);
}
static void ice_repr_rem_vf(struct ice_repr *repr)
{
ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
}
static void ice_repr_rem_sf(struct ice_repr *repr)
{
unregister_netdev(repr->netdev);
ice_devlink_destroy_sf_port(repr->sf);
}
static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink)
{
/* only export the Tx topology if ADQ and DCB are disabled and the eswitch is enabled */
if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
!ice_is_switchdev_running(pf))
return;
ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}
/**
* ice_repr_create - add representor for generic VSI
* @src_vsi: pointer to VSI structure of device to represent
*/
static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi)
{
struct ice_netdev_priv *np;
struct ice_repr *repr;
int err;
repr = kzalloc(sizeof(*repr), GFP_KERNEL);
if (!repr)
return ERR_PTR(-ENOMEM);
repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
if (!repr->netdev) {
err = -ENOMEM;
goto err_alloc;
}
repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
if (!repr->stats) {
err = -ENOMEM;
goto err_stats;
}
repr->src_vsi = src_vsi;
repr->id = src_vsi->vsi_num;
np = netdev_priv(repr->netdev);
np->repr = repr;
repr->netdev->min_mtu = ETH_MIN_MTU;
repr->netdev->max_mtu = ICE_MAX_MTU;
SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back));
return repr;
err_stats:
free_netdev(repr->netdev);
err_alloc:
kfree(repr);
return ERR_PTR(err);
}
static int ice_repr_add_vf(struct ice_repr *repr)
{
struct ice_vf *vf = repr->vf;
struct devlink *devlink;
int err;
err = ice_devlink_create_vf_port(vf);
if (err)
return err;
SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops);
if (err)
goto err_netdev;
err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
if (err)
goto err_cfg_vsi;
ice_virtchnl_set_repr_ops(vf);
devlink = priv_to_devlink(vf->pf);
ice_repr_set_tx_topology(vf->pf, devlink);
return 0;
err_cfg_vsi:
unregister_netdev(repr->netdev);
err_netdev:
ice_devlink_destroy_vf_port(vf);
return err;
}
/**
* ice_repr_create_vf - add representor for VF VSI
* @vf: VF to create port representor on
*
* Set the correct representor type for the VF and the function pointers.
*
* Return: created port representor on success, error otherwise
*/
struct ice_repr *ice_repr_create_vf(struct ice_vf *vf)
{
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
struct ice_repr *repr;
if (!vsi)
return ERR_PTR(-EINVAL);
repr = ice_repr_create(vsi);
if (IS_ERR(repr))
return repr;
repr->type = ICE_REPR_TYPE_VF;
repr->vf = vf;
repr->ops.add = ice_repr_add_vf;
repr->ops.rem = ice_repr_rem_vf;
repr->ops.ready = ice_repr_ready_vf;
ether_addr_copy(repr->parent_mac, vf->hw_lan_addr);
return repr;
}
static int ice_repr_add_sf(struct ice_repr *repr)
{
struct ice_dynamic_port *sf = repr->sf;
int err;
err = ice_devlink_create_sf_port(sf);
if (err)
return err;
SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port);
err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops);
if (err)
goto err_netdev;
ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back));
return 0;
err_netdev:
ice_devlink_destroy_sf_port(sf);
return err;
}
/**
* ice_repr_create_sf - add representor for SF VSI
* @sf: SF to create port representor on
*
* Set the correct representor type for the SF and the function pointers.
*
* Return: created port representor on success, error otherwise
*/
struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf)
{
struct ice_repr *repr = ice_repr_create(sf->vsi);
if (IS_ERR(repr))
return repr;
repr->type = ICE_REPR_TYPE_SF;
repr->sf = sf;
repr->ops.add = ice_repr_add_sf;
repr->ops.rem = ice_repr_rem_sf;
repr->ops.ready = ice_repr_ready_sf;
ether_addr_copy(repr->parent_mac, sf->hw_addr);
return repr;
}
struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
{
return xa_load(&pf->eswitch.reprs, id);
}
/**
* ice_repr_start_tx_queues - start Tx queues of port representor
* @repr: pointer to repr structure
*/
void ice_repr_start_tx_queues(struct ice_repr *repr)
{
netif_carrier_on(repr->netdev);
netif_tx_start_all_queues(repr->netdev);
}
/**
* ice_repr_stop_tx_queues - stop Tx queues of port representor
* @repr: pointer to repr structure
*/
void ice_repr_stop_tx_queues(struct ice_repr *repr)
{
netif_carrier_off(repr->netdev);
netif_tx_stop_all_queues(repr->netdev);
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
/* Copyright (c) 2021 Google */
#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>
#include "affinity.h"
#include "bpf_counter.h"
#include "cgroup.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"
#include "bpf_skel/bperf_cgroup.skel.h"
static struct perf_event_attr cgrp_switch_attr = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CGROUP_SWITCHES,
.size = sizeof(cgrp_switch_attr),
.sample_period = 1,
.disabled = 1,
};
static struct evsel *cgrp_switch;
static struct bperf_cgroup_bpf *skel;
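/* fd of the perf event opened for @evt on the CPU at index @cpu (thread 0) */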
#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))
static int bperf_load_program(struct evlist *evlist)
{
struct bpf_link *link;
struct evsel *evsel;
struct cgroup *cgrp, *leader_cgrp;
int i, j;
struct perf_cpu cpu;
int total_cpus = cpu__max_cpu().cpu;
int map_size, map_fd;
int prog_fd, err;
skel = bperf_cgroup_bpf__open();
if (!skel) {
pr_err("Failed to open cgroup skeleton\n");
return -1;
}
skel->rodata->num_cpus = total_cpus;
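/* the evlist carries one copy of every event per cgroup, hence the divisions below */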
skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;
if (cgroup_is_v2("perf_event") > 0)
skel->rodata->use_cgroup_v2 = 1;
BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);
/* we need one copy of events per cpu for reading */
map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
bpf_map__set_max_entries(skel->maps.events, map_size);
bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
/* previous result is saved in a per-cpu array */
map_size = evlist->core.nr_entries / nr_cgroups;
bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
/* cgroup result needs all events (per-cpu) */
map_size = evlist->core.nr_entries;
bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);
set_max_rlimit();
err = bperf_cgroup_bpf__load(skel);
if (err) {
pr_err("Failed to load cgroup skeleton\n");
goto out;
}
err = -1;
cgrp_switch = evsel__new(&cgrp_switch_attr);
if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {
pr_err("Failed to open cgroup switches event\n");
goto out;
}
perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
FD(cgrp_switch, i));
if (IS_ERR(link)) {
pr_err("Failed to attach cgroup program\n");
err = PTR_ERR(link);
goto out;
}
}
/*
* Update cgrp_idx map from cgroup-id to event index.
*/
cgrp = NULL;
i = 0;
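/* events are laid out cgroup by cgroup; only the first cgroup's copies are actually opened */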
evlist__for_each_entry(evlist, evsel) {
if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
leader_cgrp = evsel->cgrp;
evsel->cgrp = NULL;
/* open single copy of the events w/o cgroup */
err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
if (err == 0)
evsel->supported = true;
map_fd = bpf_map__fd(skel->maps.events);
perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
int fd = FD(evsel, j);
__u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
bpf_map_update_elem(map_fd, &idx, &fd, BPF_ANY);
}
evsel->cgrp = leader_cgrp;
}
if (evsel->cgrp == cgrp)
continue;
cgrp = evsel->cgrp;
if (read_cgroup_id(cgrp) < 0) {
pr_debug("Failed to get cgroup id for %s\n", cgrp->name);
cgrp->id = 0;
}
map_fd = bpf_map__fd(skel->maps.cgrp_idx);
err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);
if (err < 0) {
pr_err("Failed to update cgroup index map\n");
goto out;
}
i++;
}
/*
* bperf uses BPF_PROG_TEST_RUN to get an accurate reading. Check
* whether the kernel supports it.
*/
prog_fd = bpf_program__fd(skel->progs.trigger_read);
err = bperf_trigger_reading(prog_fd, 0);
if (err) {
pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n"
"Therefore, --for-each-cgroup might show inaccurate readings\n");
err = 0;
}
out:
return err;
}
static int bperf_cgrp__load(struct evsel *evsel,
struct target *target __maybe_unused)
{
static bool bperf_loaded = false;
evsel->bperf_leader_prog_fd = -1;
evsel->bperf_leader_link_fd = -1;
if (!bperf_loaded && bperf_load_program(evsel->evlist))
return -1;
bperf_loaded = true;
/* just to bypass bpf_counter_skip() */
evsel->follower_skel = (struct bperf_follower_bpf *)skel;
return 0;
}
static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
int cpu __maybe_unused, int fd __maybe_unused)
{
/* nothing to do */
return 0;
}
/*
* trigger the leader prog on each cpu, so the cgrp_readings map gets
* the latest results.
*/
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
struct perf_cpu cpu;
int idx;
int prog_fd = bpf_program__fd(skel->progs.trigger_read);
perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
bperf_trigger_reading(prog_fd, cpu.cpu);
return 0;
}
static int bperf_cgrp__enable(struct evsel *evsel)
{
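/* only the first event (idx 0) toggles the global enable flag */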
if (evsel->core.idx)
return 0;
bperf_cgrp__sync_counters(evsel->evlist);
skel->bss->enabled = 1;
return 0;
}
static int bperf_cgrp__disable(struct evsel *evsel)
{
if (evsel->core.idx)
return 0;
bperf_cgrp__sync_counters(evsel->evlist);
skel->bss->enabled = 0;
return 0;
}
static int bperf_cgrp__read(struct evsel *evsel)
{
struct evlist *evlist = evsel->evlist;
int total_cpus = cpu__max_cpu().cpu;
struct perf_counts_values *counts;
struct bpf_perf_event_value *values;
int reading_map_fd, err = 0;
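/* a single call on the idx-0 event fills in the counts for every evsel in the list */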
if (evsel->core.idx)
return 0;
bperf_cgrp__sync_counters(evsel->evlist);
values = calloc(total_cpus, sizeof(*values));
if (values == NULL)
return -ENOMEM;
reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);
evlist__for_each_entry(evlist, evsel) {
__u32 idx = evsel->core.idx;
int i;
struct perf_cpu cpu;
err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
if (err) {
pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
idx, evsel__name(evsel), evsel->cgrp->name);
goto out;
}
perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {
counts = perf_counts(evsel->counts, i, 0);
counts->val = values[cpu.cpu].counter;
counts->ena = values[cpu.cpu].enabled;
counts->run = values[cpu.cpu].running;
}
}
out:
free(values);
return err;
}
static int bperf_cgrp__destroy(struct evsel *evsel)
{
if (evsel->core.idx)
return 0;
bperf_cgroup_bpf__destroy(skel);
evsel__delete(cgrp_switch); // it'll destroy on_switch progs too
return 0;
}
struct bpf_counter_ops bperf_cgrp_ops = {
.load = bperf_cgrp__load,
.enable = bperf_cgrp__enable,
.disable = bperf_cgrp__disable,
.read = bperf_cgrp__read,
.install_pe = bperf_cgrp__install_pe,
.destroy = bperf_cgrp__destroy,
};
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2023 Red Hat
*/
#ifndef DATA_VIO_H
#define DATA_VIO_H
#include <linux/atomic.h>
#include <linux/bio.h>
#include <linux/list.h>
#include "permassert.h"
#include "indexer.h"
#include "block-map.h"
#include "completion.h"
#include "constants.h"
#include "dedupe.h"
#include "encodings.h"
#include "logical-zone.h"
#include "physical-zone.h"
#include "types.h"
#include "vdo.h"
#include "vio.h"
#include "wait-queue.h"
/* Codes for describing the last asynchronous operation performed on a vio. */
enum async_operation_number {
MIN_VIO_ASYNC_OPERATION_NUMBER,
VIO_ASYNC_OP_LAUNCH = MIN_VIO_ASYNC_OPERATION_NUMBER,
VIO_ASYNC_OP_ACKNOWLEDGE_WRITE,
VIO_ASYNC_OP_ACQUIRE_VDO_HASH_LOCK,
VIO_ASYNC_OP_ATTEMPT_LOGICAL_BLOCK_LOCK,
VIO_ASYNC_OP_LOCK_DUPLICATE_PBN,
VIO_ASYNC_OP_CHECK_FOR_DUPLICATION,
VIO_ASYNC_OP_CLEANUP,
VIO_ASYNC_OP_COMPRESS_DATA_VIO,
VIO_ASYNC_OP_FIND_BLOCK_MAP_SLOT,
VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_READ,
VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_WRITE,
VIO_ASYNC_OP_HASH_DATA_VIO,
VIO_ASYNC_OP_JOURNAL_REMAPPING,
VIO_ASYNC_OP_ATTEMPT_PACKING,
VIO_ASYNC_OP_PUT_MAPPED_BLOCK,
VIO_ASYNC_OP_READ_DATA_VIO,
VIO_ASYNC_OP_UPDATE_DEDUPE_INDEX,
VIO_ASYNC_OP_UPDATE_REFERENCE_COUNTS,
VIO_ASYNC_OP_VERIFY_DUPLICATION,
VIO_ASYNC_OP_WRITE_DATA_VIO,
MAX_VIO_ASYNC_OPERATION_NUMBER,
} __packed;
struct lbn_lock {
logical_block_number_t lbn;
bool locked;
struct vdo_wait_queue waiters;
struct logical_zone *zone;
};
/* A position in the arboreal block map at a specific level. */
struct block_map_tree_slot {
page_number_t page_index;
struct block_map_slot block_map_slot;
};
/* Fields for using the arboreal block map. */
struct tree_lock {
/* The current height at which this data_vio is operating */
height_t height;
/* The block map tree for this LBN */
root_count_t root_index;
/* Whether we hold a page lock */
bool locked;
/* The key for the lock map */
u64 key;
/* The queue of waiters for the page this vio is allocating or loading */
struct vdo_wait_queue waiters;
/* The block map tree slots for this LBN */
struct block_map_tree_slot tree_slots[VDO_BLOCK_MAP_TREE_HEIGHT + 1];
};
struct zoned_pbn {
physical_block_number_t pbn;
enum block_mapping_state state;
struct physical_zone *zone;
};
/*
* Where a data_vio is on the compression path; advance_data_vio_compression_stage() depends on
* the order of this enum.
*/
enum data_vio_compression_stage {
/* A data_vio which has not yet entered the compression path */
DATA_VIO_PRE_COMPRESSOR,
/* A data_vio which is in the compressor */
DATA_VIO_COMPRESSING,
/* A data_vio which is blocked in the packer */
DATA_VIO_PACKING,
/* A data_vio which is no longer on the compression path (and never will be) */
DATA_VIO_POST_PACKER,
};
struct data_vio_compression_status {
enum data_vio_compression_stage stage;
bool may_not_compress;
};
struct compression_state {
/*
* The current compression status of this data_vio. This field contains a value which
* consists of a data_vio_compression_stage and a flag indicating whether a request has
* been made to cancel (or prevent) compression for this data_vio.
*
* This field should be accessed through the get_data_vio_compression_status() and
* set_data_vio_compression_status() methods. It should not be accessed directly.
*/
atomic_t status;
/* The compressed size of this block */
u16 size;
/* The packer input or output bin slot which holds the enclosing data_vio */
slot_number_t slot;
/* The packer bin to which the enclosing data_vio has been assigned */
struct packer_bin *bin;
/* A link in the chain of data_vios which have been packed together */
struct data_vio *next_in_batch;
/* A vio which is blocked in the packer while holding a lock this vio needs. */
struct data_vio *lock_holder;
/*
* The compressed block used to hold the compressed form of this block and that of any
* other blocks for which this data_vio is the compressed write agent.
*/
struct compressed_block *block;
};
/* Fields supporting allocation of data blocks. */
struct allocation {
/* The physical zone in which to allocate a physical block */
struct physical_zone *zone;
/* The block allocated to this vio */
physical_block_number_t pbn;
/*
* If non-NULL, the pooled PBN lock held on the allocated block. Must be a write lock until
* the block has been written, after which it will become a read lock.
*/
struct pbn_lock *lock;
/* The type of write lock to obtain on the allocated block */
enum pbn_lock_type write_lock_type;
/* The zone which was the start of the current allocation cycle */
zone_count_t first_allocation_zone;
/* Whether this vio should wait for a clean slab */
bool wait_for_clean_slab;
};
struct reference_updater {
enum journal_operation operation;
bool increment;
struct zoned_pbn zpbn;
struct pbn_lock *lock;
struct vdo_waiter waiter;
};
/* A vio for processing user data requests. */
struct data_vio {
/* The vdo_wait_queue entry structure */
struct vdo_waiter waiter;
/* The logical block of this request */
struct lbn_lock logical;
/* The state for traversing the block map tree */
struct tree_lock tree_lock;
/* The current partition address of this block */
struct zoned_pbn mapped;
/* The hash of this vio (if not zero) */
struct uds_record_name record_name;
/* Used for logging and debugging */
enum async_operation_number last_async_operation;
/* The operations to record in the recovery and slab journals */
struct reference_updater increment_updater;
struct reference_updater decrement_updater;
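/* I/O type and state flags for this request */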
u16 read : 1;
u16 write : 1;
u16 fua : 1;
u16 is_zero : 1;
u16 is_discard : 1;
u16 is_partial : 1;
u16 is_duplicate : 1;
u16 first_reference_operation_complete : 1;
u16 downgrade_allocation_lock : 1;
struct allocation allocation;
/*
* Whether this vio has received an allocation. This field is examined from threads not in
* the allocation zone.
*/
bool allocation_succeeded;
/* The new partition address of this block after the vio write completes */
struct zoned_pbn new_mapped;
/* The hash zone responsible for the name (NULL if is_zero_block) */
struct hash_zone *hash_zone;
/* The lock this vio holds or shares with other vios with the same data */
struct hash_lock *hash_lock;
/* All data_vios sharing a hash lock are kept in a list linking these list entries */
struct list_head hash_lock_entry;
/* The block number in the partition of the UDS deduplication advice */
struct zoned_pbn duplicate;
/*
* The sequence number of the recovery journal block containing the increment entry for
* this vio.
*/
sequence_number_t recovery_sequence_number;
/* The point in the recovery journal where this write last made an entry */
struct journal_point recovery_journal_point;
/* The list of vios in user initiated write requests */
struct list_head write_entry;
/* The generation number of the VDO that this vio belongs to */
sequence_number_t flush_generation;
/* The completion to use for fetching block map pages for this vio */
struct vdo_page_completion page_completion;
/* The user bio that initiated this VIO */
struct bio *user_bio;
/* partial block support */
block_size_t offset;
/*
* The number of bytes to be discarded. For discards, this field will always be positive,
* whereas for non-discards it will always be 0. Hence it can be used to determine whether
* a data_vio is processing a discard, even after the user_bio has been acknowledged.
*/
u32 remaining_discard;
struct dedupe_context *dedupe_context;
/* Fields beyond this point will not be reset when a pooled data_vio is reused. */
struct vio vio;
/* The completion for making reference count decrements */
struct vdo_completion decrement_completion;
/* All of the fields necessary for the compression path */
struct compression_state compression;
/* A block used as output during compression or uncompression */
char *scratch_block;
struct list_head pool_entry;
};
static inline struct data_vio *vio_as_data_vio(struct vio *vio)
{
VDO_ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio");
return container_of(vio, struct data_vio, vio);
}
static inline struct data_vio *as_data_vio(struct vdo_completion *completion)
{
return vio_as_data_vio(as_vio(completion));
}
static inline struct data_vio *vdo_waiter_as_data_vio(struct vdo_waiter *waiter)
{
if (waiter == NULL)
return NULL;
return container_of(waiter, struct data_vio, waiter);
}
static inline struct data_vio *data_vio_from_reference_updater(struct reference_updater *updater)
{
if (updater->increment)
return container_of(updater, struct data_vio, increment_updater);
return container_of(updater, struct data_vio, decrement_updater);
}
static inline bool data_vio_has_flush_generation_lock(struct data_vio *data_vio)
{
return !list_empty(&data_vio->write_entry);
}
static inline struct vdo *vdo_from_data_vio(struct data_vio *data_vio)
{
return data_vio->vio.completion.vdo;
}
static inline bool data_vio_has_allocation(struct data_vio *data_vio)
{
return (data_vio->allocation.pbn != VDO_ZERO_BLOCK);
}
struct data_vio_compression_status __must_check
advance_data_vio_compression_stage(struct data_vio *data_vio);
struct data_vio_compression_status __must_check
get_data_vio_compression_status(struct data_vio *data_vio);
bool cancel_data_vio_compression(struct data_vio *data_vio);
struct data_vio_pool;
int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
data_vio_count_t discard_limit, struct data_vio_pool **pool_ptr);
void free_data_vio_pool(struct data_vio_pool *pool);
void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio);
void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion);
void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion);
void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios);
data_vio_count_t get_data_vio_pool_active_requests(struct data_vio_pool *pool);
data_vio_count_t get_data_vio_pool_request_limit(struct data_vio_pool *pool);
data_vio_count_t get_data_vio_pool_maximum_requests(struct data_vio_pool *pool);
void complete_data_vio(struct vdo_completion *completion);
void handle_data_vio_error(struct vdo_completion *completion);
static inline void continue_data_vio(struct data_vio *data_vio)
{
vdo_launch_completion(&data_vio->vio.completion);
}
/**
* continue_data_vio_with_error() - Set an error code and then continue processing a data_vio.
*
* This will not mask older errors. This function can be called with a success code, but it is more
* efficient to call continue_data_vio() if the caller knows the result was a success.
*/
static inline void continue_data_vio_with_error(struct data_vio *data_vio, int result)
{
vdo_continue_completion(&data_vio->vio.completion, result);
}
const char * __must_check get_data_vio_operation_name(struct data_vio *data_vio);
static inline void assert_data_vio_in_hash_zone(struct data_vio *data_vio)
{
thread_id_t expected = data_vio->hash_zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
/*
* It's odd to use the LBN, but converting the record name to hex is a bit clunky for an
* inline, and the LBN is better than nothing as an identifier.
*/
VDO_ASSERT_LOG_ONLY((expected == thread_id),
"data_vio for logical block %llu on thread %u, should be on hash zone thread %u",
(unsigned long long) data_vio->logical.lbn, thread_id, expected);
}
static inline void set_data_vio_hash_zone_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
vdo_set_completion_callback(&data_vio->vio.completion, callback,
data_vio->hash_zone->thread_id);
}
/**
* launch_data_vio_hash_zone_callback() - Set a callback as a hash zone operation and invoke it
* immediately.
*/
static inline void launch_data_vio_hash_zone_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
set_data_vio_hash_zone_callback(data_vio, callback);
vdo_launch_completion(&data_vio->vio.completion);
}
static inline void assert_data_vio_in_logical_zone(struct data_vio *data_vio)
{
thread_id_t expected = data_vio->logical.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
VDO_ASSERT_LOG_ONLY((expected == thread_id),
"data_vio for logical block %llu on thread %u, should be on thread %u",
(unsigned long long) data_vio->logical.lbn, thread_id, expected);
}
static inline void set_data_vio_logical_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
vdo_set_completion_callback(&data_vio->vio.completion, callback,
data_vio->logical.zone->thread_id);
}
/**
* launch_data_vio_logical_callback() - Set a callback as a logical block operation and invoke it
* immediately.
*/
static inline void launch_data_vio_logical_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
set_data_vio_logical_callback(data_vio, callback);
vdo_launch_completion(&data_vio->vio.completion);
}
static inline void assert_data_vio_in_allocated_zone(struct data_vio *data_vio)
{
thread_id_t expected = data_vio->allocation.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
VDO_ASSERT_LOG_ONLY((expected == thread_id),
"struct data_vio for allocated physical block %llu on thread %u, should be on thread %u",
(unsigned long long) data_vio->allocation.pbn, thread_id,
expected);
}
static inline void set_data_vio_allocated_zone_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
vdo_set_completion_callback(&data_vio->vio.completion, callback,
data_vio->allocation.zone->thread_id);
}
/**
* launch_data_vio_allocated_zone_callback() - Set a callback as a physical block operation in a
* data_vio's allocated zone and invoke it immediately.
*/
static inline void launch_data_vio_allocated_zone_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
set_data_vio_allocated_zone_callback(data_vio, callback);
vdo_launch_completion(&data_vio->vio.completion);
}
static inline void assert_data_vio_in_duplicate_zone(struct data_vio *data_vio)
{
thread_id_t expected = data_vio->duplicate.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
VDO_ASSERT_LOG_ONLY((expected == thread_id),
"data_vio for duplicate physical block %llu on thread %u, should be on thread %u",
(unsigned long long) data_vio->duplicate.pbn, thread_id,
expected);
}
static inline void set_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
vdo_set_completion_callback(&data_vio->vio.completion, callback,
data_vio->duplicate.zone->thread_id);
}
/**
* launch_data_vio_duplicate_zone_callback() - Set a callback as a physical block operation in a
* data_vio's duplicate zone and invoke it immediately.
*/
static inline void launch_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
set_data_vio_duplicate_zone_callback(data_vio, callback);
vdo_launch_completion(&data_vio->vio.completion);
}
static inline void assert_data_vio_in_mapped_zone(struct data_vio *data_vio)
{
thread_id_t expected = data_vio->mapped.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
VDO_ASSERT_LOG_ONLY((expected == thread_id),
"data_vio for mapped physical block %llu on thread %u, should be on thread %u",
(unsigned long long) data_vio->mapped.pbn, thread_id, expected);
}
static inline void set_data_vio_mapped_zone_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
vdo_set_completion_callback(&data_vio->vio.completion, callback,
data_vio->mapped.zone->thread_id);
}
static inline void assert_data_vio_in_new_mapped_zone(struct data_vio *data_vio)
{
thread_id_t expected = data_vio->new_mapped.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
VDO_ASSERT_LOG_ONLY((expected == thread_id),
"data_vio for new_mapped physical block %llu on thread %u, should be on thread %u",
(unsigned long long) data_vio->new_mapped.pbn, thread_id,
expected);
}
static inline void set_data_vio_new_mapped_zone_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
vdo_set_completion_callback(&data_vio->vio.completion, callback,
data_vio->new_mapped.zone->thread_id);
}
static inline void assert_data_vio_in_journal_zone(struct data_vio *data_vio)
{
thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
thread_id_t thread_id = vdo_get_callback_thread_id();
VDO_ASSERT_LOG_ONLY((journal_thread == thread_id),
"data_vio for logical block %llu on thread %u, should be on journal thread %u",
(unsigned long long) data_vio->logical.lbn, thread_id,
journal_thread);
}
static inline void set_data_vio_journal_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
vdo_set_completion_callback(&data_vio->vio.completion, callback, journal_thread);
}
/**
* launch_data_vio_journal_callback() - Set a callback as a journal operation and invoke it
* immediately.
*/
static inline void launch_data_vio_journal_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
set_data_vio_journal_callback(data_vio, callback);
vdo_launch_completion(&data_vio->vio.completion);
}
static inline void assert_data_vio_in_packer_zone(struct data_vio *data_vio)
{
thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
thread_id_t thread_id = vdo_get_callback_thread_id();
VDO_ASSERT_LOG_ONLY((packer_thread == thread_id),
"data_vio for logical block %llu on thread %u, should be on packer thread %u",
(unsigned long long) data_vio->logical.lbn, thread_id,
packer_thread);
}
static inline void set_data_vio_packer_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
vdo_set_completion_callback(&data_vio->vio.completion, callback, packer_thread);
}
/**
* launch_data_vio_packer_callback() - Set a callback as a packer operation and invoke it
* immediately.
*/
static inline void launch_data_vio_packer_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
set_data_vio_packer_callback(data_vio, callback);
vdo_launch_completion(&data_vio->vio.completion);
}
static inline void assert_data_vio_on_cpu_thread(struct data_vio *data_vio)
{
thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
thread_id_t thread_id = vdo_get_callback_thread_id();
VDO_ASSERT_LOG_ONLY((cpu_thread == thread_id),
"data_vio for logical block %llu on thread %u, should be on cpu thread %u",
(unsigned long long) data_vio->logical.lbn, thread_id,
cpu_thread);
}
static inline void set_data_vio_cpu_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
vdo_set_completion_callback(&data_vio->vio.completion, callback, cpu_thread);
}
/**
* launch_data_vio_cpu_callback() - Set a callback to run on the CPU queues and invoke it
* immediately.
*/
static inline void launch_data_vio_cpu_callback(struct data_vio *data_vio,
vdo_action_fn callback,
enum vdo_completion_priority priority)
{
set_data_vio_cpu_callback(data_vio, callback);
vdo_launch_completion_with_priority(&data_vio->vio.completion, priority);
}
static inline void set_data_vio_bio_zone_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
vdo_set_completion_callback(&data_vio->vio.completion, callback,
get_vio_bio_zone_thread_id(&data_vio->vio));
}
/**
* launch_data_vio_bio_zone_callback() - Set a callback as a bio zone operation and invoke it
* immediately.
*/
static inline void launch_data_vio_bio_zone_callback(struct data_vio *data_vio,
vdo_action_fn callback)
{
set_data_vio_bio_zone_callback(data_vio, callback);
vdo_launch_completion_with_priority(&data_vio->vio.completion,
BIO_Q_DATA_PRIORITY);
}
/**
* launch_data_vio_on_bio_ack_queue() - If the vdo uses a bio_ack queue, set a callback to run on
* it and invoke it immediately, otherwise, just run the
* callback on the current thread.
*/
static inline void launch_data_vio_on_bio_ack_queue(struct data_vio *data_vio,
vdo_action_fn callback)
{
struct vdo_completion *completion = &data_vio->vio.completion;
struct vdo *vdo = completion->vdo;
if (!vdo_uses_bio_ack_queue(vdo)) {
callback(completion);
return;
}
vdo_set_completion_callback(completion, callback,
vdo->thread_config.bio_ack_thread);
vdo_launch_completion_with_priority(completion, BIO_ACK_Q_ACK_PRIORITY);
}
void data_vio_allocate_data_block(struct data_vio *data_vio,
enum pbn_lock_type write_lock_type,
vdo_action_fn callback, vdo_action_fn error_handler);
void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset);
int __must_check uncompress_data_vio(struct data_vio *data_vio,
enum block_mapping_state mapping_state,
char *buffer);
void update_metadata_for_data_vio_write(struct data_vio *data_vio,
struct pbn_lock *lock);
void write_data_vio(struct data_vio *data_vio);
void launch_compress_data_vio(struct data_vio *data_vio);
void continue_data_vio_with_block_map_slot(struct vdo_completion *completion);
#endif /* DATA_VIO_H */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017, National Instruments Corp.
*
* Author: Moritz Fischer <[email protected]>
*/
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/nvmem-consumer.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>
#define TX_BD_NUM 64
#define RX_BD_NUM 128
/* Axi DMA Register definitions */
#define XAXIDMA_TX_CR_OFFSET 0x00 /* Channel control */
#define XAXIDMA_TX_SR_OFFSET 0x04 /* Status */
#define XAXIDMA_TX_CDESC_OFFSET 0x08 /* Current descriptor pointer */
#define XAXIDMA_TX_TDESC_OFFSET 0x10 /* Tail descriptor pointer */
#define XAXIDMA_RX_CR_OFFSET 0x30 /* Channel control */
#define XAXIDMA_RX_SR_OFFSET 0x34 /* Status */
#define XAXIDMA_RX_CDESC_OFFSET 0x38 /* Current descriptor pointer */
#define XAXIDMA_RX_TDESC_OFFSET 0x40 /* Tail descriptor pointer */
#define XAXIDMA_CR_RUNSTOP_MASK 0x1 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK 0x4 /* Reset DMA engine */
#define XAXIDMA_BD_CTRL_LENGTH_MASK 0x007FFFFF /* Requested len */
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */
#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
#define XAXIDMA_DELAY_SHIFT 24
#define XAXIDMA_COALESCE_SHIFT 16
#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_WAITBOUND 254
#define XAXIDMA_DFT_RX_THRESHOLD 24
#define XAXIDMA_DFT_RX_WAITBOUND 254
#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF /* Actual len */
#define XAXIDMA_BD_STS_COMPLETE_MASK 0x80000000 /* Completed */
#define XAXIDMA_BD_STS_DEC_ERR_MASK 0x40000000 /* Decode error */
#define XAXIDMA_BD_STS_SLV_ERR_MASK 0x20000000 /* Slave error */
#define XAXIDMA_BD_STS_INT_ERR_MASK 0x10000000 /* Internal err */
#define XAXIDMA_BD_STS_ALL_ERR_MASK 0x70000000 /* All errors */
#define XAXIDMA_BD_STS_RXSOF_MASK 0x08000000 /* First rx pkt */
#define XAXIDMA_BD_STS_RXEOF_MASK 0x04000000 /* Last rx pkt */
#define XAXIDMA_BD_STS_ALL_MASK 0xFC000000 /* All status bits */
#define NIXGE_REG_CTRL_OFFSET 0x4000
#define NIXGE_REG_INFO 0x00
#define NIXGE_REG_MAC_CTL 0x04
#define NIXGE_REG_PHY_CTL 0x08
#define NIXGE_REG_LED_CTL 0x0c
#define NIXGE_REG_MDIO_DATA 0x10
#define NIXGE_REG_MDIO_ADDR 0x14
#define NIXGE_REG_MDIO_OP 0x18
#define NIXGE_REG_MDIO_CTRL 0x1c
#define NIXGE_ID_LED_CTL_EN BIT(0)
#define NIXGE_ID_LED_CTL_VAL BIT(1)
#define NIXGE_MDIO_CLAUSE45 BIT(12)
#define NIXGE_MDIO_CLAUSE22 0
#define NIXGE_MDIO_OP(n) (((n) & 0x3) << 10)
#define NIXGE_MDIO_OP_ADDRESS 0
#define NIXGE_MDIO_C45_WRITE BIT(0)
#define NIXGE_MDIO_C45_READ (BIT(1) | BIT(0))
#define NIXGE_MDIO_C22_WRITE BIT(0)
#define NIXGE_MDIO_C22_READ BIT(1)
#define NIXGE_MDIO_ADDR(n) (((n) & 0x1f) << 5)
#define NIXGE_MDIO_MMD(n) (((n) & 0x1f) << 0)
#define NIXGE_REG_MAC_LSB 0x1000
#define NIXGE_REG_MAC_MSB 0x1004
/* Packet size info */
#define NIXGE_HDR_SIZE 14 /* Size of Ethernet header */
#define NIXGE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */
#define NIXGE_MTU 1500 /* Max MTU of an Ethernet frame */
#define NIXGE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */
#define NIXGE_MAX_FRAME_SIZE (NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
#define NIXGE_MAX_JUMBO_FRAME_SIZE \
(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
enum nixge_version {
NIXGE_V2,
NIXGE_V3,
NIXGE_VERSION_COUNT
};
struct nixge_hw_dma_bd {
u32 next_lo;
u32 next_hi;
u32 phys_lo;
u32 phys_hi;
u32 reserved3;
u32 reserved4;
u32 cntrl;
u32 status;
u32 app0;
u32 app1;
u32 app2;
u32 app3;
u32 app4;
u32 sw_id_offset_lo;
u32 sw_id_offset_hi;
u32 reserved6;
};
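/*
 * Descriptor addresses are split into _lo/_hi words; the _hi word is only
 * written when 64-bit physical addresses are configured.
 */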
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
do { \
(bd)->field##_lo = lower_32_bits((addr)); \
(bd)->field##_hi = upper_32_bits((addr)); \
} while (0)
#else
#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
((bd)->field##_lo = lower_32_bits((addr)))
#endif
#define nixge_hw_dma_bd_set_phys(bd, addr) \
nixge_hw_dma_bd_set_addr((bd), phys, (addr))
#define nixge_hw_dma_bd_set_next(bd, addr) \
nixge_hw_dma_bd_set_addr((bd), next, (addr))
#define nixge_hw_dma_bd_set_offset(bd, addr) \
nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr))
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define nixge_hw_dma_bd_get_addr(bd, field) \
(dma_addr_t)((((u64)(bd)->field##_hi) << 32) | ((bd)->field##_lo))
#else
#define nixge_hw_dma_bd_get_addr(bd, field) \
(dma_addr_t)((bd)->field##_lo)
#endif
struct nixge_tx_skb {
struct sk_buff *skb;
dma_addr_t mapping;
size_t size;
bool mapped_as_page;
};
struct nixge_priv {
struct net_device *ndev;
struct napi_struct napi;
struct device *dev;
/* Connection to PHY device */
struct device_node *phy_node;
phy_interface_t phy_mode;
int link;
unsigned int speed;
unsigned int duplex;
/* MDIO bus data */
struct mii_bus *mii_bus; /* MII bus reference */
/* IO registers, dma functions and IRQs */
void __iomem *ctrl_regs;
void __iomem *dma_regs;
struct tasklet_struct dma_err_tasklet;
int tx_irq;
int rx_irq;
/* Buffer descriptors */
struct nixge_hw_dma_bd *tx_bd_v;
struct nixge_tx_skb *tx_skb;
dma_addr_t tx_bd_p;
struct nixge_hw_dma_bd *rx_bd_v;
dma_addr_t rx_bd_p;
u32 tx_bd_ci;
u32 tx_bd_tail;
u32 rx_bd_ci;
u32 coalesce_count_rx;
u32 coalesce_count_tx;
};
static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
writel(val, priv->dma_regs + offset);
}
static void nixge_dma_write_desc_reg(struct nixge_priv *priv, off_t offset,
dma_addr_t addr)
{
writel(lower_32_bits(addr), priv->dma_regs + offset);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
writel(upper_32_bits(addr), priv->dma_regs + offset + 4);
#endif
}
static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
{
return readl(priv->dma_regs + offset);
}
static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
writel(val, priv->ctrl_regs + offset);
}
static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
{
return readl(priv->ctrl_regs + offset);
}
#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
(sleep_us), (timeout_us))
#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
(sleep_us), (timeout_us))
static void nixge_hw_dma_bd_release(struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
dma_addr_t phys_addr;
struct sk_buff *skb;
int i;
if (priv->rx_bd_v) {
for (i = 0; i < RX_BD_NUM; i++) {
phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
phys);
dma_unmap_single(ndev->dev.parent, phys_addr,
NIXGE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
skb = (struct sk_buff *)(uintptr_t)
nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
sw_id_offset);
dev_kfree_skb(skb);
}
dma_free_coherent(ndev->dev.parent,
sizeof(*priv->rx_bd_v) * RX_BD_NUM,
priv->rx_bd_v,
priv->rx_bd_p);
}
if (priv->tx_skb)
devm_kfree(ndev->dev.parent, priv->tx_skb);
if (priv->tx_bd_v)
dma_free_coherent(ndev->dev.parent,
sizeof(*priv->tx_bd_v) * TX_BD_NUM,
priv->tx_bd_v,
priv->tx_bd_p);
}
static int nixge_hw_dma_bd_init(struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
struct sk_buff *skb;
dma_addr_t phys;
u32 cr;
int i;
/* Reset the indexes which are used for accessing the BDs */
priv->tx_bd_ci = 0;
priv->tx_bd_tail = 0;
priv->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*priv->tx_bd_v) * TX_BD_NUM,
&priv->tx_bd_p, GFP_KERNEL);
if (!priv->tx_bd_v)
goto out;
priv->tx_skb = devm_kcalloc(ndev->dev.parent,
TX_BD_NUM, sizeof(*priv->tx_skb),
GFP_KERNEL);
if (!priv->tx_skb)
goto out;
priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*priv->rx_bd_v) * RX_BD_NUM,
&priv->rx_bd_p, GFP_KERNEL);
if (!priv->rx_bd_v)
goto out;
for (i = 0; i < TX_BD_NUM; i++) {
nixge_hw_dma_bd_set_next(&priv->tx_bd_v[i],
priv->tx_bd_p +
sizeof(*priv->tx_bd_v) *
((i + 1) % TX_BD_NUM));
}
for (i = 0; i < RX_BD_NUM; i++) {
nixge_hw_dma_bd_set_next(&priv->rx_bd_v[i],
priv->rx_bd_p
+ sizeof(*priv->rx_bd_v) *
((i + 1) % RX_BD_NUM));
skb = __netdev_alloc_skb_ip_align(ndev,
NIXGE_MAX_JUMBO_FRAME_SIZE,
GFP_KERNEL);
if (!skb)
goto out;
nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], (uintptr_t)skb);
phys = dma_map_single(ndev->dev.parent, skb->data,
NIXGE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
nixge_hw_dma_bd_set_phys(&priv->rx_bd_v[i], phys);
priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
}
/* Start updating the Rx channel control register */
cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
/* Update the interrupt coalesce count */
cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
/* Update the delay timer count */
cr = ((cr & ~XAXIDMA_DELAY_MASK) |
(XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
/* Enable coalesce, delay timer and error interrupts */
cr |= XAXIDMA_IRQ_ALL_MASK;
/* Write to the Rx channel control register */
nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
/* Start updating the Tx channel control register */
cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
/* Update the interrupt coalesce count */
cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
/* Update the delay timer count */
cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
(XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
/* Enable coalesce, delay timer and error interrupts */
cr |= XAXIDMA_IRQ_ALL_MASK;
/* Write to the Tx channel control register */
nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
nixge_dma_write_desc_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
(sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* The Tx channel is now ready to run, but it will only start
* transmitting once we write to the tail pointer register.
*/
nixge_dma_write_desc_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
return 0;
out:
nixge_hw_dma_bd_release(ndev);
return -ENOMEM;
}
static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
{
u32 status;
int err;
/* Reset Axi DMA. This would reset NIXGE Ethernet core as well.
* The reset process of Axi DMA takes a while to complete as all
* pending commands/transfers will be flushed or completed during
* this reset process.
*/
nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
err = nixge_dma_poll_timeout(priv, offset, status,
!(status & XAXIDMA_CR_RESET_MASK), 10,
1000);
if (err)
netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
}
static void nixge_device_reset(struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
__nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
__nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);
if (nixge_hw_dma_bd_init(ndev))
netdev_err(ndev, "%s: descriptor allocation failed\n",
__func__);
netif_trans_update(ndev);
}
static void nixge_handle_link_change(struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
if (phydev->link != priv->link || phydev->speed != priv->speed ||
phydev->duplex != priv->duplex) {
priv->link = phydev->link;
priv->speed = phydev->speed;
priv->duplex = phydev->duplex;
phy_print_status(phydev);
}
}
static void nixge_tx_skb_unmap(struct nixge_priv *priv,
struct nixge_tx_skb *tx_skb)
{
if (tx_skb->mapping) {
if (tx_skb->mapped_as_page)
dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
tx_skb->size, DMA_TO_DEVICE);
else
dma_unmap_single(priv->ndev->dev.parent,
tx_skb->mapping,
tx_skb->size, DMA_TO_DEVICE);
tx_skb->mapping = 0;
}
if (tx_skb->skb) {
dev_kfree_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
}
static void nixge_start_xmit_done(struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
struct nixge_hw_dma_bd *cur_p;
struct nixge_tx_skb *tx_skb;
unsigned int status = 0;
u32 packets = 0;
u32 size = 0;
cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
tx_skb = &priv->tx_skb[priv->tx_bd_ci];
status = cur_p->status;
while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
nixge_tx_skb_unmap(priv, tx_skb);
cur_p->status = 0;
size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
packets++;
++priv->tx_bd_ci;
priv->tx_bd_ci %= TX_BD_NUM;
cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
tx_skb = &priv->tx_skb[priv->tx_bd_ci];
status = cur_p->status;
}
ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;
if (packets)
netif_wake_queue(ndev);
}
static int nixge_check_tx_bd_space(struct nixge_priv *priv,
int num_frag)
{
struct nixge_hw_dma_bd *cur_p;
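/* the ring is full if the descriptor num_frag slots ahead of the tail has not been reclaimed yet */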
cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
return NETDEV_TX_BUSY;
return 0;
}
static netdev_tx_t nixge_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
struct nixge_hw_dma_bd *cur_p;
struct nixge_tx_skb *tx_skb;
dma_addr_t tail_p, cur_phys;
skb_frag_t *frag;
u32 num_frag;
u32 ii;
num_frag = skb_shinfo(skb)->nr_frags;
cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
tx_skb = &priv->tx_skb[priv->tx_bd_tail];
if (nixge_check_tx_bd_space(priv, num_frag)) {
if (!netif_queue_stopped(ndev))
netif_stop_queue(ndev);
return NETDEV_TX_OK;
}
cur_phys = dma_map_single(ndev->dev.parent, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, cur_phys))
goto drop;
nixge_hw_dma_bd_set_phys(cur_p, cur_phys);
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
tx_skb->skb = NULL;
tx_skb->mapping = cur_phys;
tx_skb->size = skb_headlen(skb);
tx_skb->mapped_as_page = false;
for (ii = 0; ii < num_frag; ii++) {
++priv->tx_bd_tail;
priv->tx_bd_tail %= TX_BD_NUM;
cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
tx_skb = &priv->tx_skb[priv->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
cur_phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
skb_frag_size(frag),
DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, cur_phys))
goto frag_err;
nixge_hw_dma_bd_set_phys(cur_p, cur_phys);
cur_p->cntrl = skb_frag_size(frag);
tx_skb->skb = NULL;
tx_skb->mapping = cur_phys;
tx_skb->size = skb_frag_size(frag);
tx_skb->mapped_as_page = true;
}
/* last buffer of the frame */
tx_skb->skb = skb;
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
/* Start the transfer */
nixge_dma_write_desc_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
++priv->tx_bd_tail;
priv->tx_bd_tail %= TX_BD_NUM;
return NETDEV_TX_OK;
frag_err:
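/* a fragment failed to map: unwind the descriptors used so far and drop the frame */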
for (; ii > 0; ii--) {
if (priv->tx_bd_tail)
priv->tx_bd_tail--;
else
priv->tx_bd_tail = TX_BD_NUM - 1;
tx_skb = &priv->tx_skb[priv->tx_bd_tail];
nixge_tx_skb_unmap(priv, tx_skb);
cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
cur_p->status = 0;
}
dma_unmap_single(priv->ndev->dev.parent,
tx_skb->mapping,
tx_skb->size, DMA_TO_DEVICE);
drop:
ndev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
static int nixge_recv(struct net_device *ndev, int budget)
{
struct nixge_priv *priv = netdev_priv(ndev);
struct sk_buff *skb, *new_skb;
struct nixge_hw_dma_bd *cur_p;
dma_addr_t tail_p = 0, cur_phys = 0;
u32 packets = 0;
u32 length = 0;
u32 size = 0;
cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK &&
budget > packets)) {
tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
priv->rx_bd_ci;
skb = (struct sk_buff *)(uintptr_t)
nixge_hw_dma_bd_get_addr(cur_p, sw_id_offset);
length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
length = NIXGE_MAX_JUMBO_FRAME_SIZE;
dma_unmap_single(ndev->dev.parent,
nixge_hw_dma_bd_get_addr(cur_p, phys),
NIXGE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, ndev);
skb_checksum_none_assert(skb);
/* For now mark them as CHECKSUM_NONE since
* we don't have offload capabilities
*/
skb->ip_summed = CHECKSUM_NONE;
napi_gro_receive(&priv->napi, skb);
size += length;
packets++;
new_skb = netdev_alloc_skb_ip_align(ndev,
NIXGE_MAX_JUMBO_FRAME_SIZE);
if (!new_skb)
return packets;
cur_phys = dma_map_single(ndev->dev.parent, new_skb->data,
NIXGE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(ndev->dev.parent, cur_phys)) {
/* FIXME: bail out and clean up */
netdev_err(ndev, "Failed to map ...\n");
}
nixge_hw_dma_bd_set_phys(cur_p, cur_phys);
cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
cur_p->status = 0;
nixge_hw_dma_bd_set_offset(cur_p, (uintptr_t)new_skb);
++priv->rx_bd_ci;
priv->rx_bd_ci %= RX_BD_NUM;
cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
}
ndev->stats.rx_packets += packets;
ndev->stats.rx_bytes += size;
if (tail_p)
nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);
return packets;
}
static int nixge_poll(struct napi_struct *napi, int budget)
{
struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi);
int work_done;
u32 status, cr;
work_done = 0;
work_done = nixge_recv(priv->ndev, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
/* there is more work pending: clear the status and reschedule NAPI */
nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
napi_schedule(napi);
} else {
/* if not, turn on RX IRQs again ... */
cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
}
}
return work_done;
}
static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
{
struct nixge_priv *priv = netdev_priv(_ndev);
struct net_device *ndev = _ndev;
unsigned int status;
dma_addr_t phys;
u32 cr;
status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
nixge_start_xmit_done(priv->ndev);
goto out;
}
if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
netdev_err(ndev, "No interrupts asserted in Tx path\n");
return IRQ_NONE;
}
if (status & XAXIDMA_IRQ_ERROR_MASK) {
phys = nixge_hw_dma_bd_get_addr(&priv->tx_bd_v[priv->tx_bd_ci],
phys);
netdev_err(ndev, "DMA Tx error 0x%x\n", status);
netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys);
cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
/* Disable coalesce, delay timer and error interrupts */
cr &= (~XAXIDMA_IRQ_ALL_MASK);
/* Write to the Tx channel control register */
nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
/* Disable coalesce, delay timer and error interrupts */
cr &= (~XAXIDMA_IRQ_ALL_MASK);
/* Write to the Rx channel control register */
nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
tasklet_schedule(&priv->dma_err_tasklet);
nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
}
out:
return IRQ_HANDLED;
}
static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
{
struct nixge_priv *priv = netdev_priv(_ndev);
struct net_device *ndev = _ndev;
unsigned int status;
dma_addr_t phys;
u32 cr;
status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
/* Turn off IRQs; NAPI will poll and re-enable them */
nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
napi_schedule(&priv->napi);
goto out;
}
if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
netdev_err(ndev, "No interrupts asserted in Rx path\n");
return IRQ_NONE;
}
if (status & XAXIDMA_IRQ_ERROR_MASK) {
phys = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[priv->rx_bd_ci],
phys);
netdev_err(ndev, "DMA Rx error 0x%x\n", status);
netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys);
cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
/* Disable coalesce, delay timer and error interrupts */
cr &= (~XAXIDMA_IRQ_ALL_MASK);
/* Finally write to the Tx channel control register */
nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
/* Disable coalesce, delay timer and error interrupts */
cr &= (~XAXIDMA_IRQ_ALL_MASK);
/* write to the Rx channel control register */
nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
tasklet_schedule(&priv->dma_err_tasklet);
nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
}
out:
return IRQ_HANDLED;
}
static void nixge_dma_err_handler(struct tasklet_struct *t)
{
struct nixge_priv *lp = from_tasklet(lp, t, dma_err_tasklet);
struct nixge_hw_dma_bd *cur_p;
struct nixge_tx_skb *tx_skb;
u32 cr, i;
__nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
__nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
for (i = 0; i < TX_BD_NUM; i++) {
cur_p = &lp->tx_bd_v[i];
tx_skb = &lp->tx_skb[i];
nixge_tx_skb_unmap(lp, tx_skb);
nixge_hw_dma_bd_set_phys(cur_p, 0);
cur_p->cntrl = 0;
cur_p->status = 0;
nixge_hw_dma_bd_set_offset(cur_p, 0);
}
for (i = 0; i < RX_BD_NUM; i++) {
cur_p = &lp->rx_bd_v[i];
cur_p->status = 0;
}
lp->tx_bd_ci = 0;
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
/* Start updating the Rx channel control register */
cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
/* Update the interrupt coalesce count */
cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
(XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
/* Update the delay timer count */
cr = ((cr & ~XAXIDMA_DELAY_MASK) |
(XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
/* Enable coalesce, delay timer and error interrupts */
cr |= XAXIDMA_IRQ_ALL_MASK;
/* Finally write to the Rx channel control register */
nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);
/* Start updating the Tx channel control register */
cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
/* Update the interrupt coalesce count */
cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
(XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
/* Update the delay timer count */
cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
(XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
/* Enable coalesce, delay timer and error interrupts */
cr |= XAXIDMA_IRQ_ALL_MASK;
/* Finally write to the Tx channel control register */
nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
nixge_dma_write_desc_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
nixge_dma_write_desc_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
(sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
 * The Tx channel is now ready to run, but it will only start
 * transmitting once we write to the tail pointer register.
 */
nixge_dma_write_desc_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
}
static int nixge_open(struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
struct phy_device *phy;
int ret;
nixge_device_reset(ndev);
phy = of_phy_connect(ndev, priv->phy_node,
&nixge_handle_link_change, 0, priv->phy_mode);
if (!phy)
return -ENODEV;
phy_start(phy);
/* Set up the tasklet for Axi DMA error handling */
tasklet_setup(&priv->dma_err_tasklet, nixge_dma_err_handler);
napi_enable(&priv->napi);
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
if (ret)
goto err_tx_irq;
/* Enable interrupts for Axi DMA Rx */
ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
if (ret)
goto err_rx_irq;
netif_start_queue(ndev);
return 0;
err_rx_irq:
free_irq(priv->tx_irq, ndev);
err_tx_irq:
napi_disable(&priv->napi);
phy_stop(phy);
phy_disconnect(phy);
tasklet_kill(&priv->dma_err_tasklet);
netdev_err(ndev, "request_irq() failed\n");
return ret;
}
static int nixge_stop(struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
u32 cr;
netif_stop_queue(ndev);
napi_disable(&priv->napi);
if (ndev->phydev) {
phy_stop(ndev->phydev);
phy_disconnect(ndev->phydev);
}
cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
cr & (~XAXIDMA_CR_RUNSTOP_MASK));
cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
cr & (~XAXIDMA_CR_RUNSTOP_MASK));
tasklet_kill(&priv->dma_err_tasklet);
free_irq(priv->tx_irq, ndev);
free_irq(priv->rx_irq, ndev);
nixge_hw_dma_bd_release(ndev);
return 0;
}
static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
{
if (netif_running(ndev))
return -EBUSY;
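/* Reject MTUs whose full frame (MTU plus header and trailer) would
 * exceed the jumbo frame limit.
 */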
if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
NIXGE_MAX_JUMBO_FRAME_SIZE)
return -EINVAL;
WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
static s32 __nixge_hw_set_mac_address(struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
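/* The MAC address is split across two registers: bytes 2-5 go into
 * the LSB register, bytes 0-1 into the MSB register.
 */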
nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
(ndev->dev_addr[2]) << 24 |
(ndev->dev_addr[3] << 16) |
(ndev->dev_addr[4] << 8) |
(ndev->dev_addr[5] << 0));
nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
(ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));
return 0;
}
static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
{
int err;
err = eth_mac_addr(ndev, p);
if (!err)
__nixge_hw_set_mac_address(ndev);
return err;
}
static const struct net_device_ops nixge_netdev_ops = {
.ndo_open = nixge_open,
.ndo_stop = nixge_stop,
.ndo_start_xmit = nixge_start_xmit,
.ndo_change_mtu = nixge_change_mtu,
.ndo_set_mac_address = nixge_net_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
};
static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
strscpy(ed->driver, "nixge", sizeof(ed->driver));
strscpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}
static int
nixge_ethtools_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *ecoalesce,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct nixge_priv *priv = netdev_priv(ndev);
u32 regval = 0;
regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
>> XAXIDMA_COALESCE_SHIFT;
regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
>> XAXIDMA_COALESCE_SHIFT;
return 0;
}
static int
nixge_ethtools_set_coalesce(struct net_device *ndev,
struct ethtool_coalesce *ecoalesce,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct nixge_priv *priv = netdev_priv(ndev);
if (netif_running(ndev)) {
netdev_err(ndev,
"Please stop netif before applying configuration\n");
return -EBUSY;
}
if (ecoalesce->rx_max_coalesced_frames)
priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
if (ecoalesce->tx_max_coalesced_frames)
priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
return 0;
}
static int nixge_ethtools_set_phys_id(struct net_device *ndev,
enum ethtool_phys_id_state state)
{
struct nixge_priv *priv = netdev_priv(ndev);
u32 ctrl;
ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL);
switch (state) {
case ETHTOOL_ID_ACTIVE:
ctrl |= NIXGE_ID_LED_CTL_EN;
/* Enable identification LED override */
nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
return 2;
case ETHTOOL_ID_ON:
ctrl |= NIXGE_ID_LED_CTL_VAL;
nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
break;
case ETHTOOL_ID_OFF:
ctrl &= ~NIXGE_ID_LED_CTL_VAL;
nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
break;
case ETHTOOL_ID_INACTIVE:
/* Restore LED settings */
ctrl &= ~NIXGE_ID_LED_CTL_EN;
nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
break;
}
return 0;
}
static const struct ethtool_ops nixge_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = nixge_ethtools_get_drvinfo,
.get_coalesce = nixge_ethtools_get_coalesce,
.set_coalesce = nixge_ethtools_set_coalesce,
.set_phys_id = nixge_ethtools_set_phys_id,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_link = ethtool_op_get_link,
};
static int nixge_mdio_read_c22(struct mii_bus *bus, int phy_id, int reg)
{
struct nixge_priv *priv = bus->priv;
u32 status, tmp;
int err;
u16 device;
device = reg & 0x1f;
tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
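/* Writing MDIO_CTRL kicks off the access; the poll below waits for
 * the register to read back zero before fetching the result.
 */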
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
!status, 10, 1000);
if (err) {
dev_err(priv->dev, "timeout setting read command");
return err;
}
status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);
return status;
}
static int nixge_mdio_read_c45(struct mii_bus *bus, int phy_id, int device,
int reg)
{
struct nixge_priv *priv = bus->priv;
u32 status, tmp;
int err;
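/* Clause 45 access is a two-step sequence: first latch the register
 * address with an ADDRESS op, then issue the actual READ op.
 */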
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
tmp = NIXGE_MDIO_CLAUSE45 |
NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) |
NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
!status, 10, 1000);
if (err) {
dev_err(priv->dev, "timeout setting address");
return err;
}
tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
!status, 10, 1000);
if (err) {
dev_err(priv->dev, "timeout setting read command");
return err;
}
status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);
return status;
}
static int nixge_mdio_write_c22(struct mii_bus *bus, int phy_id, int reg,
u16 val)
{
struct nixge_priv *priv = bus->priv;
u32 status, tmp;
u16 device;
int err;
device = reg & 0x1f;
tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
!status, 10, 1000);
if (err)
dev_err(priv->dev, "timeout setting write command");
return err;
}
static int nixge_mdio_write_c45(struct mii_bus *bus, int phy_id,
int device, int reg, u16 val)
{
struct nixge_priv *priv = bus->priv;
u32 status, tmp;
int err;
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
tmp = NIXGE_MDIO_CLAUSE45 |
NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) |
NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
!status, 10, 1000);
if (err) {
dev_err(priv->dev, "timeout setting address");
return err;
}
tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE) |
NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
!status, 10, 1000);
if (err)
dev_err(priv->dev, "timeout setting write command");
return err;
}
static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
{
struct mii_bus *bus;
bus = devm_mdiobus_alloc(priv->dev);
if (!bus)
return -ENOMEM;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
bus->priv = priv;
bus->name = "nixge_mii_bus";
bus->read = nixge_mdio_read_c22;
bus->write = nixge_mdio_write_c22;
bus->read_c45 = nixge_mdio_read_c45;
bus->write_c45 = nixge_mdio_write_c45;
bus->parent = priv->dev;
priv->mii_bus = bus;
return of_mdiobus_register(bus, np);
}
static void *nixge_get_nvmem_address(struct device *dev)
{
struct nvmem_cell *cell;
size_t cell_size;
char *mac;
cell = nvmem_cell_get(dev, "address");
if (IS_ERR(cell))
return cell;
mac = nvmem_cell_read(cell, &cell_size);
nvmem_cell_put(cell);
return mac;
}
/* Match table for of_platform binding */
static const struct of_device_id nixge_dt_ids[] = {
{ .compatible = "ni,xge-enet-2.00", .data = (void *)NIXGE_V2 },
{ .compatible = "ni,xge-enet-3.00", .data = (void *)NIXGE_V3 },
{},
};
MODULE_DEVICE_TABLE(of, nixge_dt_ids);
static int nixge_of_get_resources(struct platform_device *pdev)
{
const struct of_device_id *of_id;
enum nixge_version version;
struct net_device *ndev;
struct nixge_priv *priv;
ndev = platform_get_drvdata(pdev);
priv = netdev_priv(ndev);
of_id = of_match_node(nixge_dt_ids, pdev->dev.of_node);
if (!of_id)
return -ENODEV;
version = (enum nixge_version)of_id->data;
if (version <= NIXGE_V2)
priv->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
else
priv->dma_regs = devm_platform_ioremap_resource_byname(pdev, "dma");
if (IS_ERR(priv->dma_regs)) {
netdev_err(ndev, "failed to map dma regs\n");
return PTR_ERR(priv->dma_regs);
}
if (version <= NIXGE_V2)
priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
else
priv->ctrl_regs = devm_platform_ioremap_resource_byname(pdev, "ctrl");
if (IS_ERR(priv->ctrl_regs)) {
netdev_err(ndev, "failed to map ctrl regs\n");
return PTR_ERR(priv->ctrl_regs);
}
return 0;
}
static int nixge_probe(struct platform_device *pdev)
{
struct device_node *mn, *phy_node;
struct nixge_priv *priv;
struct net_device *ndev;
const u8 *mac_addr;
int err;
ndev = alloc_etherdev(sizeof(*priv));
if (!ndev)
return -ENOMEM;
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->features = NETIF_F_SG;
ndev->netdev_ops = &nixge_netdev_ops;
ndev->ethtool_ops = &nixge_ethtool_ops;
/* MTU range: 64 - 9000 */
ndev->min_mtu = 64;
ndev->max_mtu = NIXGE_JUMBO_MTU;
mac_addr = nixge_get_nvmem_address(&pdev->dev);
if (!IS_ERR(mac_addr) && is_valid_ether_addr(mac_addr)) {
eth_hw_addr_set(ndev, mac_addr);
kfree(mac_addr);
} else {
eth_hw_addr_random(ndev);
}
priv = netdev_priv(ndev);
priv->ndev = ndev;
priv->dev = &pdev->dev;
netif_napi_add(ndev, &priv->napi, nixge_poll);
err = nixge_of_get_resources(pdev);
if (err)
goto free_netdev;
__nixge_hw_set_mac_address(ndev);
priv->tx_irq = platform_get_irq_byname(pdev, "tx");
if (priv->tx_irq < 0) {
netdev_err(ndev, "could not find 'tx' irq");
err = priv->tx_irq;
goto free_netdev;
}
priv->rx_irq = platform_get_irq_byname(pdev, "rx");
if (priv->rx_irq < 0) {
netdev_err(ndev, "could not find 'rx' irq");
err = priv->rx_irq;
goto free_netdev;
}
priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
mn = of_get_child_by_name(pdev->dev.of_node, "mdio");
if (mn) {
err = nixge_mdio_setup(priv, mn);
of_node_put(mn);
if (err) {
netdev_err(ndev, "error registering mdio bus");
goto free_netdev;
}
}
err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
if (err) {
netdev_err(ndev, "not find \"phy-mode\" property\n");
goto unregister_mdio;
}
phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!phy_node && of_phy_is_fixed_link(pdev->dev.of_node)) {
err = of_phy_register_fixed_link(pdev->dev.of_node);
if (err < 0) {
netdev_err(ndev, "broken fixed-link specification\n");
goto unregister_mdio;
}
phy_node = of_node_get(pdev->dev.of_node);
}
priv->phy_node = phy_node;
err = register_netdev(priv->ndev);
if (err) {
netdev_err(ndev, "register_netdev() error (%i)\n", err);
goto free_phy;
}
return 0;
free_phy:
if (of_phy_is_fixed_link(pdev->dev.of_node))
of_phy_deregister_fixed_link(pdev->dev.of_node);
of_node_put(phy_node);
unregister_mdio:
if (priv->mii_bus)
mdiobus_unregister(priv->mii_bus);
free_netdev:
free_netdev(ndev);
return err;
}
static void nixge_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct nixge_priv *priv = netdev_priv(ndev);
unregister_netdev(ndev);
if (of_phy_is_fixed_link(pdev->dev.of_node))
of_phy_deregister_fixed_link(pdev->dev.of_node);
of_node_put(priv->phy_node);
if (priv->mii_bus)
mdiobus_unregister(priv->mii_bus);
free_netdev(ndev);
}
static struct platform_driver nixge_driver = {
.probe = nixge_probe,
.remove = nixge_remove,
.driver = {
.name = "nixge",
.of_match_table = nixge_dt_ids,
},
};
module_platform_driver(nixge_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("National Instruments XGE Management MAC");
MODULE_AUTHOR("Moritz Fischer <[email protected]>");
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h
*
* Header file for Samsung MFC (Multi Function Codec - FIMV) driver
* Contains declarations of hw related functions.
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*/
#ifndef S5P_MFC_OPR_V6_H_
#define S5P_MFC_OPR_V6_H_
#include "s5p_mfc_common.h"
#include "s5p_mfc_opr.h"
#define MFC_CTRL_MODE_CUSTOM MFC_CTRL_MODE_SFR
#define MB_WIDTH(x_size) DIV_ROUND_UP(x_size, 16)
#define MB_HEIGHT(y_size) DIV_ROUND_UP(y_size, 16)
#define S5P_MFC_DEC_MV_SIZE(x, y, offset) (MB_WIDTH(x) * \
(((MB_HEIGHT(y) + 1) / 2) * 2) * 64 + (offset))
#define S5P_MFC_LCU_WIDTH(x_size) DIV_ROUND_UP(x_size, 32)
#define S5P_MFC_LCU_HEIGHT(y_size) DIV_ROUND_UP(y_size, 32)
#define s5p_mfc_dec_hevc_mv_size(x, y) \
(DIV_ROUND_UP(x, 64) * DIV_ROUND_UP(y, 64) * 256 + 512)
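/* For illustration (values follow from the macros above): a 1920x1080
 * stream gives MB_WIDTH(1920) = 120 and MB_HEIGHT(1080) = 68, so
 * S5P_MFC_DEC_MV_SIZE(1920, 1080, 0) = 120 * 68 * 64 = 522240 bytes,
 * while s5p_mfc_dec_hevc_mv_size(1920, 1080) = 30 * 17 * 256 + 512
 * = 131072 bytes.
 */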
/* Encoder parameter limits and defaults */
#define ENC_MULTI_SLICE_MB_MAX ((1 << 30) - 1)
#define ENC_MULTI_SLICE_BIT_MIN 2800
#define ENC_INTRA_REFRESH_MB_MAX ((1 << 18) - 1)
#define ENC_VBV_BUF_SIZE_MAX ((1 << 30) - 1)
#define ENC_H264_LOOP_FILTER_AB_MIN -12
#define ENC_H264_LOOP_FILTER_AB_MAX 12
#define ENC_H264_RC_FRAME_RATE_MAX ((1 << 16) - 1)
#define ENC_H263_RC_FRAME_RATE_MAX ((1 << 16) - 1)
#define ENC_H264_PROFILE_MAX 3
#define ENC_H264_LEVEL_MAX 42
#define ENC_MPEG4_VOP_TIME_RES_MAX ((1 << 16) - 1)
#define FRAME_DELTA_H264_H263 1
#define LOOSE_CBR_MAX 5
#define TIGHT_CBR_MAX 10
#define ENC_HEVC_RC_FRAME_RATE_MAX ((1 << 16) - 1)
#define ENC_HEVC_QP_INDEX_MIN -12
#define ENC_HEVC_QP_INDEX_MAX 12
#define ENC_HEVC_LOOP_FILTER_MIN -12
#define ENC_HEVC_LOOP_FILTER_MAX 12
#define ENC_HEVC_LEVEL_MAX 62
#define FRAME_DELTA_DEFAULT 1
const struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v6(void);
const struct s5p_mfc_regs *s5p_mfc_init_regs_v6_plus(struct s5p_mfc_dev *dev);
#endif /* S5P_MFC_OPR_V6_H_ */
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
/* A full unroll of 600 iterations would have a total
 * program size close to 298k insns, which may push a
 * BPF_JMP insn offset out of the 16-bit integer range.
 * So limit the unroll size to 150: the total program
 * size stays around 80k insns while the loop still
 * executes 600 times.
 */
#define UNROLL_COUNT 150
#include "pyperf.h"
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Freescale MMA7660FC 3-Axis Accelerometer
*
* Copyright (c) 2016, Intel Corporation.
*
* IIO driver for Freescale MMA7660FC; 7-bit I2C address: 0x4c.
*/
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#define MMA7660_DRIVER_NAME "mma7660"
#define MMA7660_REG_XOUT 0x00
#define MMA7660_REG_YOUT 0x01
#define MMA7660_REG_ZOUT 0x02
#define MMA7660_REG_OUT_BIT_ALERT BIT(6)
#define MMA7660_REG_MODE 0x07
#define MMA7660_REG_MODE_BIT_MODE BIT(0)
#define MMA7660_REG_MODE_BIT_TON BIT(2)
#define MMA7660_I2C_READ_RETRIES 5
/*
* The accelerometer has one measurement range:
*
* -1.5g - +1.5g (6-bit, signed)
*
* scale = (1.5 + 1.5) * 9.81 / (2^6 - 1) = 0.467142857
*/
#define MMA7660_SCALE_AVAIL "0.467142857"
static const int mma7660_nscale = 467142857;
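/* The scale is reported to user space as 0 + 467142857 * 10^-9 m/s^2
 * per LSB via IIO_VAL_INT_PLUS_NANO in mma7660_read_raw().
 */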
enum mma7660_mode {
MMA7660_MODE_STANDBY,
MMA7660_MODE_ACTIVE
};
struct mma7660_data {
struct i2c_client *client;
struct mutex lock;
enum mma7660_mode mode;
struct iio_mount_matrix orientation;
};
static const struct iio_mount_matrix *
mma7660_get_mount_matrix(const struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
struct mma7660_data *data = iio_priv(indio_dev);
return &data->orientation;
}
static const struct iio_chan_spec_ext_info mma7660_ext_info[] = {
IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, mma7660_get_mount_matrix),
{ }
};
static IIO_CONST_ATTR(in_accel_scale_available, MMA7660_SCALE_AVAIL);
static struct attribute *mma7660_attributes[] = {
&iio_const_attr_in_accel_scale_available.dev_attr.attr,
NULL,
};
static const struct attribute_group mma7660_attribute_group = {
.attrs = mma7660_attributes
};
#define MMA7660_CHANNEL(reg, axis) { \
.type = IIO_ACCEL, \
.address = reg, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.ext_info = mma7660_ext_info, \
}
static const struct iio_chan_spec mma7660_channels[] = {
MMA7660_CHANNEL(MMA7660_REG_XOUT, X),
MMA7660_CHANNEL(MMA7660_REG_YOUT, Y),
MMA7660_CHANNEL(MMA7660_REG_ZOUT, Z),
};
static int mma7660_set_mode(struct mma7660_data *data,
enum mma7660_mode mode)
{
int ret;
struct i2c_client *client = data->client;
if (mode == data->mode)
return 0;
ret = i2c_smbus_read_byte_data(client, MMA7660_REG_MODE);
if (ret < 0) {
dev_err(&client->dev, "failed to read sensor mode\n");
return ret;
}
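/* MODE bit set = active measurement, cleared = standby; the TON
 * (test mode) bit is cleared in both cases.
 */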
if (mode == MMA7660_MODE_ACTIVE) {
ret &= ~MMA7660_REG_MODE_BIT_TON;
ret |= MMA7660_REG_MODE_BIT_MODE;
} else {
ret &= ~MMA7660_REG_MODE_BIT_TON;
ret &= ~MMA7660_REG_MODE_BIT_MODE;
}
ret = i2c_smbus_write_byte_data(client, MMA7660_REG_MODE, ret);
if (ret < 0) {
dev_err(&client->dev, "failed to change sensor mode\n");
return ret;
}
data->mode = mode;
return ret;
}
static int mma7660_read_accel(struct mma7660_data *data, u8 address)
{
int ret, retries = MMA7660_I2C_READ_RETRIES;
struct i2c_client *client = data->client;
/*
* Read data. If the Alert bit is set, the register was read at
* the same time as the device was attempting to update the content.
* The solution is to read the register again. Do this only
* MMA7660_I2C_READ_RETRIES times to avoid spending too much time
* in the kernel.
*/
do {
ret = i2c_smbus_read_byte_data(client, address);
if (ret < 0) {
dev_err(&client->dev, "register read failed\n");
return ret;
}
} while (retries-- > 0 && ret & MMA7660_REG_OUT_BIT_ALERT);
if (ret & MMA7660_REG_OUT_BIT_ALERT) {
dev_err(&client->dev, "all register read retries failed\n");
return -ETIMEDOUT;
}
return ret;
}
static int mma7660_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct mma7660_data *data = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
mutex_lock(&data->lock);
ret = mma7660_read_accel(data, chan->address);
mutex_unlock(&data->lock);
if (ret < 0)
return ret;
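/* Output registers hold a 6-bit two's complement value; bit 5 is
 * the sign bit.
 */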
*val = sign_extend32(ret, 5);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = mma7660_nscale;
return IIO_VAL_INT_PLUS_NANO;
default:
return -EINVAL;
}
return -EINVAL;
}
static const struct iio_info mma7660_info = {
.read_raw = mma7660_read_raw,
.attrs = &mma7660_attribute_group,
};
static int mma7660_probe(struct i2c_client *client)
{
int ret;
struct iio_dev *indio_dev;
struct mma7660_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev) {
dev_err(&client->dev, "iio allocation failed!\n");
return -ENOMEM;
}
data = iio_priv(indio_dev);
data->client = client;
i2c_set_clientdata(client, indio_dev);
mutex_init(&data->lock);
data->mode = MMA7660_MODE_STANDBY;
ret = iio_read_mount_matrix(&client->dev, &data->orientation);
if (ret)
return ret;
indio_dev->info = &mma7660_info;
indio_dev->name = MMA7660_DRIVER_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = mma7660_channels;
indio_dev->num_channels = ARRAY_SIZE(mma7660_channels);
ret = mma7660_set_mode(data, MMA7660_MODE_ACTIVE);
if (ret < 0)
return ret;
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "device_register failed\n");
mma7660_set_mode(data, MMA7660_MODE_STANDBY);
}
return ret;
}
static void mma7660_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
int ret;
iio_device_unregister(indio_dev);
ret = mma7660_set_mode(iio_priv(indio_dev), MMA7660_MODE_STANDBY);
if (ret)
dev_warn(&client->dev, "Failed to put device in stand-by mode (%pe), ignoring\n",
ERR_PTR(ret));
}
static int mma7660_suspend(struct device *dev)
{
struct mma7660_data *data;
data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
return mma7660_set_mode(data, MMA7660_MODE_STANDBY);
}
static int mma7660_resume(struct device *dev)
{
struct mma7660_data *data;
data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
return mma7660_set_mode(data, MMA7660_MODE_ACTIVE);
}
static DEFINE_SIMPLE_DEV_PM_OPS(mma7660_pm_ops, mma7660_suspend,
mma7660_resume);
static const struct i2c_device_id mma7660_i2c_id[] = {
{ "mma7660" },
{}
};
MODULE_DEVICE_TABLE(i2c, mma7660_i2c_id);
static const struct of_device_id mma7660_of_match[] = {
{ .compatible = "fsl,mma7660" },
{ }
};
MODULE_DEVICE_TABLE(of, mma7660_of_match);
static const struct acpi_device_id mma7660_acpi_id[] = {
{"MMA7660", 0},
{}
};
MODULE_DEVICE_TABLE(acpi, mma7660_acpi_id);
static struct i2c_driver mma7660_driver = {
.driver = {
.name = "mma7660",
.pm = pm_sleep_ptr(&mma7660_pm_ops),
.of_match_table = mma7660_of_match,
.acpi_match_table = mma7660_acpi_id,
},
.probe = mma7660_probe,
.remove = mma7660_remove,
.id_table = mma7660_i2c_id,
};
module_i2c_driver(mma7660_driver);
MODULE_AUTHOR("Constantin Musca <[email protected]>");
MODULE_DESCRIPTION("Freescale MMA7660FC 3-Axis Accelerometer driver");
MODULE_LICENSE("GPL v2");
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Driver for Silicon Labs SI2165 DVB-C/-T Demodulator
*
* Copyright (C) 2013-2017 Matthias Schwarzott <[email protected]>
*
* References:
* https://www.silabs.com/Support%20Documents/TechnicalDocs/Si2165-short.pdf
*/
#ifndef _DVB_SI2165_H
#define _DVB_SI2165_H
#include <linux/dvb/frontend.h>
enum {
SI2165_MODE_OFF = 0x00,
SI2165_MODE_PLL_EXT = 0x20,
SI2165_MODE_PLL_XTAL = 0x21
};
/* I2C addresses
 * possible values: 0x64, 0x65, 0x66, 0x67
 */
struct si2165_platform_data {
/*
* frontend
* returned by driver
*/
struct dvb_frontend **fe;
/* external clock or XTAL */
u8 chip_mode;
/* frequency of external clock or xtal in Hz
* possible values: 4000000, 16000000, 20000000, 240000000, 27000000
*/
u32 ref_freq_hz;
/* invert the spectrum */
bool inversion;
};
#endif /* _DVB_SI2165_H */
|
// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <[email protected]>
* Horst Hummel <[email protected]>
* Carsten Otte <[email protected]>
* Martin Schwidefsky <[email protected]>
* Bugreports.to..: <[email protected]>
* Copyright IBM Corp. 1999, 2009
* EMC Symmetrix ioctl Copyright EMC Corporation, 2008
* Author.........: Nigel Hislop <[email protected]>
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
/*
 * raw track access always maps to 64k in memory,
 * so it maps to 16 blocks of 4k per track
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128
MODULE_DESCRIPTION("S/390 DASD ECKD Disks device driver");
MODULE_LICENSE("GPL");
static struct dasd_discipline dasd_eckd_discipline;
/* The ccw bus type uses this table to find devices that it passes
 * to dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
static struct ccw_driver dasd_eckd_driver; /* see below */
static void *rawpadpage;
#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2
/* emergency request for reserve/release */
static struct {
struct dasd_ccw_req cqr;
struct ccw1 ccw;
char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);
static struct {
struct dasd_ccw_req cqr;
struct ccw1 ccw[2];
char data[40];
} *dasd_vol_info_req;
static DEFINE_MUTEX(dasd_vol_info_mutex);
struct ext_pool_exhaust_work_data {
struct work_struct worker;
struct dasd_device *device;
struct dasd_device *base;
};
/* definitions for the path verification worker */
struct pe_handler_work_data {
struct work_struct worker;
struct dasd_device *device;
struct dasd_ccw_req cqr;
struct ccw1 ccw;
__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
int isglobal;
__u8 tbvpm;
__u8 fcsecpm;
};
static struct pe_handler_work_data *pe_handler_worker;
static DEFINE_MUTEX(dasd_pe_handler_mutex);
struct check_attention_work_data {
struct work_struct worker;
struct dasd_device *device;
__u8 lpum;
};
static int dasd_eckd_ext_pool_id(struct dasd_device *);
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
struct dasd_device *, struct dasd_device *,
unsigned int, int, unsigned int, unsigned int,
unsigned int, unsigned int);
static int dasd_eckd_query_pprc_status(struct dasd_device *,
struct dasd_pprc_data_sc4 *);
/* Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
int ret;
/* set ECKD specific ccw-device options */
ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
if (ret) {
DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
"dasd_eckd_probe: could not set "
"ccw-device options");
return ret;
}
ret = dasd_generic_probe(cdev);
return ret;
}
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140
/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
return (d1 + (d2 - 1)) / d2;
}
static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
unsigned int kl, unsigned int dl)
{
int dn, kn;
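/* For illustration: on a 3390 with 4096-byte data blocks and no key
 * (kl = 0, dl = 4096), dn = ceil_quot(4102, 232) + 1 = 19 and the
 * formula below yields 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34))
 * = 1729 / 143 = 12 records per track.
 */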
switch (rdc->dev_type) {
case 0x3380:
if (kl)
return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
ceil_quot(dl + 12, 32));
else
return 1499 / (15 + ceil_quot(dl + 12, 32));
case 0x3390:
dn = ceil_quot(dl + 6, 232) + 1;
if (kl) {
kn = ceil_quot(kl + 6, 232) + 1;
return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
9 + ceil_quot(dl + 6 * dn, 34));
} else
return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
case 0x9345:
dn = ceil_quot(dl + 6, 232) + 1;
if (kl) {
kn = ceil_quot(kl + 6, 232) + 1;
return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
ceil_quot(dl + 6 * dn, 34));
} else
return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
}
return 0;
}
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
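/* Pack a cylinder/head address: the low 16 bits of the cylinder go
 * into geo->cyl, the remaining high cylinder bits land in the upper
 * bits of geo->head, and the head number occupies the low four bits
 * (large-volume addressing).
 */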
geo->cyl = (__u16) cyl;
geo->head = cyl >> 16;
geo->head <<= 4;
geo->head |= head;
}
/*
 * calculate the failing track from sense data, depending on
 * whether it is an EAV device or not
 */
static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
sector_t *track)
{
struct dasd_eckd_private *private = device->private;
u8 *sense = NULL;
u32 cyl;
u8 head;
sense = dasd_get_sense(irb);
if (!sense) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"ESE error no sense data\n");
return -EINVAL;
}
if (!(sense[27] & DASD_SENSE_BIT_2)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"ESE error no valid track data\n");
return -EINVAL;
}
if (sense[27] & DASD_SENSE_BIT_3) {
/* enhanced addressing */
cyl = sense[30] << 20;
cyl |= (sense[31] & 0xF0) << 12;
cyl |= sense[28] << 8;
cyl |= sense[29];
} else {
cyl = sense[29] << 8;
cyl |= sense[30];
}
head = sense[31] & 0x0F;
*track = cyl * private->rdc_data.trk_per_cyl + head;
return 0;
}
static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int rc;
rc = get_phys_clock(&data->ep_sys_time);
/*
* Ignore return code if XRC is not supported or
* sync clock is switched off
*/
if ((rc && !private->rdc_data.facilities.XRC_supported) ||
rc == -EOPNOTSUPP || rc == -EACCES)
return 0;
/* switch on System Time Stamp - needed for XRC Support */
data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
if (ccw) {
ccw->count = sizeof(struct DE_eckd_data);
ccw->flags |= CCW_FLAG_SLI;
}
return rc;
}
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
unsigned int totrk, int cmd, struct dasd_device *device,
int blksize)
{
struct dasd_eckd_private *private = device->private;
u16 heads, beghead, endhead;
u32 begcyl, endcyl;
int rc = 0;
if (ccw) {
ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
ccw->cda = virt_to_dma32(data);
}
memset(data, 0, sizeof(struct DE_eckd_data));
switch (cmd) {
case DASD_ECKD_CCW_READ_HOME_ADDRESS:
case DASD_ECKD_CCW_READ_RECORD_ZERO:
case DASD_ECKD_CCW_READ:
case DASD_ECKD_CCW_READ_MT:
case DASD_ECKD_CCW_READ_CKD:
case DASD_ECKD_CCW_READ_CKD_MT:
case DASD_ECKD_CCW_READ_KD:
case DASD_ECKD_CCW_READ_KD_MT:
data->mask.perm = 0x1;
data->attributes.operation = private->attrib.operation;
break;
case DASD_ECKD_CCW_READ_COUNT:
data->mask.perm = 0x1;
data->attributes.operation = DASD_BYPASS_CACHE;
break;
case DASD_ECKD_CCW_READ_TRACK:
case DASD_ECKD_CCW_READ_TRACK_DATA:
data->mask.perm = 0x1;
data->attributes.operation = private->attrib.operation;
data->blk_size = 0;
break;
case DASD_ECKD_CCW_WRITE:
case DASD_ECKD_CCW_WRITE_MT:
case DASD_ECKD_CCW_WRITE_KD:
case DASD_ECKD_CCW_WRITE_KD_MT:
data->mask.perm = 0x02;
data->attributes.operation = private->attrib.operation;
rc = set_timestamp(ccw, data, device);
break;
case DASD_ECKD_CCW_WRITE_CKD:
case DASD_ECKD_CCW_WRITE_CKD_MT:
data->attributes.operation = DASD_BYPASS_CACHE;
rc = set_timestamp(ccw, data, device);
break;
case DASD_ECKD_CCW_ERASE:
case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
data->mask.perm = 0x3;
data->mask.auth = 0x1;
data->attributes.operation = DASD_BYPASS_CACHE;
rc = set_timestamp(ccw, data, device);
break;
case DASD_ECKD_CCW_WRITE_FULL_TRACK:
data->mask.perm = 0x03;
data->attributes.operation = private->attrib.operation;
data->blk_size = 0;
break;
case DASD_ECKD_CCW_WRITE_TRACK_DATA:
data->mask.perm = 0x02;
data->attributes.operation = private->attrib.operation;
data->blk_size = blksize;
rc = set_timestamp(ccw, data, device);
break;
default:
dev_err(&device->cdev->dev,
"0x%x is not a known command\n", cmd);
break;
}
data->attributes.mode = 0x3; /* ECKD */
if ((private->rdc_data.cu_type == 0x2105 ||
private->rdc_data.cu_type == 0x2107 ||
private->rdc_data.cu_type == 0x1750)
&& !(private->uses_cdl && trk < 2))
data->ga_extended |= 0x40; /* Regular Data Format Mode */
heads = private->rdc_data.trk_per_cyl;
begcyl = trk / heads;
beghead = trk % heads;
endcyl = totrk / heads;
endhead = totrk % heads;
/* check for sequential prestage - enhance cylinder range */
if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
data->attributes.operation == DASD_SEQ_ACCESS) {
if (endcyl + private->attrib.nr_cyl < private->real_cyl)
endcyl += private->attrib.nr_cyl;
else
endcyl = (private->real_cyl - 1);
}
set_ch_t(&data->beg_ext, begcyl, beghead);
set_ch_t(&data->end_ext, endcyl, endhead);
return rc;
}
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
unsigned int trk, unsigned int rec_on_trk,
int count, int cmd, struct dasd_device *device,
unsigned int reclen, unsigned int tlf)
{
struct dasd_eckd_private *private = device->private;
int sector;
int dn, d;
if (ccw) {
ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
ccw->flags = 0;
if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
ccw->count = 22;
else
ccw->count = 20;
ccw->cda = virt_to_dma32(data);
}
memset(data, 0, sizeof(*data));
sector = 0;
if (rec_on_trk) {
switch (private->rdc_data.dev_type) {
case 0x3390:
dn = ceil_quot(reclen + 6, 232);
d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
break;
case 0x3380:
d = 7 + ceil_quot(reclen + 12, 32);
sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
break;
}
}
data->sector = sector;
/* Note: the meaning of count depends on the operation;
 * for record-based I/O it's the number of records, but for
 * track-based I/O it's the number of tracks
 */
data->count = count;
switch (cmd) {
case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
data->operation.orientation = 0x3;
data->operation.operation = 0x03;
break;
case DASD_ECKD_CCW_READ_HOME_ADDRESS:
data->operation.orientation = 0x3;
data->operation.operation = 0x16;
break;
case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
data->operation.orientation = 0x1;
data->operation.operation = 0x03;
data->count++;
break;
case DASD_ECKD_CCW_READ_RECORD_ZERO:
data->operation.orientation = 0x3;
data->operation.operation = 0x16;
data->count++;
break;
case DASD_ECKD_CCW_WRITE:
case DASD_ECKD_CCW_WRITE_MT:
case DASD_ECKD_CCW_WRITE_KD:
case DASD_ECKD_CCW_WRITE_KD_MT:
data->auxiliary.length_valid = 0x1;
data->length = reclen;
data->operation.operation = 0x01;
break;
case DASD_ECKD_CCW_WRITE_CKD:
case DASD_ECKD_CCW_WRITE_CKD_MT:
data->auxiliary.length_valid = 0x1;
data->length = reclen;
data->operation.operation = 0x03;
break;
case DASD_ECKD_CCW_WRITE_FULL_TRACK:
data->operation.orientation = 0x0;
data->operation.operation = 0x3F;
data->extended_operation = 0x11;
data->length = 0;
data->extended_parameter_length = 0x02;
if (data->count > 8) {
data->extended_parameter[0] = 0xFF;
data->extended_parameter[1] = 0xFF;
data->extended_parameter[1] <<= (16 - count);
} else {
data->extended_parameter[0] = 0xFF;
data->extended_parameter[0] <<= (8 - count);
data->extended_parameter[1] = 0x00;
}
data->sector = 0xFF;
break;
case DASD_ECKD_CCW_WRITE_TRACK_DATA:
data->auxiliary.length_valid = 0x1;
data->length = reclen; /* not tlf, as one might think */
data->operation.operation = 0x3F;
data->extended_operation = 0x23;
break;
case DASD_ECKD_CCW_READ:
case DASD_ECKD_CCW_READ_MT:
case DASD_ECKD_CCW_READ_KD:
case DASD_ECKD_CCW_READ_KD_MT:
data->auxiliary.length_valid = 0x1;
data->length = reclen;
data->operation.operation = 0x06;
break;
case DASD_ECKD_CCW_READ_CKD:
case DASD_ECKD_CCW_READ_CKD_MT:
data->auxiliary.length_valid = 0x1;
data->length = reclen;
data->operation.operation = 0x16;
break;
case DASD_ECKD_CCW_READ_COUNT:
data->operation.operation = 0x06;
break;
case DASD_ECKD_CCW_READ_TRACK:
data->operation.orientation = 0x1;
data->operation.operation = 0x0C;
data->extended_parameter_length = 0;
data->sector = 0xFF;
break;
case DASD_ECKD_CCW_READ_TRACK_DATA:
data->auxiliary.length_valid = 0x1;
data->length = tlf;
data->operation.operation = 0x0C;
break;
case DASD_ECKD_CCW_ERASE:
data->length = reclen;
data->auxiliary.length_valid = 0x1;
data->operation.operation = 0x0b;
break;
default:
DBF_DEV_EVENT(DBF_ERR, device,
"fill LRE unknown opcode 0x%x", cmd);
BUG();
}
set_ch_t(&data->seek_addr,
trk / private->rdc_data.trk_per_cyl,
trk % private->rdc_data.trk_per_cyl);
data->search_arg.cyl = data->seek_addr.cyl;
data->search_arg.head = data->seek_addr.head;
data->search_arg.record = rec_on_trk;
}
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
unsigned int trk, unsigned int totrk, int cmd,
struct dasd_device *basedev, struct dasd_device *startdev,
unsigned int format, unsigned int rec_on_trk, int count,
unsigned int blksize, unsigned int tlf)
{
struct dasd_eckd_private *basepriv, *startpriv;
struct LRE_eckd_data *lredata;
struct DE_eckd_data *dedata;
int rc = 0;
basepriv = basedev->private;
startpriv = startdev->private;
dedata = &pfxdata->define_extent;
lredata = &pfxdata->locate_record;
ccw->cmd_code = DASD_ECKD_CCW_PFX;
ccw->flags = 0;
if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
ccw->count = sizeof(*pfxdata) + 2;
ccw->cda = virt_to_dma32(pfxdata);
memset(pfxdata, 0, sizeof(*pfxdata) + 2);
} else {
ccw->count = sizeof(*pfxdata);
ccw->cda = virt_to_dma32(pfxdata);
memset(pfxdata, 0, sizeof(*pfxdata));
}
/* prefix data */
if (format > 1) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"PFX LRE unknown format 0x%x", format);
BUG();
return -EINVAL;
}
pfxdata->format = format;
pfxdata->base_address = basepriv->conf.ned->unit_addr;
pfxdata->base_lss = basepriv->conf.ned->ID;
pfxdata->validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
pfxdata->validity.verify_base = 1;
if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
pfxdata->validity.verify_base = 1;
pfxdata->validity.hyper_pav = 1;
}
rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
/*
* For some commands the System Time Stamp is set in the define extent
* data when XRC is supported. The validity of the time stamp must be
* reflected in the prefix data as well.
*/
if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
if (format == 1) {
locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
basedev, blksize, tlf);
}
return rc;
}
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
unsigned int trk, unsigned int totrk, int cmd,
struct dasd_device *basedev, struct dasd_device *startdev)
{
return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
0, 0, 0, 0, 0);
}
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
unsigned int rec_on_trk, int no_rec, int cmd,
struct dasd_device * device, int reclen)
{
struct dasd_eckd_private *private = device->private;
int sector;
int dn, d;
DBF_DEV_EVENT(DBF_INFO, device,
"Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
trk, rec_on_trk, no_rec, cmd, reclen);
ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
ccw->flags = 0;
ccw->count = 16;
ccw->cda = virt_to_dma32(data);
memset(data, 0, sizeof(struct LO_eckd_data));
sector = 0;
if (rec_on_trk) {
switch (private->rdc_data.dev_type) {
case 0x3390:
dn = ceil_quot(reclen + 6, 232);
d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
break;
case 0x3380:
d = 7 + ceil_quot(reclen + 12, 32);
sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
break;
}
}
data->sector = sector;
data->count = no_rec;
switch (cmd) {
case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
data->operation.orientation = 0x3;
data->operation.operation = 0x03;
break;
case DASD_ECKD_CCW_READ_HOME_ADDRESS:
data->operation.orientation = 0x3;
data->operation.operation = 0x16;
break;
case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
data->operation.orientation = 0x1;
data->operation.operation = 0x03;
data->count++;
break;
case DASD_ECKD_CCW_READ_RECORD_ZERO:
data->operation.orientation = 0x3;
data->operation.operation = 0x16;
data->count++;
break;
case DASD_ECKD_CCW_WRITE:
case DASD_ECKD_CCW_WRITE_MT:
case DASD_ECKD_CCW_WRITE_KD:
case DASD_ECKD_CCW_WRITE_KD_MT:
data->auxiliary.last_bytes_used = 0x1;
data->length = reclen;
data->operation.operation = 0x01;
break;
case DASD_ECKD_CCW_WRITE_CKD:
case DASD_ECKD_CCW_WRITE_CKD_MT:
data->auxiliary.last_bytes_used = 0x1;
data->length = reclen;
data->operation.operation = 0x03;
break;
case DASD_ECKD_CCW_READ:
case DASD_ECKD_CCW_READ_MT:
case DASD_ECKD_CCW_READ_KD:
case DASD_ECKD_CCW_READ_KD_MT:
data->auxiliary.last_bytes_used = 0x1;
data->length = reclen;
data->operation.operation = 0x06;
break;
case DASD_ECKD_CCW_READ_CKD:
case DASD_ECKD_CCW_READ_CKD_MT:
data->auxiliary.last_bytes_used = 0x1;
data->length = reclen;
data->operation.operation = 0x16;
break;
case DASD_ECKD_CCW_READ_COUNT:
data->operation.operation = 0x06;
break;
case DASD_ECKD_CCW_ERASE:
data->length = reclen;
data->auxiliary.last_bytes_used = 0x1;
data->operation.operation = 0x0b;
break;
default:
DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
"opcode 0x%x", cmd);
}
set_ch_t(&data->seek_addr,
trk / private->rdc_data.trk_per_cyl,
trk % private->rdc_data.trk_per_cyl);
data->search_arg.cyl = data->seek_addr.cyl;
data->search_arg.head = data->seek_addr.head;
data->search_arg.record = rec_on_trk;
}
/*
* Returns 1 if the block is one of the special blocks that needs
* to get read/written with the KD variant of the command.
* That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
* DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
* Luckily the KD variants differ only by one bit (0x08) from the
* normal variant. So don't wonder about code like:
* if (dasd_eckd_cdl_special(blk_per_trk, recid))
* ccw->cmd_code |= 0x8;
*/
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
if (recid < 3)
return 1;
if (recid < blk_per_trk)
return 0;
if (recid < 2 * blk_per_trk)
return 1;
return 0;
}
/*
* Returns the record size for the special blocks of the cdl format.
* Only returns something useful if dasd_eckd_cdl_special is true
* for the recid.
*/
static inline int
dasd_eckd_cdl_reclen(int recid)
{
if (recid < 3)
return sizes_trk0[recid];
return LABEL_SIZE;
}
/* create unique id from private structure. */
static void create_uid(struct dasd_conf *conf, struct dasd_uid *uid)
{
int count;
memset(uid, 0, sizeof(struct dasd_uid));
memcpy(uid->vendor, conf->ned->HDA_manufacturer,
sizeof(uid->vendor) - 1);
EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
memcpy(uid->serial, &conf->ned->serial,
sizeof(uid->serial) - 1);
EBCASC(uid->serial, sizeof(uid->serial) - 1);
uid->ssid = conf->gneq->subsystemID;
uid->real_unit_addr = conf->ned->unit_addr;
if (conf->sneq) {
uid->type = conf->sneq->sua_flags;
if (uid->type == UA_BASE_PAV_ALIAS)
uid->base_unit_addr = conf->sneq->base_unit_addr;
} else {
uid->type = UA_BASE_DEVICE;
}
if (conf->vdsneq) {
for (count = 0; count < 16; count++) {
sprintf(uid->vduit+2*count, "%02x",
conf->vdsneq->uit[count]);
}
}
}
/*
* Generate device unique id that specifies the physical device.
*/
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
unsigned long flags;
if (!private)
return -ENODEV;
if (!private->conf.ned || !private->conf.gneq)
return -ENODEV;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
create_uid(&private->conf, &private->uid);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
struct dasd_eckd_private *private = device->private;
unsigned long flags;
if (private) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
*uid = private->uid;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
return -EINVAL;
}
/*
* compare device UID with data of a given dasd_eckd_private structure
* return 0 for match
*/
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
struct dasd_conf *path_conf)
{
struct dasd_uid device_uid;
struct dasd_uid path_uid;
create_uid(path_conf, &path_uid);
dasd_eckd_get_uid(device, &device_uid);
return memcmp(&device_uid, &path_uid, sizeof(struct dasd_uid));
}
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
struct dasd_ccw_req *cqr,
__u8 *rcd_buffer,
__u8 lpm)
{
struct ccw1 *ccw;
/*
* buffer has to start with EBCDIC "V1.0" to show
* support for virtual device SNEQ
*/
rcd_buffer[0] = 0xE5;
rcd_buffer[1] = 0xF1;
rcd_buffer[2] = 0x4B;
rcd_buffer[3] = 0xF0;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RCD;
ccw->flags = 0;
ccw->cda = virt_to_dma32(rcd_buffer);
ccw->count = DASD_ECKD_RCD_DATA_SIZE;
cqr->magic = DASD_ECKD_MAGIC;
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->expires = 10*HZ;
cqr->lpm = lpm;
cqr->retries = 256;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
/*
 * Wakeup helper for read_conf.
 * If the cqr is not done and needs some error recovery,
 * the buffer has to be re-initialized with the EBCDIC "V1.0"
 * to show support for virtual device SNEQ.
 */
static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
{
struct ccw1 *ccw;
__u8 *rcd_buffer;
if (cqr->status != DASD_CQR_DONE) {
ccw = cqr->cpaddr;
rcd_buffer = dma32_to_virt(ccw->cda);
memset(rcd_buffer, 0, sizeof(*rcd_buffer));
rcd_buffer[0] = 0xE5;
rcd_buffer[1] = 0xF1;
rcd_buffer[2] = 0x4B;
rcd_buffer[3] = 0xF0;
}
dasd_wakeup_cb(cqr, data);
}
static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
struct dasd_ccw_req *cqr,
__u8 *rcd_buffer,
__u8 lpm)
{
struct ciw *ciw;
int rc;
/*
 * sanity check: scan for the RCD command in extended SenseID data;
 * some devices do not support RCD
 */
ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
return -EOPNOTSUPP;
dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
cqr->retries = 5;
cqr->callback = read_conf_cb;
rc = dasd_sleep_on_immediatly(cqr);
return rc;
}
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
void **rcd_buffer,
int *rcd_buffer_size, __u8 lpm)
{
struct ciw *ciw;
char *rcd_buf = NULL;
int ret;
struct dasd_ccw_req *cqr;
/*
 * sanity check: scan for the RCD command in extended SenseID data;
 * some devices do not support RCD
 */
ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
ret = -EOPNOTSUPP;
goto out_error;
}
rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
if (!rcd_buf) {
ret = -ENOMEM;
goto out_error;
}
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
0, /* use rcd_buf as data area */
device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate RCD request");
ret = -ENOMEM;
goto out_error;
}
dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
cqr->callback = read_conf_cb;
ret = dasd_sleep_on(cqr);
/*
 * on success we update the user input parameters
 */
dasd_sfree_request(cqr, cqr->memdev);
if (ret)
goto out_error;
*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
*rcd_buffer = rcd_buf;
return 0;
out_error:
kfree(rcd_buf);
*rcd_buffer = NULL;
*rcd_buffer_size = 0;
return ret;
}
static int dasd_eckd_identify_conf_parts(struct dasd_conf *conf)
{
struct dasd_sneq *sneq;
int i, count;
conf->ned = NULL;
conf->sneq = NULL;
conf->vdsneq = NULL;
conf->gneq = NULL;
count = conf->len / sizeof(struct dasd_sneq);
sneq = (struct dasd_sneq *)conf->data;
for (i = 0; i < count; ++i) {
if (sneq->flags.identifier == 1 && sneq->format == 1)
conf->sneq = sneq;
else if (sneq->flags.identifier == 1 && sneq->format == 4)
conf->vdsneq = (struct vd_sneq *)sneq;
else if (sneq->flags.identifier == 2)
conf->gneq = (struct dasd_gneq *)sneq;
else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
conf->ned = (struct dasd_ned *)sneq;
sneq++;
}
if (!conf->ned || !conf->gneq) {
conf->ned = NULL;
conf->sneq = NULL;
conf->vdsneq = NULL;
conf->gneq = NULL;
return -EINVAL;
}
return 0;
};
static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
struct dasd_gneq *gneq;
int i, count, found;
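/* Return the path access mode encoded in byte 18 of the general NEQ
 * (identifier 2); callers switch on the 0x02 and 0x03 values to
 * build the nppm and ppm path masks (see dasd_eckd_read_conf()).
 */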
count = conf_len / sizeof(*gneq);
gneq = (struct dasd_gneq *)conf_data;
found = 0;
for (i = 0; i < count; ++i) {
if (gneq->flags.identifier == 2) {
found = 1;
break;
}
gneq++;
}
if (found)
return ((char *)gneq)[18] & 0x07;
else
return 0;
}
static void dasd_eckd_store_conf_data(struct dasd_device *device,
struct dasd_conf_data *conf_data, int chp)
{
struct dasd_eckd_private *private = device->private;
struct channel_path_desc_fmt0 *chp_desc;
struct subchannel_id sch_id;
void *cdp;
/*
 * Path handling and read_conf allocate data; free the old buffer
 * before replacing the pointer. Also replace the old
 * private->conf.data pointer with the new one if it points to the
 * same data.
 */
cdp = device->path[chp].conf_data;
if (private->conf.data == cdp) {
private->conf.data = (void *)conf_data;
dasd_eckd_identify_conf_parts(&private->conf);
}
ccw_device_get_schid(device->cdev, &sch_id);
device->path[chp].conf_data = conf_data;
device->path[chp].cssid = sch_id.cssid;
device->path[chp].ssid = sch_id.ssid;
chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
if (chp_desc)
device->path[chp].chpid = chp_desc->chpid;
kfree(chp_desc);
kfree(cdp);
}
static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int i;
private->conf.data = NULL;
private->conf.len = 0;
for (i = 0; i < 8; i++) {
kfree(device->path[i].conf_data);
device->path[i].conf_data = NULL;
device->path[i].cssid = 0;
device->path[i].ssid = 0;
device->path[i].chpid = 0;
dasd_path_notoper(device, i);
}
}
static void dasd_eckd_read_fc_security(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
u8 esm_valid;
u8 esm[8];
int chp;
int rc;
rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
if (rc) {
for (chp = 0; chp < 8; chp++)
device->path[chp].fc_security = 0;
return;
}
for (chp = 0; chp < 8; chp++) {
if (esm_valid & (0x80 >> chp))
device->path[chp].fc_security = esm[chp];
else
device->path[chp].fc_security = 0;
}
}
static void dasd_eckd_get_uid_string(struct dasd_conf *conf, char *print_uid)
{
struct dasd_uid uid;
create_uid(conf, &uid);
snprintf(print_uid, DASD_UID_STRLEN, "%s.%s.%04x.%02x%s%s",
uid.vendor, uid.serial, uid.ssid, uid.real_unit_addr,
uid.vduit[0] ? "." : "", uid.vduit);
}
static int dasd_eckd_check_cabling(struct dasd_device *device,
void *conf_data, __u8 lpm)
{
char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN];
struct dasd_eckd_private *private = device->private;
struct dasd_conf path_conf;
path_conf.data = conf_data;
path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
if (dasd_eckd_identify_conf_parts(&path_conf))
return 1;
if (dasd_eckd_compare_path_uid(device, &path_conf)) {
dasd_eckd_get_uid_string(&path_conf, print_path_uid);
dasd_eckd_get_uid_string(&private->conf, print_device_uid);
dev_err(&device->cdev->dev,
"Not all channel paths lead to the same device, path %02X leads to device %s instead of %s\n",
lpm, print_path_uid, print_device_uid);
return 1;
}
return 0;
}
static int dasd_eckd_read_conf(struct dasd_device *device)
{
void *conf_data;
int conf_len, conf_data_saved;
int rc, path_err, pos;
__u8 lpm, opm;
struct dasd_eckd_private *private;
private = device->private;
opm = ccw_device_get_path_mask(device->cdev);
conf_data_saved = 0;
path_err = 0;
/* get configuration data per operational path */
for (lpm = 0x80; lpm; lpm>>= 1) {
if (!(lpm & opm))
continue;
rc = dasd_eckd_read_conf_lpm(device, &conf_data,
&conf_len, lpm);
if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Read configuration data returned "
"error %d", rc);
return rc;
}
if (conf_data == NULL) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"No configuration data "
"retrieved");
/* no further analysis possible */
dasd_path_add_opm(device, opm);
continue; /* no error */
}
/* save first valid configuration data */
if (!conf_data_saved) {
/* initially clear previously stored conf_data */
dasd_eckd_clear_conf_data(device);
private->conf.data = conf_data;
private->conf.len = conf_len;
if (dasd_eckd_identify_conf_parts(&private->conf)) {
private->conf.data = NULL;
private->conf.len = 0;
kfree(conf_data);
continue;
}
/*
 * build the device UID so that other path data
 * can be compared to it
 */
dasd_eckd_generate_uid(device);
conf_data_saved++;
} else if (dasd_eckd_check_cabling(device, conf_data, lpm)) {
dasd_path_add_cablepm(device, lpm);
path_err = -EINVAL;
kfree(conf_data);
continue;
}
pos = pathmask_to_pos(lpm);
dasd_eckd_store_conf_data(device, conf_data, pos);
switch (dasd_eckd_path_access(conf_data, conf_len)) {
case 0x02:
dasd_path_add_nppm(device, lpm);
break;
case 0x03:
dasd_path_add_ppm(device, lpm);
break;
}
if (!dasd_path_get_opm(device)) {
dasd_path_set_opm(device, lpm);
dasd_generic_path_operational(device);
} else {
dasd_path_add_opm(device, lpm);
}
}
return path_err;
}
static u32 get_fcx_max_data(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int fcx_in_css, fcx_in_gneq, fcx_in_features;
unsigned int mdc;
int tpm;
if (dasd_nofcx)
return 0;
/* is transport mode supported? */
fcx_in_css = css_general_characteristics.fcx;
fcx_in_gneq = private->conf.gneq->reserved2[7] & 0x04;
fcx_in_features = private->features.feature[40] & 0x80;
tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
if (!tpm)
return 0;
mdc = ccw_device_get_mdc(device->cdev, 0);
if (mdc == 0) {
dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
return 0;
} else {
return (u32)mdc * FCX_MAX_DATA_FACTOR;
}
}
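/*
 * Check that a newly available path supports at least the transport mode
 * data size that is already in use on the device.
 */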
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
struct dasd_eckd_private *private = device->private;
unsigned int mdc;
u32 fcx_max_data;
if (private->fcx_max_data) {
mdc = ccw_device_get_mdc(device->cdev, lpm);
if (mdc == 0) {
dev_warn(&device->cdev->dev,
"Detecting the maximum data size for zHPF "
"requests failed (rc=%d) for a new path %x\n",
mdc, lpm);
return mdc;
}
fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
if (fcx_max_data < private->fcx_max_data) {
dev_warn(&device->cdev->dev,
"The maximum data size for zHPF requests %u "
"on a new path %x is below the active maximum "
"%u\n", fcx_max_data, lpm,
private->fcx_max_data);
return -EACCES;
}
}
return 0;
}
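/*
 * Re-read the configuration data over the first operational path that
 * delivers valid data and rebuild the device UID from it.
 */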
static int rebuild_device_uid(struct dasd_device *device,
struct pe_handler_work_data *data)
{
struct dasd_eckd_private *private = device->private;
__u8 lpm, opm = dasd_path_get_opm(device);
int rc = -ENODEV;
for (lpm = 0x80; lpm; lpm >>= 1) {
if (!(lpm & opm))
continue;
memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
memset(&data->cqr, 0, sizeof(data->cqr));
data->cqr.cpaddr = &data->ccw;
rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
data->rcd_buffer,
lpm);
if (rc) {
if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
continue;
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Read configuration data "
"returned error %d", rc);
break;
}
memcpy(private->conf.data, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
if (dasd_eckd_identify_conf_parts(&private->conf)) {
rc = -ENODEV;
} else /* first valid path is enough */
break;
}
if (!rc)
rc = dasd_eckd_generate_uid(device);
return rc;
}
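/*
 * Verify each path in the "to be verified" path mask: read its configuration
 * data, compare the path UID with the device UID and sort the path into the
 * operational, (non-)preferred, error, no-HPF or miscabled path mask
 * accordingly.
 */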
static void dasd_eckd_path_available_action(struct dasd_device *device,
struct pe_handler_work_data *data)
{
__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
struct dasd_conf_data *conf_data;
char print_uid[DASD_UID_STRLEN];
struct dasd_conf path_conf;
unsigned long flags;
int rc, pos;
opm = 0;
npm = 0;
ppm = 0;
epm = 0;
hpfpm = 0;
cablepm = 0;
for (lpm = 0x80; lpm; lpm >>= 1) {
if (!(lpm & data->tbvpm))
continue;
memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
memset(&data->cqr, 0, sizeof(data->cqr));
data->cqr.cpaddr = &data->ccw;
rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
data->rcd_buffer,
lpm);
if (!rc) {
switch (dasd_eckd_path_access(data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE)
) {
case 0x02:
npm |= lpm;
break;
case 0x03:
ppm |= lpm;
break;
}
opm |= lpm;
} else if (rc == -EOPNOTSUPP) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"path verification: No configuration "
"data retrieved");
opm |= lpm;
} else if (rc == -EAGAIN) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"path verification: device is stopped,"
" try again later");
epm |= lpm;
} else {
dev_warn(&device->cdev->dev,
"Reading device feature codes failed "
"(rc=%d) for new path %x\n", rc, lpm);
continue;
}
if (verify_fcx_max_data(device, lpm)) {
opm &= ~lpm;
npm &= ~lpm;
ppm &= ~lpm;
hpfpm |= lpm;
continue;
}
/*
		 * save conf_data for the comparison below, since
		 * rebuild_device_uid may change
		 * the original data
*/
memcpy(&path_rcd_buf, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
path_conf.data = (void *)&path_rcd_buf;
path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
if (dasd_eckd_identify_conf_parts(&path_conf)) {
path_conf.data = NULL;
path_conf.len = 0;
continue;
}
/*
* compare path UID with device UID only if at least
* one valid path is left
		 * otherwise the device UID may have changed and
* the first working path UID will be used as device UID
*/
if (dasd_path_get_opm(device) &&
dasd_eckd_compare_path_uid(device, &path_conf)) {
/*
* the comparison was not successful
* rebuild the device UID with at least one
* known path in case a z/VM hyperswap command
* has changed the device
*
			 * after this, compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path cannot be used
*/
if (rebuild_device_uid(device, data) ||
dasd_eckd_compare_path_uid(
device, &path_conf)) {
dasd_eckd_get_uid_string(&path_conf, print_uid);
dev_err(&device->cdev->dev,
"The newly added channel path %02X "
"will not be used because it leads "
"to a different device %s\n",
lpm, print_uid);
opm &= ~lpm;
npm &= ~lpm;
ppm &= ~lpm;
cablepm |= lpm;
continue;
}
}
conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
if (conf_data) {
memcpy(conf_data, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
} else {
/*
			 * the path is operational but the path config data
			 * could not be stored due to a low memory condition;
			 * add it to the error path mask and schedule a path
			 * verification later so that it can be added again
*/
epm |= lpm;
}
pos = pathmask_to_pos(lpm);
dasd_eckd_store_conf_data(device, conf_data, pos);
/*
* There is a small chance that a path is lost again between
* above path verification and the following modification of
* the device opm mask. We could avoid that race here by using
* yet another path mask, but we rather deal with this unlikely
* situation in dasd_start_IO.
*/
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (!dasd_path_get_opm(device) && opm) {
dasd_path_set_opm(device, opm);
dasd_generic_path_operational(device);
} else {
dasd_path_add_opm(device, opm);
}
dasd_path_add_nppm(device, npm);
dasd_path_add_ppm(device, ppm);
if (epm) {
dasd_path_add_tbvpm(device, epm);
dasd_device_set_timer(device, 50);
}
dasd_path_add_cablepm(device, cablepm);
dasd_path_add_nohpfpm(device, hpfpm);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_path_create_kobj(device, pos);
}
}
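/*
 * Worker function for path event handling; reschedules itself while the
 * device is suspended or another path verification is still running.
 */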
static void do_pe_handler_work(struct work_struct *work)
{
struct pe_handler_work_data *data;
struct dasd_device *device;
data = container_of(work, struct pe_handler_work_data, worker);
device = data->device;
/* delay path verification until device was resumed */
if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
schedule_work(work);
return;
}
/* check if path verification already running and delay if so */
if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
schedule_work(work);
return;
}
if (data->tbvpm)
dasd_eckd_path_available_action(device, data);
if (data->fcsecpm)
dasd_eckd_read_fc_security(device);
clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
dasd_put_device(device);
if (data->isglobal)
mutex_unlock(&dasd_pe_handler_mutex);
else
kfree(data);
}
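/*
 * Schedule path verification and/or reading of the FC security information
 * for the given path masks; falls back to the static global work data if
 * no memory can be allocated.
 */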
static int dasd_eckd_pe_handler(struct dasd_device *device,
__u8 tbvpm, __u8 fcsecpm)
{
struct pe_handler_work_data *data;
data = kzalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
if (!data) {
if (mutex_trylock(&dasd_pe_handler_mutex)) {
data = pe_handler_worker;
data->isglobal = 1;
} else {
return -ENOMEM;
}
}
INIT_WORK(&data->worker, do_pe_handler_work);
dasd_get_device(device);
data->device = device;
data->tbvpm = tbvpm;
data->fcsecpm = fcsecpm;
schedule_work(&data->worker);
return 0;
}
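/*
 * Trigger re-verification of the given paths or, if no mask is given, of
 * all paths that are currently not operational.
 */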
static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
struct dasd_eckd_private *private = device->private;
unsigned long flags;
if (!private->fcx_max_data)
private->fcx_max_data = get_fcx_max_data(device);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
dasd_schedule_device_bh(device);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
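/*
 * Read the feature codes of the storage server (PSF/RSSD, suborder 0x41)
 * and cache them in the private device data.
 */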
static int dasd_eckd_read_features(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct dasd_psf_prssd_data *prssdp;
struct dasd_rssd_features *features;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
memset(&private->features, 0, sizeof(struct dasd_rssd_features));
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_features)),
device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
"allocate initialization request");
return PTR_ERR(cqr);
}
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10 * HZ;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = 0x41; /* Read Feature Codes */
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - feature codes */
features = (struct dasd_rssd_features *) (prssdp + 1);
memset(features, 0, sizeof(struct dasd_rssd_features));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_features);
ccw->cda = virt_to_dma32(features);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on(cqr);
if (rc == 0) {
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
features = (struct dasd_rssd_features *) (prssdp + 1);
memcpy(&private->features, features,
sizeof(struct dasd_rssd_features));
} else
dev_warn(&device->cdev->dev, "Reading device feature codes"
" failed with rc=%d\n", rc);
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/* Read Volume Information - Volume Storage Query */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct dasd_psf_prssd_data *prssdp;
struct dasd_rssd_vsq *vsq;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int useglobal;
int rc;
/* This command cannot be executed on an alias device */
if (private->uid.type == UA_BASE_PAV_ALIAS ||
private->uid.type == UA_HYPER_PAV_ALIAS)
return 0;
useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
sizeof(*prssdp) + sizeof(*vsq), device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate initialization request");
mutex_lock(&dasd_vol_info_mutex);
useglobal = 1;
cqr = &dasd_vol_info_req->cqr;
memset(cqr, 0, sizeof(*cqr));
memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
cqr->cpaddr = &dasd_vol_info_req->ccw;
cqr->data = &dasd_vol_info_req->data;
cqr->magic = DASD_ECKD_MAGIC;
}
/* Prepare for Read Subsystem Data */
prssdp = cqr->data;
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_VSQ; /* Volume Storage Query */
prssdp->lss = private->conf.ned->ID;
prssdp->volume = private->conf.ned->unit_addr;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(*prssdp);
ccw->flags |= CCW_FLAG_CC;
ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - Volume Storage Query */
vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
memset(vsq, 0, sizeof(*vsq));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*vsq);
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = virt_to_dma32(vsq);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = device->default_expires * HZ;
/* The command might not be supported. Suppress the error output */
__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
rc = dasd_sleep_on_interruptible(cqr);
if (rc == 0) {
memcpy(&private->vsq, vsq, sizeof(*vsq));
} else {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading the volume storage information failed with rc=%d", rc);
}
if (useglobal)
mutex_unlock(&dasd_vol_info_mutex);
else
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
static int dasd_eckd_is_ese(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->vsq.vol_info.ese;
}
static int dasd_eckd_ext_pool_id(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->vsq.extent_pool_id;
}
/*
* This value represents the total amount of available space. As more space is
* allocated by ESE volumes, this value will decrease.
* The data for this value is therefore updated on any call.
*/
static int dasd_eckd_space_configured(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int rc;
rc = dasd_eckd_read_vol_info(device);
return rc ? : private->vsq.space_configured;
}
/*
* The value of space allocated by an ESE volume may have changed and is
* therefore updated on any call.
*/
static int dasd_eckd_space_allocated(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int rc;
rc = dasd_eckd_read_vol_info(device);
return rc ? : private->vsq.space_allocated;
}
static int dasd_eckd_logical_capacity(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->vsq.logical_capacity;
}
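/*
 * Worker that re-reads the volume information after an out-of-space
 * condition and either signals available space or warns that the extent
 * pool is exhausted.
 */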
static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
{
struct ext_pool_exhaust_work_data *data;
struct dasd_device *device;
struct dasd_device *base;
data = container_of(work, struct ext_pool_exhaust_work_data, worker);
device = data->device;
base = data->base;
if (!base)
base = device;
if (dasd_eckd_space_configured(base) != 0) {
dasd_generic_space_avail(device);
} else {
dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
}
dasd_put_device(device);
kfree(data);
}
static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
struct dasd_ccw_req *cqr)
{
struct ext_pool_exhaust_work_data *data;
data = kzalloc(sizeof(*data), GFP_ATOMIC);
if (!data)
return -ENOMEM;
INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
dasd_get_device(device);
data->device = device;
if (cqr->block)
data->base = cqr->block->base;
else if (cqr->basedev)
data->base = cqr->basedev;
else
data->base = NULL;
schedule_work(&data->worker);
return 0;
}
static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
struct dasd_rssd_lcq *lcq)
{
struct dasd_eckd_private *private = device->private;
int pool_id = dasd_eckd_ext_pool_id(device);
struct dasd_ext_pool_sum eps;
int i;
for (i = 0; i < lcq->pool_count; i++) {
eps = lcq->ext_pool_sum[i];
if (eps.pool_id == pool_id) {
memcpy(&private->eps, &eps,
sizeof(struct dasd_ext_pool_sum));
}
}
}
/* Read Extent Pool Information - Logical Configuration Query */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct dasd_psf_prssd_data *prssdp;
struct dasd_rssd_lcq *lcq;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
/* This command cannot be executed on an alias device */
if (private->uid.type == UA_BASE_PAV_ALIAS ||
private->uid.type == UA_HYPER_PAV_ALIAS)
return 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
sizeof(*prssdp) + sizeof(*lcq), device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate initialization request");
return PTR_ERR(cqr);
}
/* Prepare for Read Subsystem Data */
prssdp = cqr->data;
memset(prssdp, 0, sizeof(*prssdp));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_LCQ; /* Logical Configuration Query */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(*prssdp);
ccw->flags |= CCW_FLAG_CC;
ccw->cda = virt_to_dma32(prssdp);
lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
memset(lcq, 0, sizeof(*lcq));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*lcq);
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = virt_to_dma32(lcq);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = device->default_expires * HZ;
/* The command might not be supported. Suppress the error output */
__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
rc = dasd_sleep_on_interruptible(cqr);
if (rc == 0) {
dasd_eckd_cpy_ext_pool_data(device, lcq);
} else {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading the logical configuration failed with rc=%d", rc);
}
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* Depending on the device type, the extent size is specified either as
 * cylinders per extent (CKD) or size per extent (FBA).
 * A 1 GB extent corresponds to 1113 cylinders, a 16 MB extent to 21 cylinders.
*/
static int dasd_eckd_ext_size(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct dasd_ext_pool_sum eps = private->eps;
if (!eps.flags.extent_size_valid)
return 0;
if (eps.extent_size.size_1G)
return 1113;
if (eps.extent_size.size_16M)
return 21;
return 0;
}
static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->eps.warn_thrshld;
}
static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->eps.flags.capacity_at_warnlevel;
}
/*
* Extent Pool out of space
*/
static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->eps.flags.pool_oos;
}
/*
* Build CP for Perform Subsystem Function - SSC.
*/
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
int enable_pav)
{
struct dasd_ccw_req *cqr;
struct dasd_psf_ssc_data *psf_ssc_data;
struct ccw1 *ccw;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
sizeof(struct dasd_psf_ssc_data),
device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate PSF-SSC request");
return cqr;
}
psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
psf_ssc_data->order = PSF_ORDER_SSC;
psf_ssc_data->suborder = 0xc0;
if (enable_pav) {
psf_ssc_data->suborder |= 0x08;
psf_ssc_data->reserved[0] = 0x88;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->cda = virt_to_dma32(psf_ssc_data);
ccw->count = 66;
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10*HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
/*
* Perform Subsystem Function.
* It is necessary to trigger CIO for channel revalidation since this
* call might change behaviour of DASD devices.
*/
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
unsigned long flags)
{
struct dasd_ccw_req *cqr;
int rc;
cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
/*
	 * set flags, e.g. turn on failfast, to prevent blocking;
	 * the calling function should handle failed requests
*/
cqr->flags |= flags;
rc = dasd_sleep_on(cqr);
if (!rc)
/* trigger CIO to reprobe devices */
css_schedule_reprobe();
else if (cqr->intrc == -EAGAIN)
rc = -EAGAIN;
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
 * Validate the storage server of the current device.
*/
static int dasd_eckd_validate_server(struct dasd_device *device,
unsigned long flags)
{
struct dasd_eckd_private *private = device->private;
int enable_pav, rc;
if (private->uid.type == UA_BASE_PAV_ALIAS ||
private->uid.type == UA_HYPER_PAV_ALIAS)
return 0;
if (dasd_nopav || MACHINE_IS_VM)
enable_pav = 0;
else
enable_pav = 1;
rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
	/* the requested feature may not be available on the server,
	 * therefore just report the error and go ahead */
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
"returned rc=%d", private->uid.ssid, rc);
return rc;
}
/*
* worker to do a validate server in case of a lost pathgroup
*/
static void dasd_eckd_do_validate_server(struct work_struct *work)
{
struct dasd_device *device = container_of(work, struct dasd_device,
kick_validate);
unsigned long flags = 0;
set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
if (dasd_eckd_validate_server(device, flags)
== -EAGAIN) {
/* schedule worker again if failed */
schedule_work(&device->kick_validate);
return;
}
dasd_put_device(device);
}
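/* Schedule the validate server worker unless the device is (going) offline. */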
static void dasd_eckd_kick_validate_server(struct dasd_device *device)
{
dasd_get_device(device);
/* exit if device not online or in offline processing */
if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
device->state < DASD_STATE_ONLINE) {
dasd_put_device(device);
return;
}
/* queue call to do_validate_server to the kernel event daemon. */
if (!schedule_work(&device->kick_validate))
dasd_put_device(device);
}
/*
 * return 1 if no copy relation exists or the device is the active primary of it
*/
static int dasd_device_is_primary(struct dasd_device *device)
{
if (!device->copy)
return 1;
if (device->copy->active->device == device)
return 1;
return 0;
}
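/*
 * Allocate the block device structure for a base device; alias devices and
 * copy relation secondaries do not get one.
 */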
static int dasd_eckd_alloc_block(struct dasd_device *device)
{
struct dasd_block *block;
struct dasd_uid temp_uid;
if (!dasd_device_is_primary(device))
return 0;
dasd_eckd_get_uid(device, &temp_uid);
if (temp_uid.type == UA_BASE_DEVICE) {
block = dasd_alloc_block();
if (IS_ERR(block)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"could not allocate dasd block structure");
return PTR_ERR(block);
}
device->block = block;
block->base = device;
}
return 0;
}
static bool dasd_eckd_pprc_enabled(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->rdc_data.facilities.PPRC_enabled;
}
/*
* Check device characteristics.
* If the device is accessible using ECKD discipline, the device is enabled.
*/
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int rc, i;
int readonly;
unsigned long value;
/* setup work queue for validate server*/
INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
/* setup work queue for summary unit check */
INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
if (!ccw_device_is_pathgroup(device->cdev)) {
dev_warn(&device->cdev->dev,
"A channel path group could not be established\n");
return -EIO;
}
if (!ccw_device_is_multipath(device->cdev)) {
dev_info(&device->cdev->dev,
"The DASD is not operating in multipath mode\n");
}
if (!private) {
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
if (!private) {
dev_warn(&device->cdev->dev,
"Allocating memory for private DASD data "
"failed\n");
return -ENOMEM;
}
device->private = private;
} else {
memset(private, 0, sizeof(*private));
}
/* Invalidate status of initial analysis. */
private->init_cqr_status = -1;
/* Set default cache operations. */
private->attrib.operation = DASD_NORMAL_CACHE;
private->attrib.nr_cyl = 0;
/* Read Configuration Data */
rc = dasd_eckd_read_conf(device);
if (rc)
goto out_err1;
/* set some default values */
device->default_expires = DASD_EXPIRES;
device->default_retries = DASD_RETRIES;
device->path_thrhld = DASD_ECKD_PATH_THRHLD;
device->path_interval = DASD_ECKD_PATH_INTERVAL;
device->aq_timeouts = DASD_RETRIES_MAX;
if (private->conf.gneq) {
value = 1;
for (i = 0; i < private->conf.gneq->timeout.value; i++)
value = 10 * value;
value = value * private->conf.gneq->timeout.number;
/* do not accept useless values */
if (value != 0 && value <= DASD_EXPIRES_MAX)
device->default_expires = value;
}
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&private->rdc_data, 64);
if (rc) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Read device characteristic failed, rc=%d", rc);
goto out_err1;
}
/* setup PPRC for device from devmap */
rc = dasd_devmap_set_device_copy_relation(device->cdev,
dasd_eckd_pprc_enabled(device));
if (rc) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"copy relation setup failed, rc=%d", rc);
goto out_err1;
}
/* check if block device is needed and allocate in case */
rc = dasd_eckd_alloc_block(device);
if (rc)
goto out_err1;
/* register lcu with alias handling, enable PAV */
rc = dasd_alias_make_device_known_to_lcu(device);
if (rc)
goto out_err2;
dasd_eckd_validate_server(device, 0);
/* device may report different configuration data after LCU setup */
rc = dasd_eckd_read_conf(device);
if (rc)
goto out_err3;
dasd_eckd_read_fc_security(device);
dasd_path_create_kobjects(device);
/* Read Feature Codes */
dasd_eckd_read_features(device);
/* Read Volume Information */
dasd_eckd_read_vol_info(device);
/* Read Extent Pool Information */
dasd_eckd_read_ext_pool_info(device);
if ((device->features & DASD_FEATURE_USERAW) &&
!(private->rdc_data.facilities.RT_in_LR)) {
dev_err(&device->cdev->dev, "The storage server does not "
"support raw-track access\n");
rc = -EINVAL;
goto out_err3;
}
/* find the valid cylinder size */
if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
private->rdc_data.long_no_cyl)
private->real_cyl = private->rdc_data.long_no_cyl;
else
private->real_cyl = private->rdc_data.no_cyl;
private->fcx_max_data = get_fcx_max_data(device);
readonly = dasd_device_is_ro(device);
if (readonly)
set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
"with %d cylinders, %d heads, %d sectors%s\n",
private->rdc_data.dev_type,
private->rdc_data.dev_model,
private->rdc_data.cu_type,
private->rdc_data.cu_model.model,
private->real_cyl,
private->rdc_data.trk_per_cyl,
private->rdc_data.sec_per_trk,
readonly ? ", read-only device" : "");
return 0;
out_err3:
dasd_alias_disconnect_device_from_lcu(device);
out_err2:
dasd_free_block(device->block);
device->block = NULL;
out_err1:
dasd_eckd_clear_conf_data(device);
dasd_path_remove_kobjects(device);
kfree(device->private);
device->private = NULL;
return rc;
}
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
if (!private)
return;
dasd_alias_disconnect_device_from_lcu(device);
private->conf.ned = NULL;
private->conf.sneq = NULL;
private->conf.vdsneq = NULL;
private->conf.gneq = NULL;
dasd_eckd_clear_conf_data(device);
dasd_path_remove_kobjects(device);
}
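/*
 * Build a channel program that reads the record counts of the first records
 * on tracks 0 and 1; the result is used to detect the disk layout during the
 * initial analysis.
 */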
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct eckd_count *count_data;
struct LO_eckd_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int cplength, datasize;
int i;
cplength = 8;
datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
NULL);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
/* Define extent for the first 2 tracks. */
define_extent(ccw++, cqr->data, 0, 1,
DASD_ECKD_CCW_READ_COUNT, device, 0);
LO_data = cqr->data + sizeof(struct DE_eckd_data);
/* Locate record for the first 4 records on track 0. */
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++, 0, 0, 4,
DASD_ECKD_CCW_READ_COUNT, device, 0);
count_data = private->count_area;
for (i = 0; i < 4; i++) {
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = 0;
ccw->count = 8;
ccw->cda = virt_to_dma32(count_data);
ccw++;
count_data++;
}
/* Locate record for the first record on track 1. */
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++, 1, 0, 1,
DASD_ECKD_CCW_READ_COUNT, device, 0);
/* Read count ccw. */
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = 0;
ccw->count = 8;
ccw->cda = virt_to_dma32(count_data);
cqr->block = NULL;
cqr->startdev = device;
cqr->memdev = device;
cqr->retries = 255;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
return cqr;
}
/* differentiate between 'no record found' and any other error */
static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
{
char *sense;
if (init_cqr->status == DASD_CQR_DONE)
return INIT_CQR_OK;
else if (init_cqr->status == DASD_CQR_NEED_ERP ||
init_cqr->status == DASD_CQR_FAILED) {
sense = dasd_get_sense(&init_cqr->irb);
if (sense && (sense[1] & SNS1_NO_REC_FOUND))
return INIT_CQR_UNFORMATTED;
else
return INIT_CQR_ERROR;
} else
return INIT_CQR_ERROR;
}
/*
* This is the callback function for the init_analysis cqr. It saves
* the status of the initial analysis ccw before it frees it and kicks
* the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the device has not been marked
* for deletion in the meantime).
*/
static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
void *data)
{
struct dasd_device *device = init_cqr->startdev;
struct dasd_eckd_private *private = device->private;
private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
dasd_sfree_request(init_cqr, device);
dasd_kick_device(device);
}
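/*
 * Kick off the initial analysis by queueing the analysis channel program;
 * its callback continues the startup sequence once it has completed.
 */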
static int dasd_eckd_start_analysis(struct dasd_block *block)
{
struct dasd_ccw_req *init_cqr;
init_cqr = dasd_eckd_analysis_ccw(block->base);
if (IS_ERR(init_cqr))
return PTR_ERR(init_cqr);
init_cqr->callback = dasd_eckd_analysis_callback;
init_cqr->callback_data = NULL;
init_cqr->expires = 5*HZ;
/* first try without ERP, so we can later handle unformatted
	 * devices as a special case
*/
clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
init_cqr->retries = 0;
dasd_add_request_head(init_cqr);
return -EAGAIN;
}
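/*
 * Evaluate the result of the initial analysis: determine the disk layout
 * (CDL or LDL), the block size and the total number of blocks.
 */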
static int dasd_eckd_end_analysis(struct dasd_block *block)
{
struct dasd_device *device = block->base;
struct dasd_eckd_private *private = device->private;
struct eckd_count *count_area;
unsigned int sb, blk_per_trk;
int status, i;
struct dasd_ccw_req *init_cqr;
status = private->init_cqr_status;
private->init_cqr_status = -1;
if (status == INIT_CQR_ERROR) {
/* try again, this time with full ERP */
init_cqr = dasd_eckd_analysis_ccw(device);
dasd_sleep_on(init_cqr);
status = dasd_eckd_analysis_evaluation(init_cqr);
dasd_sfree_request(init_cqr, device);
}
if (device->features & DASD_FEATURE_USERAW) {
block->bp_block = DASD_RAW_BLOCKSIZE;
blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
block->s2b_shift = 3;
goto raw;
}
if (status == INIT_CQR_UNFORMATTED) {
dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
return -EMEDIUMTYPE;
} else if (status == INIT_CQR_ERROR) {
dev_err(&device->cdev->dev,
"Detecting the DASD disk layout failed because "
"of an I/O error\n");
return -EIO;
}
private->uses_cdl = 1;
/* Check Track 0 for Compatible Disk Layout */
count_area = NULL;
for (i = 0; i < 3; i++) {
if (private->count_area[i].kl != 4 ||
private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
private->count_area[i].cyl != 0 ||
private->count_area[i].head != count_area_head[i] ||
private->count_area[i].record != count_area_rec[i]) {
private->uses_cdl = 0;
break;
}
}
if (i == 3)
count_area = &private->count_area[3];
if (private->uses_cdl == 0) {
for (i = 0; i < 5; i++) {
if ((private->count_area[i].kl != 0) ||
(private->count_area[i].dl !=
private->count_area[0].dl) ||
private->count_area[i].cyl != 0 ||
private->count_area[i].head != count_area_head[i] ||
private->count_area[i].record != count_area_rec[i])
break;
}
if (i == 5)
count_area = &private->count_area[0];
} else {
if (private->count_area[3].record == 1)
dev_warn(&device->cdev->dev,
"Track 0 has no records following the VTOC\n");
}
if (count_area != NULL && count_area->kl == 0) {
/* we found nothing violating our disk layout */
if (dasd_check_blocksize(count_area->dl) == 0)
block->bp_block = count_area->dl;
}
if (block->bp_block == 0) {
dev_warn(&device->cdev->dev,
"The disk layout of the DASD is not supported\n");
return -EMEDIUMTYPE;
}
block->s2b_shift = 0; /* bits to shift 512 to get a block */
for (sb = 512; sb < block->bp_block; sb = sb << 1)
block->s2b_shift++;
blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
raw:
block->blocks = ((unsigned long) private->real_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk);
dev_info(&device->cdev->dev,
"DASD with %u KB/block, %lu KB total size, %u KB/track, "
"%s\n", (block->bp_block >> 10),
(((unsigned long) private->real_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk * (block->bp_block >> 9)) >> 1),
((blk_per_trk * block->bp_block) >> 10),
private->uses_cdl ?
"compatible disk layout" : "linux disk layout");
return 0;
}
static int dasd_eckd_do_analysis(struct dasd_block *block)
{
struct dasd_eckd_private *private = block->base->private;
if (private->init_cqr_status < 0)
return dasd_eckd_start_analysis(block);
else
return dasd_eckd_end_analysis(block);
}
static int dasd_eckd_basic_to_ready(struct dasd_device *device)
{
return dasd_alias_add_device(device);
}
static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
if (cancel_work_sync(&device->reload_device))
dasd_put_device(device);
if (cancel_work_sync(&device->kick_validate))
dasd_put_device(device);
return 0;
}
static int dasd_eckd_basic_to_known(struct dasd_device *device)
{
return dasd_alias_remove_device(device);
}
static int
dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
struct dasd_eckd_private *private = block->base->private;
if (dasd_check_blocksize(block->bp_block) == 0) {
geo->sectors = recs_per_track(&private->rdc_data,
0, block->bp_block);
}
geo->cylinders = private->rdc_data.no_cyl;
geo->heads = private->rdc_data.trk_per_cyl;
return 0;
}
/*
* Build the TCW request for the format check
*/
static struct dasd_ccw_req *
dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
int enable_pav, struct eckd_count *fmt_buffer,
int rpt)
{
struct dasd_eckd_private *start_priv;
struct dasd_device *startdev = NULL;
struct tidaw *last_tidaw = NULL;
struct dasd_ccw_req *cqr;
struct itcw *itcw;
int itcw_size;
int count;
int rc;
int i;
if (enable_pav)
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
start_priv = startdev->private;
count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
/*
	 * we're adding 'count' tidaws to the itcw.
* calculate the corresponding itcw_size
*/
itcw_size = itcw_calc_size(0, count, 0);
cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
if (IS_ERR(cqr))
return cqr;
start_priv->count++;
itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
if (IS_ERR(itcw)) {
rc = -EINVAL;
goto out_err;
}
cqr->cpaddr = itcw_get_tcw(itcw);
rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
sizeof(struct eckd_count),
count * sizeof(struct eckd_count), 0, rpt);
if (rc)
goto out_err;
for (i = 0; i < count; i++) {
last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
sizeof(struct eckd_count));
if (IS_ERR(last_tidaw)) {
rc = -EINVAL;
goto out_err;
}
}
last_tidaw->flags |= TIDAW_FLAGS_LAST;
itcw_finalize(itcw);
cqr->cpmode = 1;
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->basedev = base;
cqr->retries = startdev->default_retries;
cqr->expires = startdev->default_expires * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
return cqr;
out_err:
dasd_sfree_request(cqr, startdev);
return ERR_PTR(rc);
}
/*
* Build the CCW request for the format check
*/
static struct dasd_ccw_req *
dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
int enable_pav, struct eckd_count *fmt_buffer, int rpt)
{
struct dasd_eckd_private *start_priv;
struct dasd_eckd_private *base_priv;
struct dasd_device *startdev = NULL;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
void *data;
int cplength, datasize;
int use_prefix;
int count;
int i;
if (enable_pav)
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
start_priv = startdev->private;
base_priv = base->private;
count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
use_prefix = base_priv->features.feature[8] & 0x01;
if (use_prefix) {
cplength = 1;
datasize = sizeof(struct PFX_eckd_data);
} else {
cplength = 2;
datasize = sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data);
}
cplength += count;
cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
if (IS_ERR(cqr))
return cqr;
start_priv->count++;
data = cqr->data;
ccw = cqr->cpaddr;
if (use_prefix) {
prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
count, 0, 0);
} else {
define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_READ_COUNT, startdev, 0);
data += sizeof(struct DE_eckd_data);
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, data, fdata->start_unit, 0, count,
DASD_ECKD_CCW_READ_COUNT, base, 0);
}
for (i = 0; i < count; i++) {
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = virt_to_dma32(fmt_buffer);
ccw++;
fmt_buffer++;
}
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->basedev = base;
cqr->retries = DASD_RETRIES;
cqr->expires = startdev->default_expires * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
return cqr;
}
static struct dasd_ccw_req *
dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
struct format_data_t *fdata, int enable_pav)
{
struct dasd_eckd_private *base_priv;
struct dasd_eckd_private *start_priv;
struct dasd_ccw_req *fcp;
struct eckd_count *ect;
struct ch_t address;
struct ccw1 *ccw;
void *data;
int rpt;
int cplength, datasize;
int i, j;
int intensity = 0;
int r0_perm;
int nr_tracks;
int use_prefix;
if (enable_pav)
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
start_priv = startdev->private;
base_priv = base->private;
rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
/*
* fdata->intensity is a bit string that tells us what to do:
* Bit 0: write record zero
* Bit 1: write home address, currently not supported
* Bit 2: invalidate tracks
* Bit 3: use OS/390 compatible disk layout (cdl)
* Bit 4: do not allow storage subsystem to modify record zero
* Only some bit combinations do make sense.
*/
if (fdata->intensity & 0x10) {
r0_perm = 0;
intensity = fdata->intensity & ~0x10;
} else {
r0_perm = 1;
intensity = fdata->intensity;
}
use_prefix = base_priv->features.feature[8] & 0x01;
switch (intensity) {
case 0x00: /* Normal format */
case 0x08: /* Normal format, use cdl. */
cplength = 2 + (rpt*nr_tracks);
if (use_prefix)
datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
rpt * nr_tracks * sizeof(struct eckd_count);
else
datasize = sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data) +
rpt * nr_tracks * sizeof(struct eckd_count);
break;
case 0x01: /* Write record zero and format track. */
case 0x09: /* Write record zero and format track, use cdl. */
cplength = 2 + rpt * nr_tracks;
if (use_prefix)
datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
sizeof(struct eckd_count) +
rpt * nr_tracks * sizeof(struct eckd_count);
else
datasize = sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data) +
sizeof(struct eckd_count) +
rpt * nr_tracks * sizeof(struct eckd_count);
break;
case 0x04: /* Invalidate track. */
case 0x0c: /* Invalidate track, use cdl. */
cplength = 3;
if (use_prefix)
datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
sizeof(struct eckd_count);
else
datasize = sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data) +
sizeof(struct eckd_count);
break;
default:
dev_warn(&startdev->cdev->dev,
"An I/O control call used incorrect flags 0x%x\n",
fdata->intensity);
return ERR_PTR(-EINVAL);
}
fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
if (IS_ERR(fcp))
return fcp;
start_priv->count++;
data = fcp->data;
ccw = fcp->cpaddr;
switch (intensity & ~0x08) {
case 0x00: /* Normal format. */
if (use_prefix) {
prefix(ccw++, (struct PFX_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_CKD, base, startdev);
/* grant subsystem permission to format R0 */
if (r0_perm)
((struct PFX_eckd_data *)data)
->define_extent.ga_extended |= 0x04;
data += sizeof(struct PFX_eckd_data);
} else {
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
/* grant subsystem permission to format R0 */
if (r0_perm)
((struct DE_eckd_data *) data)
->ga_extended |= 0x04;
data += sizeof(struct DE_eckd_data);
}
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
fdata->start_unit, 0, rpt*nr_tracks,
DASD_ECKD_CCW_WRITE_CKD, base,
fdata->blksize);
data += sizeof(struct LO_eckd_data);
break;
case 0x01: /* Write record zero + format track. */
if (use_prefix) {
prefix(ccw++, (struct PFX_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_RECORD_ZERO,
base, startdev);
data += sizeof(struct PFX_eckd_data);
} else {
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
data += sizeof(struct DE_eckd_data);
}
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
fdata->start_unit, 0, rpt * nr_tracks + 1,
DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
base->block->bp_block);
data += sizeof(struct LO_eckd_data);
break;
case 0x04: /* Invalidate track. */
if (use_prefix) {
prefix(ccw++, (struct PFX_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_CKD, base, startdev);
data += sizeof(struct PFX_eckd_data);
} else {
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
data += sizeof(struct DE_eckd_data);
}
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
fdata->start_unit, 0, 1,
DASD_ECKD_CCW_WRITE_CKD, base, 8);
data += sizeof(struct LO_eckd_data);
break;
}
for (j = 0; j < nr_tracks; j++) {
/* calculate cylinder and head for the current track */
set_ch_t(&address,
(fdata->start_unit + j) /
base_priv->rdc_data.trk_per_cyl,
(fdata->start_unit + j) %
base_priv->rdc_data.trk_per_cyl);
if (intensity & 0x01) { /* write record zero */
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
ect->cyl = address.cyl;
ect->head = address.head;
ect->record = 0;
ect->kl = 0;
ect->dl = 8;
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = virt_to_dma32(ect);
ccw++;
}
if ((intensity & ~0x08) & 0x04) { /* erase track */
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
ect->cyl = address.cyl;
ect->head = address.head;
ect->record = 1;
ect->kl = 0;
ect->dl = 0;
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = virt_to_dma32(ect);
} else { /* write remaining records */
for (i = 0; i < rpt; i++) {
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
ect->cyl = address.cyl;
ect->head = address.head;
ect->record = i + 1;
ect->kl = 0;
ect->dl = fdata->blksize;
/*
* Check for special tracks 0-1
* when formatting CDL
*/
if ((intensity & 0x08) &&
address.cyl == 0 && address.head == 0) {
if (i < 3) {
ect->kl = 4;
ect->dl = sizes_trk0[i] - 4;
}
}
if ((intensity & 0x08) &&
address.cyl == 0 && address.head == 1) {
ect->kl = 44;
ect->dl = LABEL_SIZE - 44;
}
ccw[-1].flags |= CCW_FLAG_CC;
if (i != 0 || j == 0)
ccw->cmd_code =
DASD_ECKD_CCW_WRITE_CKD;
else
ccw->cmd_code =
DASD_ECKD_CCW_WRITE_CKD_MT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = virt_to_dma32(ect);
ccw++;
}
}
}
fcp->startdev = startdev;
fcp->memdev = startdev;
fcp->basedev = base;
fcp->retries = 256;
fcp->expires = startdev->default_expires * HZ;
fcp->buildclk = get_tod_clock();
fcp->status = DASD_CQR_FILLED;
return fcp;
}
/*
* Wrapper function to build a CCW request depending on input data
*/
static struct dasd_ccw_req *
dasd_eckd_format_build_ccw_req(struct dasd_device *base,
struct format_data_t *fdata, int enable_pav,
int tpm, struct eckd_count *fmt_buffer, int rpt)
{
struct dasd_ccw_req *ccw_req;
if (!fmt_buffer) {
ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
} else {
if (tpm)
ccw_req = dasd_eckd_build_check_tcw(base, fdata,
enable_pav,
fmt_buffer, rpt);
else
ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
fmt_buffer, rpt);
}
return ccw_req;
}
/*
* Sanity checks on format_data
*/
static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
struct format_data_t *fdata)
{
struct dasd_eckd_private *private = base->private;
if (fdata->start_unit >=
(private->real_cyl * private->rdc_data.trk_per_cyl)) {
dev_warn(&base->cdev->dev,
"Start track number %u used in formatting is too big\n",
fdata->start_unit);
return -EINVAL;
}
if (fdata->stop_unit >=
(private->real_cyl * private->rdc_data.trk_per_cyl)) {
dev_warn(&base->cdev->dev,
"Stop track number %u used in formatting is too big\n",
fdata->stop_unit);
return -EINVAL;
}
if (fdata->start_unit > fdata->stop_unit) {
dev_warn(&base->cdev->dev,
"Start track %u used in formatting exceeds end track\n",
fdata->start_unit);
return -EINVAL;
}
if (dasd_check_blocksize(fdata->blksize) != 0) {
dev_warn(&base->cdev->dev,
"The DASD cannot be formatted with block size %u\n",
fdata->blksize);
return -EINVAL;
}
return 0;
}
/*
* This function will process format_data originally coming from an IOCTL
*/
static int dasd_eckd_format_process_data(struct dasd_device *base,
struct format_data_t *fdata,
int enable_pav, int tpm,
struct eckd_count *fmt_buffer, int rpt,
struct irb *irb)
{
struct dasd_eckd_private *private = base->private;
struct dasd_ccw_req *cqr, *n;
struct list_head format_queue;
struct dasd_device *device;
char *sense = NULL;
int old_start, old_stop, format_step;
int step, retry;
int rc;
rc = dasd_eckd_format_sanity_checks(base, fdata);
if (rc)
return rc;
INIT_LIST_HEAD(&format_queue);
old_start = fdata->start_unit;
old_stop = fdata->stop_unit;
if (!tpm && fmt_buffer != NULL) {
/* Command Mode / Format Check */
format_step = 1;
} else if (tpm && fmt_buffer != NULL) {
/* Transport Mode / Format Check */
format_step = DASD_CQR_MAX_CCW / rpt;
} else {
/* Normal Formatting */
format_step = DASD_CQR_MAX_CCW /
recs_per_track(&private->rdc_data, 0, fdata->blksize);
}
do {
retry = 0;
while (fdata->start_unit <= old_stop) {
step = fdata->stop_unit - fdata->start_unit + 1;
if (step > format_step) {
fdata->stop_unit =
fdata->start_unit + format_step - 1;
}
cqr = dasd_eckd_format_build_ccw_req(base, fdata,
enable_pav, tpm,
fmt_buffer, rpt);
if (IS_ERR(cqr)) {
rc = PTR_ERR(cqr);
if (rc == -ENOMEM) {
if (list_empty(&format_queue))
goto out;
/*
					 * not enough memory available; start the
					 * requests already queued and retry the
					 * rest after the first requests have finished
*/
retry = 1;
break;
}
goto out_err;
}
list_add_tail(&cqr->blocklist, &format_queue);
if (fmt_buffer) {
step = fdata->stop_unit - fdata->start_unit + 1;
fmt_buffer += rpt * step;
}
fdata->start_unit = fdata->stop_unit + 1;
fdata->stop_unit = old_stop;
}
rc = dasd_sleep_on_queue(&format_queue);
out_err:
list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
device = cqr->startdev;
private = device->private;
if (cqr->status == DASD_CQR_FAILED) {
/*
* Only get sense data if called by format
* check
*/
if (fmt_buffer && irb) {
sense = dasd_get_sense(&cqr->irb);
memcpy(irb, &cqr->irb, sizeof(*irb));
}
rc = -EIO;
}
list_del_init(&cqr->blocklist);
dasd_ffree_request(cqr, device);
private->count--;
}
if (rc && rc != -EIO)
goto out;
if (rc == -EIO) {
/*
* In case fewer than the expected records are on the
* track, we will most likely get a 'No Record Found'
* error (in command mode) or a 'File Protected' error
* (in transport mode). Those particular cases shouldn't
* pass the -EIO to the IOCTL, therefore reset the rc
* and continue.
*/
if (sense &&
(sense[1] & SNS1_NO_REC_FOUND ||
sense[1] & SNS1_FILE_PROTECTED))
retry = 1;
else
goto out;
}
} while (retry);
out:
fdata->start_unit = old_start;
fdata->stop_unit = old_stop;
return rc;
}
static int dasd_eckd_format_device(struct dasd_device *base,
struct format_data_t *fdata, int enable_pav)
{
return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
0, NULL);
}
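/*
 * Check whether the given track is already being formatted by another
 * request (or the track count changed meanwhile) and, if not, add it to the
 * list of tracks currently being formatted.
 */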
static bool test_and_set_format_track(struct dasd_format_entry *to_format,
struct dasd_ccw_req *cqr)
{
struct dasd_block *block = cqr->block;
struct dasd_format_entry *format;
unsigned long flags;
bool rc = false;
spin_lock_irqsave(&block->format_lock, flags);
if (cqr->trkcount != atomic_read(&block->trkcount)) {
/*
* The number of formatted tracks has changed after request
		 * start and we cannot tell if the current track was involved.
		 * To avoid data corruption, treat it as if the current track
		 * is involved.
*/
rc = true;
goto out;
}
list_for_each_entry(format, &block->format_list, list) {
if (format->track == to_format->track) {
rc = true;
goto out;
}
}
list_add_tail(&to_format->list, &block->format_list);
out:
spin_unlock_irqrestore(&block->format_lock, flags);
return rc;
}
static void clear_format_track(struct dasd_format_entry *format,
struct dasd_block *block)
{
unsigned long flags;
spin_lock_irqsave(&block->format_lock, flags);
atomic_inc(&block->trkcount);
list_del_init(&format->list);
spin_unlock_irqrestore(&block->format_lock, flags);
}
/*
* Callback function to free ESE format requests.
*/
static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
{
struct dasd_device *device = cqr->startdev;
struct dasd_eckd_private *private = device->private;
struct dasd_format_entry *format = data;
clear_format_track(format, cqr->basedev->block);
private->count--;
dasd_ffree_request(cqr, device);
}
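/*
 * Build a format request for the track that caused the NRF error on an ESE
 * volume, so that the track can be formatted on the fly and the original
 * request retried.
 */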
static struct dasd_ccw_req *
dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
struct irb *irb)
{
struct dasd_eckd_private *private;
struct dasd_format_entry *format;
struct format_data_t fdata;
unsigned int recs_per_trk;
struct dasd_ccw_req *fcqr;
struct dasd_device *base;
struct dasd_block *block;
unsigned int blksize;
struct request *req;
sector_t first_trk;
sector_t last_trk;
sector_t curr_trk;
int rc;
req = dasd_get_callback_data(cqr);
block = cqr->block;
base = block->base;
private = base->private;
blksize = block->bp_block;
recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
format = &startdev->format_entry;
first_trk = blk_rq_pos(req) >> block->s2b_shift;
sector_div(first_trk, recs_per_trk);
last_trk =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
sector_div(last_trk, recs_per_trk);
rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
if (rc)
return ERR_PTR(rc);
if (curr_trk < first_trk || curr_trk > last_trk) {
DBF_DEV_EVENT(DBF_WARNING, startdev,
"ESE error track %llu not within range %llu - %llu\n",
curr_trk, first_trk, last_trk);
return ERR_PTR(-EINVAL);
}
format->track = curr_trk;
	/* test if the track is already being formatted by another thread */
if (test_and_set_format_track(format, cqr)) {
/* this is no real error so do not count down retries */
cqr->retries++;
return ERR_PTR(-EEXIST);
}
fdata.start_unit = curr_trk;
fdata.stop_unit = curr_trk;
fdata.blksize = blksize;
fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
rc = dasd_eckd_format_sanity_checks(base, &fdata);
if (rc)
return ERR_PTR(-EINVAL);
/*
* We're building the request with PAV disabled as we're reusing
* the former startdev.
*/
fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
if (IS_ERR(fcqr))
return fcqr;
fcqr->callback = dasd_eckd_ese_format_cb;
fcqr->callback_data = (void *) format;
return fcqr;
}
/*
* When data is read from an unformatted area of an ESE volume, this function
* returns zeroed data and thereby mimics a read of zero data.
*
* The first unformatted track is the one that got the NRF error, the address is
* encoded in the sense data.
*
* All tracks before have returned valid data and should not be touched.
* All tracks after the unformatted track might be formatted or not. This is
* currently not known, remember the processed data and return the remainder of
* the request to the blocklayer in __dasd_cleanup_cqr().
*/
static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
{
struct dasd_eckd_private *private;
sector_t first_trk, last_trk;
sector_t first_blk, last_blk;
unsigned int blksize, off;
unsigned int recs_per_trk;
struct dasd_device *base;
struct req_iterator iter;
struct dasd_block *block;
unsigned int skip_block;
unsigned int blk_count;
struct request *req;
struct bio_vec bv;
sector_t curr_trk;
sector_t end_blk;
char *dst;
int rc;
req = (struct request *) cqr->callback_data;
base = cqr->block->base;
blksize = base->block->bp_block;
block = cqr->block;
private = base->private;
skip_block = 0;
blk_count = 0;
recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
sector_div(first_trk, recs_per_trk);
last_trk = last_blk =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
sector_div(last_trk, recs_per_trk);
rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
if (rc)
return rc;
/* sanity check if the current track from sense data is valid */
if (curr_trk < first_trk || curr_trk > last_trk) {
DBF_DEV_EVENT(DBF_WARNING, base,
"ESE error track %llu not within range %llu - %llu\n",
curr_trk, first_trk, last_trk);
return -EINVAL;
}
/*
	 * if a track other than the first got the NRF error we have to skip
	 * over valid blocks
*/
if (curr_trk != first_trk)
skip_block = curr_trk * recs_per_trk - first_blk;
/* we have no information beyond the current track */
end_blk = (curr_trk + 1) * recs_per_trk;
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
if (first_blk + blk_count >= end_blk) {
cqr->proc_bytes = blk_count * blksize;
return 0;
}
if (dst && !skip_block)
memset(dst, 0, blksize);
else
skip_block--;
dst += blksize;
blk_count++;
}
}
return 0;
}
/*
* Helper function to count consecutive records of a single track.
*/
static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
int max)
{
int head;
int i;
head = fmt_buffer[start].head;
/*
* There are 3 conditions where we stop counting:
* - if data reoccurs (same head and record may reoccur), which may
* happen due to the way DASD_ECKD_CCW_READ_COUNT works
* - when the head changes, because we're iterating over several tracks
* then (DASD_ECKD_CCW_READ_COUNT_MT)
* - when we've reached the end of sensible data in the buffer (the
* record will be 0 then)
*/
for (i = start; i < max; i++) {
if (i > start) {
if ((fmt_buffer[i].head == head &&
fmt_buffer[i].record == 1) ||
fmt_buffer[i].head != head ||
fmt_buffer[i].record == 0)
break;
}
}
return i - start;
}
/*
* Evaluate a given range of tracks. Data like number of records, blocksize,
* record ids, and key length are compared with expected data.
*
* If a mismatch occurs, the corresponding error bit is set, as well as
* additional information, depending on the error.
*/
static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
struct format_check_t *cdata,
int rpt_max, int rpt_exp,
int trk_per_cyl, int tpm)
{
struct ch_t geo;
int max_entries;
int count = 0;
int trkcount;
int blksize;
int pos = 0;
int i, j;
int kl;
trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
max_entries = trkcount * rpt_max;
for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
/* Calculate the correct next starting position in the buffer */
if (tpm) {
while (fmt_buffer[pos].record == 0 &&
fmt_buffer[pos].dl == 0) {
if (pos++ > max_entries)
break;
}
} else {
if (i != cdata->expect.start_unit)
pos += rpt_max - count;
}
/* Calculate the expected geo values for the current track */
set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
/* Count and check number of records */
count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
if (count < rpt_exp) {
cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
break;
}
if (count > rpt_exp) {
cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
break;
}
for (j = 0; j < count; j++, pos++) {
blksize = cdata->expect.blksize;
kl = 0;
/*
* Set special values when checking CDL formatted
* devices.
*/
if ((cdata->expect.intensity & 0x08) &&
geo.cyl == 0 && geo.head == 0) {
if (j < 3) {
blksize = sizes_trk0[j] - 4;
kl = 4;
}
}
if ((cdata->expect.intensity & 0x08) &&
geo.cyl == 0 && geo.head == 1) {
blksize = LABEL_SIZE - 44;
kl = 44;
}
/* Check blocksize */
if (fmt_buffer[pos].dl != blksize) {
cdata->result = DASD_FMT_ERR_BLKSIZE;
goto out;
}
/* Check if key length is 0 */
if (fmt_buffer[pos].kl != kl) {
cdata->result = DASD_FMT_ERR_KEY_LENGTH;
goto out;
}
/* Check if record_id is correct */
if (fmt_buffer[pos].cyl != geo.cyl ||
fmt_buffer[pos].head != geo.head ||
fmt_buffer[pos].record != (j + 1)) {
cdata->result = DASD_FMT_ERR_RECORD_ID;
goto out;
}
}
}
out:
/*
* In case of no errors, we need to decrease by one
* to get the correct positions.
*/
if (!cdata->result) {
i--;
pos--;
}
cdata->unit = i;
cdata->num_records = count;
cdata->rec = fmt_buffer[pos].record;
cdata->blksize = fmt_buffer[pos].dl;
cdata->key_length = fmt_buffer[pos].kl;
}
/*
* Check the format of a range of tracks of a DASD.
*/
static int dasd_eckd_check_device_format(struct dasd_device *base,
struct format_check_t *cdata,
int enable_pav)
{
struct dasd_eckd_private *private = base->private;
struct eckd_count *fmt_buffer;
struct irb irb;
int rpt_max, rpt_exp;
int fmt_buffer_size;
int trk_per_cyl;
int trkcount;
int tpm = 0;
int rc;
trk_per_cyl = private->rdc_data.trk_per_cyl;
/* Get maximum and expected amount of records per track */
rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
if (!fmt_buffer)
return -ENOMEM;
/*
* A certain FICON feature subset is needed to operate in transport
* mode. Additionally, the support for transport mode is implicitly
* checked by comparing the buffer size with fcx_max_data. As long as
* the buffer size is smaller, we can operate in transport mode and
* process multiple tracks. If not, only one track at a time is
* processed using command mode.
*/
if ((private->features.feature[40] & 0x04) &&
fmt_buffer_size <= private->fcx_max_data)
tpm = 1;
rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
tpm, fmt_buffer, rpt_max, &irb);
if (rc && rc != -EIO)
goto out;
if (rc == -EIO) {
/*
* If our first attempt with transport mode enabled comes back
* with an incorrect length error, we're going to retry the
* check with command mode.
*/
if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
tpm = 0;
rc = dasd_eckd_format_process_data(base, &cdata->expect,
enable_pav, tpm,
fmt_buffer, rpt_max,
&irb);
if (rc)
goto out;
} else {
goto out;
}
}
dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
trk_per_cyl, tpm);
out:
kfree(fmt_buffer);
return rc;
}
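/*
* Prepare a terminated request for a restart: mark it failed when it is
* out of retries, otherwise refill it and, if it was started on an alias
* device, redirect the channel program back to the base device.
*/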
static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
{
if (cqr->retries < 0) {
cqr->status = DASD_CQR_FAILED;
return;
}
cqr->status = DASD_CQR_FILLED;
if (cqr->block && (cqr->startdev != cqr->block->base)) {
dasd_eckd_reset_ccw_to_base_io(cqr);
cqr->startdev = cqr->block->base;
cqr->lpm = dasd_path_get_opm(cqr->block->base);
}
}
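/*
* Select the error recovery procedure based on the control unit type:
* 3990-family storage controls get the full 3990 ERP, everything else
* falls back to the default ERP action.
*/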
static dasd_erp_fn_t
dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
{
struct dasd_device *device = (struct dasd_device *) cqr->startdev;
struct ccw_device *cdev = device->cdev;
switch (cdev->id.cu_type) {
case 0x3990:
case 0x2105:
case 0x2107:
case 0x1750:
return dasd_3990_erp_action;
case 0x9343:
case 0x3880:
default:
return dasd_default_erp_action;
}
}
static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
{
return dasd_default_erp_postaction;
}
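/*
* Inspect an interrupt for conditions that need special handling:
* state change pending, summary unit check, service information
* messages (SIM) and loss of a device reservation.
*/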
static void dasd_eckd_check_for_device_change(struct dasd_device *device,
struct dasd_ccw_req *cqr,
struct irb *irb)
{
char mask;
char *sense = NULL;
struct dasd_eckd_private *private = device->private;
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((scsw_dstat(&irb->scsw) & mask) == mask) {
/*
* for alias devices only, not during offline processing,
* and only if not suspended
*/
if (!device->block && private->lcu &&
device->state == DASD_STATE_ONLINE &&
!test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
!test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
/* schedule worker to reload device */
dasd_reload_device(device);
}
dasd_generic_handle_state_change(device);
return;
}
sense = dasd_get_sense(irb);
if (!sense)
return;
/* summary unit check */
if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
(scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"eckd suc: device already notified");
return;
}
sense = dasd_get_sense(irb);
if (!sense) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"eckd suc: no reason code available");
clear_bit(DASD_FLAG_SUC, &device->flags);
return;
}
private->suc_reason = sense[8];
DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
"eckd handle summary unit check: reason",
private->suc_reason);
dasd_get_device(device);
if (!schedule_work(&device->suc_work))
dasd_put_device(device);
return;
}
/* service information message SIM */
if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
dasd_3990_erp_handle_sim(device, sense);
return;
}
/* loss of device reservation is handled via base devices only
* as alias devices may be used with several bases
*/
if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
(sense[7] == 0x3F) &&
(scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
if (device->features & DASD_FEATURE_FAILONSLCK)
set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
dev_err(&device->cdev->dev,
"The device reservation was lost\n");
}
}
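/*
* Sanity checks for a space release request: both track numbers must lie
* within the volume and the start track must not exceed the stop track.
*/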
static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
unsigned int first_trk,
unsigned int last_trk)
{
struct dasd_eckd_private *private = device->private;
unsigned int trks_per_vol;
int rc = 0;
trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
if (first_trk >= trks_per_vol) {
dev_warn(&device->cdev->dev,
"Start track number %u used in the space release command is too big\n",
first_trk);
rc = -EINVAL;
} else if (last_trk >= trks_per_vol) {
dev_warn(&device->cdev->dev,
"Stop track number %u used in the space release command is too big\n",
last_trk);
rc = -EINVAL;
} else if (first_trk > last_trk) {
dev_warn(&device->cdev->dev,
"Start track %u used in the space release command exceeds the end track\n",
first_trk);
rc = -EINVAL;
}
return rc;
}
/*
* Helper function to count the amount of involved extents within a given range
* with extent alignment in mind.
*/
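/*
* Example with hypothetical values: for trks_per_ext = 1113 the range
* from = 500, to = 3000 covers a leading partial extent (tracks
* 500-1112), one full extent (1113-2225) and a trailing partial extent
* (2226-3000), so count_exts() returns 3.
*/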
static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
{
int cur_pos = 0;
int count = 0;
int tmp;
if (from == to)
return 1;
/* Count first partial extent */
if (from % trks_per_ext != 0) {
tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
if (tmp > to)
tmp = to;
cur_pos = tmp - from + 1;
count++;
}
/* Count full extents */
if (to - (from + cur_pos) + 1 >= trks_per_ext) {
tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
cur_pos = tmp;
}
/* Count last partial extent */
if (cur_pos < to)
count++;
return count;
}
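/*
* Check whether the device is part of a PPRC copy relation. Returns the
* reported copy state of the device, 0 if PPRC is not enabled, or a
* negative error code.
*/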
static int dasd_in_copy_relation(struct dasd_device *device)
{
struct dasd_pprc_data_sc4 *temp;
int rc;
if (!dasd_eckd_pprc_enabled(device))
return 0;
temp = kzalloc(sizeof(*temp), GFP_KERNEL);
if (!temp)
return -ENOMEM;
rc = dasd_eckd_query_pprc_status(device, temp);
if (!rc)
rc = temp->dev_info[0].state;
kfree(temp);
return rc;
}
/*
* Release allocated space for a given range or an entire volume.
*/
static struct dasd_ccw_req *
dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
struct request *req, unsigned int first_trk,
unsigned int last_trk, int by_extent)
{
struct dasd_eckd_private *private = device->private;
struct dasd_dso_ras_ext_range *ras_range;
struct dasd_rssd_features *features;
struct dasd_dso_ras_data *ras_data;
u16 heads, beg_head, end_head;
int cur_to_trk, cur_from_trk;
struct dasd_ccw_req *cqr;
u32 beg_cyl, end_cyl;
int copy_relation;
struct ccw1 *ccw;
int trks_per_ext;
size_t ras_size;
size_t size;
int nr_exts;
void *rq;
int i;
if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
return ERR_PTR(-EINVAL);
copy_relation = dasd_in_copy_relation(device);
if (copy_relation < 0)
return ERR_PTR(copy_relation);
rq = req ? blk_mq_rq_to_pdu(req) : NULL;
features = &private->features;
trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
nr_exts = 0;
if (by_extent)
nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
ras_size = sizeof(*ras_data);
size = ras_size + (nr_exts * sizeof(*ras_range));
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate RAS request");
return cqr;
}
ras_data = cqr->data;
memset(ras_data, 0, size);
ras_data->order = DSO_ORDER_RAS;
ras_data->flags.vol_type = 0; /* CKD volume */
/* Release specified extents or entire volume */
ras_data->op_flags.by_extent = by_extent;
/*
* This bit guarantees initialisation of tracks within an extent that is
* not fully specified, but is only supported with a certain feature
* subset and for devices not in a copy relation.
*/
if (features->feature[56] & 0x01 && !copy_relation)
ras_data->op_flags.guarantee_init = 1;
ras_data->lss = private->conf.ned->ID;
ras_data->dev_addr = private->conf.ned->unit_addr;
ras_data->nr_exts = nr_exts;
if (by_extent) {
heads = private->rdc_data.trk_per_cyl;
cur_from_trk = first_trk;
cur_to_trk = first_trk + trks_per_ext -
(first_trk % trks_per_ext) - 1;
if (cur_to_trk > last_trk)
cur_to_trk = last_trk;
ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
for (i = 0; i < nr_exts; i++) {
beg_cyl = cur_from_trk / heads;
beg_head = cur_from_trk % heads;
end_cyl = cur_to_trk / heads;
end_head = cur_to_trk % heads;
set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
set_ch_t(&ras_range->end_ext, end_cyl, end_head);
cur_from_trk = cur_to_trk + 1;
cur_to_trk = cur_from_trk + trks_per_ext - 1;
if (cur_to_trk > last_trk)
cur_to_trk = last_trk;
ras_range++;
}
}
ccw = cqr->cpaddr;
ccw->cda = virt_to_dma32(cqr->data);
ccw->cmd_code = DASD_ECKD_CCW_DSO;
ccw->count = size;
cqr->startdev = device;
cqr->memdev = device;
cqr->block = block;
cqr->retries = 256;
cqr->expires = device->default_expires * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
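/*
* Release the allocated space of an entire volume with a single DSO RAS
* request and wait for its completion.
*/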
static int dasd_eckd_release_space_full(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
rc = dasd_sleep_on_interruptible(cqr);
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
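/*
* Release the allocated space of a given track range. The range is split
* into chunks that respect the maximum number of extents per RAS request,
* the resulting requests are queued and processed; on -ENOMEM the already
* queued requests are processed first and the remainder is retried.
*/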
static int dasd_eckd_release_space_trks(struct dasd_device *device,
unsigned int from, unsigned int to)
{
struct dasd_eckd_private *private = device->private;
struct dasd_block *block = device->block;
struct dasd_ccw_req *cqr, *n;
struct list_head ras_queue;
unsigned int device_exts;
int trks_per_ext;
int stop, step;
int cur_pos;
int rc = 0;
int retry;
INIT_LIST_HEAD(&ras_queue);
device_exts = private->real_cyl / dasd_eckd_ext_size(device);
trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
/* Make sure device limits are not exceeded */
step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
cur_pos = from;
do {
retry = 0;
while (cur_pos < to) {
stop = cur_pos + step -
((cur_pos + step) % trks_per_ext) - 1;
if (stop > to)
stop = to;
cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
if (IS_ERR(cqr)) {
rc = PTR_ERR(cqr);
if (rc == -ENOMEM) {
if (list_empty(&ras_queue))
goto out;
retry = 1;
break;
}
goto err_out;
}
spin_lock_irq(&block->queue_lock);
list_add_tail(&cqr->blocklist, &ras_queue);
spin_unlock_irq(&block->queue_lock);
cur_pos = stop + 1;
}
rc = dasd_sleep_on_queue_interruptible(&ras_queue);
err_out:
list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
device = cqr->startdev;
private = device->private;
spin_lock_irq(&block->queue_lock);
list_del_init(&cqr->blocklist);
spin_unlock_irq(&block->queue_lock);
dasd_sfree_request(cqr, device);
private->count--;
}
} while (retry);
out:
return rc;
}
static int dasd_eckd_release_space(struct dasd_device *device,
struct format_data_t *rdata)
{
if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
return dasd_eckd_release_space_full(device);
else if (rdata->intensity == 0)
return dasd_eckd_release_space_trks(device, rdata->start_unit,
rdata->stop_unit);
else
return -EINVAL;
}
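/*
* Build a command mode channel program with one read/write CCW per block.
* Handles the CDL special blocks on the first two tracks and optionally
* routes the data through the dasd_page_cache.
*/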
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
struct dasd_device *startdev,
struct dasd_block *block,
struct request *req,
sector_t first_rec,
sector_t last_rec,
sector_t first_trk,
sector_t last_trk,
unsigned int first_offs,
unsigned int last_offs,
unsigned int blk_per_trk,
unsigned int blksize)
{
struct dasd_eckd_private *private;
dma64_t *idaws;
struct LO_eckd_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
struct bio_vec bv;
char *dst;
unsigned int off;
int count, cidaw, cplength, datasize;
sector_t recid;
unsigned char cmd, rcmd;
int use_prefix;
struct dasd_device *basedev;
basedev = block->base;
private = basedev->private;
if (rq_data_dir(req) == READ)
cmd = DASD_ECKD_CCW_READ_MT;
else if (rq_data_dir(req) == WRITE)
cmd = DASD_ECKD_CCW_WRITE_MT;
else
return ERR_PTR(-EINVAL);
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
rq_for_each_segment(bv, req, iter) {
if (bv.bv_len & (blksize - 1))
/* ECKD can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv.bv_len >> (block->s2b_shift + 9);
if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
cidaw += bv.bv_len >> (block->s2b_shift + 9);
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
return ERR_PTR(-EINVAL);
/* use the prefix command if available */
use_prefix = private->features.feature[8] & 0x01;
if (use_prefix) {
/* 1x prefix + number of blocks */
cplength = 2 + count;
/* 1x prefix + cidaws*sizeof(long) */
datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
cidaw * sizeof(unsigned long);
} else {
/* 1x define extent + 1x locate record + number of blocks */
cplength = 2 + count;
/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
datasize = sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data) +
cidaw * sizeof(unsigned long);
}
/* Find out the number of additional locate record ccws for cdl. */
if (private->uses_cdl && first_rec < 2*blk_per_trk) {
if (last_rec >= 2*blk_per_trk)
count = 2*blk_per_trk - first_rec;
cplength += count;
datasize += count*sizeof(struct LO_eckd_data);
}
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
startdev, blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
/* First ccw is define extent or prefix. */
if (use_prefix) {
if (prefix(ccw++, cqr->data, first_trk,
last_trk, cmd, basedev, startdev) == -EAGAIN) {
/* Clock not in sync and XRC is enabled.
* Try again later.
*/
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data));
} else {
if (define_extent(ccw++, cqr->data, first_trk,
last_trk, cmd, basedev, 0) == -EAGAIN) {
/* Clock not in sync and XRC is enabled.
* Try again later.
*/
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
idaws = (dma64_t *)(cqr->data + sizeof(struct DE_eckd_data));
}
/* Build locate_record + read/write CCWs. */
LO_data = (struct LO_eckd_data *) (idaws + cidaw);
recid = first_rec;
if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
/* Only standard blocks so there is just one locate record. */
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
last_rec - recid + 1, cmd, basedev, blksize);
}
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
GFP_DMA | __GFP_NOWARN);
if (copy && rq_data_dir(req) == WRITE)
memcpy(copy + bv.bv_offset, dst, bv.bv_len);
if (copy)
dst = copy + bv.bv_offset;
}
for (off = 0; off < bv.bv_len; off += blksize) {
sector_t trkid = recid;
unsigned int recoffs = sector_div(trkid, blk_per_trk);
rcmd = cmd;
count = blksize;
/* Locate record for cdl special block ? */
if (private->uses_cdl && recid < 2*blk_per_trk) {
if (dasd_eckd_cdl_special(blk_per_trk, recid)){
rcmd |= 0x8;
count = dasd_eckd_cdl_reclen(recid);
if (count < blksize &&
rq_data_dir(req) == READ)
memset(dst + count, 0xe5,
blksize - count);
}
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++,
trkid, recoffs + 1,
1, rcmd, basedev, count);
}
/* Locate record for standard blocks ? */
if (private->uses_cdl && recid == 2*blk_per_trk) {
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++,
trkid, recoffs + 1,
last_rec - recid + 1,
cmd, basedev, count);
}
/* Read/write ccw. */
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = rcmd;
ccw->count = count;
if (idal_is_needed(dst, blksize)) {
ccw->cda = virt_to_dma32(idaws);
ccw->flags = CCW_FLAG_IDA;
idaws = idal_create_words(idaws, dst, blksize);
} else {
ccw->cda = virt_to_dma32(dst);
ccw->flags = 0;
}
ccw++;
dst += blksize;
recid++;
}
}
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
if (dasd_eckd_is_ese(basedev)) {
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
}
return cqr;
}
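/*
* Build a command mode channel program that uses the read/write track
* data commands, i.e. one data transfer CCW per track instead of one
* CCW per block.
*/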
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
struct dasd_device *startdev,
struct dasd_block *block,
struct request *req,
sector_t first_rec,
sector_t last_rec,
sector_t first_trk,
sector_t last_trk,
unsigned int first_offs,
unsigned int last_offs,
unsigned int blk_per_trk,
unsigned int blksize)
{
dma64_t *idaws;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
struct bio_vec bv;
char *dst, *idaw_dst;
unsigned int cidaw, cplength, datasize;
unsigned int tlf;
sector_t recid;
unsigned char cmd;
struct dasd_device *basedev;
unsigned int trkcount, count, count_to_trk_end;
unsigned int idaw_len, seg_len, part_len, len_to_track_end;
unsigned char new_track, end_idaw;
sector_t trkid;
unsigned int recoffs;
basedev = block->base;
if (rq_data_dir(req) == READ)
cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
else if (rq_data_dir(req) == WRITE)
cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
else
return ERR_PTR(-EINVAL);
/* Track-based I/O needs IDAWs for each page, and not just for
* 64 bit addresses. We need additional idals for pages
* that get filled from two tracks, so we use the number
* of records as an upper limit.
*/
cidaw = last_rec - first_rec + 1;
trkcount = last_trk - first_trk + 1;
/* 1x prefix + one read/write ccw per track */
cplength = 1 + trkcount;
datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
startdev, blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
/* transfer length factor: how many bytes to read from the last track */
if (first_trk == last_trk)
tlf = last_offs - first_offs + 1;
else
tlf = last_offs + 1;
tlf *= blksize;
if (prefix_LRE(ccw++, cqr->data, first_trk,
last_trk, cmd, basedev, startdev,
1 /* format */, first_offs + 1,
trkcount, blksize,
tlf) == -EAGAIN) {
/* Clock not in sync and XRC is enabled.
* Try again later.
*/
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
/*
* The translation of a request into ccw programs must meet the
* following conditions:
* - all idaws but the first and the last must address full pages
* (or 2K blocks on 31-bit)
* - the scope of a ccw and its idal ends at the track boundary
*/
idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data));
recid = first_rec;
new_track = 1;
end_idaw = 0;
len_to_track_end = 0;
idaw_dst = NULL;
idaw_len = 0;
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
seg_len = bv.bv_len;
while (seg_len) {
if (new_track) {
trkid = recid;
recoffs = sector_div(trkid, blk_per_trk);
count_to_trk_end = blk_per_trk - recoffs;
count = min((last_rec - recid + 1),
(sector_t)count_to_trk_end);
len_to_track_end = count * blksize;
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = cmd;
ccw->count = len_to_track_end;
ccw->cda = virt_to_dma32(idaws);
ccw->flags = CCW_FLAG_IDA;
ccw++;
recid += count;
new_track = 0;
/* first idaw for a ccw may start anywhere */
if (!idaw_dst)
idaw_dst = dst;
}
/* If we start a new idaw, we must make sure that it
* starts on an IDA_BLOCK_SIZE boundary.
* If we continue an idaw, we must make sure that the
* current segment begins where the idaw accumulated so
* far ends.
*/
if (!idaw_dst) {
if ((unsigned long)(dst) & (IDA_BLOCK_SIZE - 1)) {
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-ERANGE);
} else
idaw_dst = dst;
}
if ((idaw_dst + idaw_len) != dst) {
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-ERANGE);
}
part_len = min(seg_len, len_to_track_end);
seg_len -= part_len;
dst += part_len;
idaw_len += part_len;
len_to_track_end -= part_len;
/* The collected memory area ends on an IDA_BLOCK boundary,
* so create an idaw.
* idal_create_words will handle cases where idaw_len
* is larger than IDA_BLOCK_SIZE.
*/
if (!((unsigned long)(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
end_idaw = 1;
/* We also need to end the idaw at track end */
if (!len_to_track_end) {
new_track = 1;
end_idaw = 1;
}
if (end_idaw) {
idaws = idal_create_words(idaws, idaw_dst,
idaw_len);
idaw_dst = NULL;
idaw_len = 0;
end_idaw = 0;
}
}
}
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
if (dasd_eckd_is_ese(basedev))
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
return cqr;
}
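/*
* Set up the prefix data (define extent + locate record) for a transport
* mode request and add it as the first DCW of the ITCW. The start sector
* passed to the locate record is approximated from the device geometry
* for 3390 and 3380 type devices.
*/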
static int prepare_itcw(struct itcw *itcw,
unsigned int trk, unsigned int totrk, int cmd,
struct dasd_device *basedev,
struct dasd_device *startdev,
unsigned int rec_on_trk, int count,
unsigned int blksize,
unsigned int total_data_size,
unsigned int tlf,
unsigned int blk_per_trk)
{
struct PFX_eckd_data pfxdata;
struct dasd_eckd_private *basepriv, *startpriv;
struct DE_eckd_data *dedata;
struct LRE_eckd_data *lredata;
struct dcw *dcw;
u32 begcyl, endcyl;
u16 heads, beghead, endhead;
u8 pfx_cmd;
int rc = 0;
int sector = 0;
int dn, d;
/* setup prefix data */
basepriv = basedev->private;
startpriv = startdev->private;
dedata = &pfxdata.define_extent;
lredata = &pfxdata.locate_record;
memset(&pfxdata, 0, sizeof(pfxdata));
pfxdata.format = 1; /* PFX with LRE */
pfxdata.base_address = basepriv->conf.ned->unit_addr;
pfxdata.base_lss = basepriv->conf.ned->ID;
pfxdata.validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
pfxdata.validity.verify_base = 1;
if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
pfxdata.validity.verify_base = 1;
pfxdata.validity.hyper_pav = 1;
}
switch (cmd) {
case DASD_ECKD_CCW_READ_TRACK_DATA:
dedata->mask.perm = 0x1;
dedata->attributes.operation = basepriv->attrib.operation;
dedata->blk_size = blksize;
dedata->ga_extended |= 0x42;
lredata->operation.orientation = 0x0;
lredata->operation.operation = 0x0C;
lredata->auxiliary.check_bytes = 0x01;
pfx_cmd = DASD_ECKD_CCW_PFX_READ;
break;
case DASD_ECKD_CCW_WRITE_TRACK_DATA:
dedata->mask.perm = 0x02;
dedata->attributes.operation = basepriv->attrib.operation;
dedata->blk_size = blksize;
rc = set_timestamp(NULL, dedata, basedev);
dedata->ga_extended |= 0x42;
lredata->operation.orientation = 0x0;
lredata->operation.operation = 0x3F;
lredata->extended_operation = 0x23;
lredata->auxiliary.check_bytes = 0x2;
/*
* If XRC is supported, the System Time Stamp is set. The
* validity of the time stamp must be reflected in the prefix
* data as well.
*/
if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
pfx_cmd = DASD_ECKD_CCW_PFX;
break;
case DASD_ECKD_CCW_READ_COUNT_MT:
dedata->mask.perm = 0x1;
dedata->attributes.operation = DASD_BYPASS_CACHE;
dedata->ga_extended |= 0x42;
dedata->blk_size = blksize;
lredata->operation.orientation = 0x2;
lredata->operation.operation = 0x16;
lredata->auxiliary.check_bytes = 0x01;
pfx_cmd = DASD_ECKD_CCW_PFX_READ;
break;
default:
DBF_DEV_EVENT(DBF_ERR, basedev,
"prepare itcw, unknown opcode 0x%x", cmd);
BUG();
break;
}
if (rc)
return rc;
dedata->attributes.mode = 0x3; /* ECKD */
heads = basepriv->rdc_data.trk_per_cyl;
begcyl = trk / heads;
beghead = trk % heads;
endcyl = totrk / heads;
endhead = totrk % heads;
/* check for sequential prestage - enhance cylinder range */
if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
dedata->attributes.operation == DASD_SEQ_ACCESS) {
if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
endcyl += basepriv->attrib.nr_cyl;
else
endcyl = (basepriv->real_cyl - 1);
}
set_ch_t(&dedata->beg_ext, begcyl, beghead);
set_ch_t(&dedata->end_ext, endcyl, endhead);
dedata->ep_format = 0x20; /* records per track is valid */
dedata->ep_rec_per_track = blk_per_trk;
if (rec_on_trk) {
switch (basepriv->rdc_data.dev_type) {
case 0x3390:
dn = ceil_quot(blksize + 6, 232);
d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
break;
case 0x3380:
d = 7 + ceil_quot(blksize + 12, 32);
sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
break;
}
}
if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
lredata->auxiliary.length_valid = 0;
lredata->auxiliary.length_scope = 0;
lredata->sector = 0xff;
} else {
lredata->auxiliary.length_valid = 1;
lredata->auxiliary.length_scope = 1;
lredata->sector = sector;
}
lredata->auxiliary.imbedded_ccw_valid = 1;
lredata->length = tlf;
lredata->imbedded_ccw = cmd;
lredata->count = count;
set_ch_t(&lredata->seek_addr, begcyl, beghead);
lredata->search_arg.cyl = lredata->seek_addr.cyl;
lredata->search_arg.head = lredata->seek_addr.head;
lredata->search_arg.record = rec_on_trk;
dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
&pfxdata, sizeof(pfxdata), total_data_size);
return PTR_ERR_OR_ZERO(dcw);
}
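/*
* Build a transport mode (TCW/TIDAW based) channel program for track
* based read/write requests.
*/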
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
struct dasd_device *startdev,
struct dasd_block *block,
struct request *req,
sector_t first_rec,
sector_t last_rec,
sector_t first_trk,
sector_t last_trk,
unsigned int first_offs,
unsigned int last_offs,
unsigned int blk_per_trk,
unsigned int blksize)
{
struct dasd_ccw_req *cqr;
struct req_iterator iter;
struct bio_vec bv;
char *dst;
unsigned int trkcount, ctidaw;
unsigned char cmd;
struct dasd_device *basedev;
unsigned int tlf;
struct itcw *itcw;
struct tidaw *last_tidaw = NULL;
int itcw_op;
size_t itcw_size;
u8 tidaw_flags;
unsigned int seg_len, part_len, len_to_track_end;
unsigned char new_track;
sector_t recid, trkid;
unsigned int offs;
unsigned int count, count_to_trk_end;
int ret;
basedev = block->base;
if (rq_data_dir(req) == READ) {
cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
itcw_op = ITCW_OP_READ;
} else if (rq_data_dir(req) == WRITE) {
cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
itcw_op = ITCW_OP_WRITE;
} else
return ERR_PTR(-EINVAL);
/* Track-based I/O needs to address all memory via TIDAWs,
* not just 64 bit addresses. This allows us to map
* each segment directly to one tidaw.
* In the case of write requests, additional tidaws may
* be needed when a segment crosses a track boundary.
*/
trkcount = last_trk - first_trk + 1;
ctidaw = 0;
rq_for_each_segment(bv, req, iter) {
++ctidaw;
}
if (rq_data_dir(req) == WRITE)
ctidaw += (last_trk - first_trk);
/* Allocate the ccw request. */
itcw_size = itcw_calc_size(0, ctidaw, 0);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
/* transfer length factor: how many bytes to read from the last track */
if (first_trk == last_trk)
tlf = last_offs - first_offs + 1;
else
tlf = last_offs + 1;
tlf *= blksize;
itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
if (IS_ERR(itcw)) {
ret = -EINVAL;
goto out_error;
}
cqr->cpaddr = itcw_get_tcw(itcw);
if (prepare_itcw(itcw, first_trk, last_trk,
cmd, basedev, startdev,
first_offs + 1,
trkcount, blksize,
(last_rec - first_rec + 1) * blksize,
tlf, blk_per_trk) == -EAGAIN) {
/* Clock not in sync and XRC is enabled.
* Try again later.
*/
ret = -EAGAIN;
goto out_error;
}
len_to_track_end = 0;
/*
* A tidaw can address 4k of memory, but must not cross page boundaries.
* We can let the block layer handle this by setting seg_boundary_mask
* to page boundaries and max_segment_size to page size when setting up
* the request queue.
* For write requests, a TIDAW must not cross track boundaries, because
* we have to set the CBC flag on the last tidaw for each track.
*/
if (rq_data_dir(req) == WRITE) {
new_track = 1;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
seg_len = bv.bv_len;
while (seg_len) {
if (new_track) {
trkid = recid;
offs = sector_div(trkid, blk_per_trk);
count_to_trk_end = blk_per_trk - offs;
count = min((last_rec - recid + 1),
(sector_t)count_to_trk_end);
len_to_track_end = count * blksize;
recid += count;
new_track = 0;
}
part_len = min(seg_len, len_to_track_end);
seg_len -= part_len;
len_to_track_end -= part_len;
/* We need to end the tidaw at track end */
if (!len_to_track_end) {
new_track = 1;
tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
} else
tidaw_flags = 0;
last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
dst, part_len);
if (IS_ERR(last_tidaw)) {
ret = -EINVAL;
goto out_error;
}
dst += part_len;
}
}
} else {
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
last_tidaw = itcw_add_tidaw(itcw, 0x00,
dst, bv.bv_len);
if (IS_ERR(last_tidaw)) {
ret = -EINVAL;
goto out_error;
}
}
}
last_tidaw->flags |= TIDAW_FLAGS_LAST;
last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
itcw_finalize(itcw);
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->cpmode = 1;
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
if (dasd_eckd_is_ese(basedev)) {
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
}
return cqr;
out_error:
dasd_sfree_request(cqr, startdev);
return ERR_PTR(ret);
}
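/*
* Build a channel program for a block layer request. Depending on the
* device features and the request size this is either a transport mode
* program, a command mode program using track data commands, or the
* plain command mode program with one CCW per block.
*/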
static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
struct dasd_block *block,
struct request *req)
{
int cmdrtd, cmdwtd;
int use_prefix;
int fcx_multitrack;
struct dasd_eckd_private *private;
struct dasd_device *basedev;
sector_t first_rec, last_rec;
sector_t first_trk, last_trk;
unsigned int first_offs, last_offs;
unsigned int blk_per_trk, blksize;
int cdlspecial;
unsigned int data_size;
struct dasd_ccw_req *cqr;
basedev = block->base;
private = basedev->private;
/* Calculate number of blocks/records per track. */
blksize = block->bp_block;
blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
if (blk_per_trk == 0)
return ERR_PTR(-EINVAL);
/* Calculate record id of first and last block. */
first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
first_offs = sector_div(first_trk, blk_per_trk);
last_rec = last_trk =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
last_offs = sector_div(last_trk, blk_per_trk);
cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
fcx_multitrack = private->features.feature[40] & 0x20;
data_size = blk_rq_bytes(req);
if (data_size % blksize)
return ERR_PTR(-EINVAL);
/* tpm write request add CBC data on each track boundary */
if (rq_data_dir(req) == WRITE)
data_size += (last_trk - first_trk) * 4;
/* is read track data and write track data in command mode supported? */
cmdrtd = private->features.feature[9] & 0x20;
cmdwtd = private->features.feature[12] & 0x40;
use_prefix = private->features.feature[8] & 0x01;
cqr = NULL;
if (cdlspecial || dasd_page_cache) {
/* do nothing, just fall through to the cmd mode single case */
} else if ((data_size <= private->fcx_max_data)
&& (fcx_multitrack || (first_trk == last_trk))) {
cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
first_rec, last_rec,
first_trk, last_trk,
first_offs, last_offs,
blk_per_trk, blksize);
if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
(PTR_ERR(cqr) != -ENOMEM))
cqr = NULL;
} else if (use_prefix &&
(((rq_data_dir(req) == READ) && cmdrtd) ||
((rq_data_dir(req) == WRITE) && cmdwtd))) {
cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
first_rec, last_rec,
first_trk, last_trk,
first_offs, last_offs,
blk_per_trk, blksize);
if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
(PTR_ERR(cqr) != -ENOMEM))
cqr = NULL;
}
if (!cqr)
cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
first_rec, last_rec,
first_trk, last_trk,
first_offs, last_offs,
blk_per_trk, blksize);
return cqr;
}
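/*
* Build a channel program for raw track access (DASD_FEATURE_USERAW),
* reading or writing complete track images.
*/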
static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
struct dasd_block *block,
struct request *req)
{
sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
unsigned int seg_len, len_to_track_end;
unsigned int cidaw, cplength, datasize;
sector_t first_trk, last_trk, sectors;
struct dasd_eckd_private *base_priv;
struct dasd_device *basedev;
struct req_iterator iter;
struct dasd_ccw_req *cqr;
unsigned int trkcount;
unsigned int size;
unsigned char cmd;
struct bio_vec bv;
struct ccw1 *ccw;
dma64_t *idaws;
int use_prefix;
void *data;
char *dst;
/*
* raw track access needs to be a multiple of 64k and on a 64k boundary.
* For read requests we can fix an incorrect alignment by padding
* the request with dummy pages.
*/
start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
DASD_RAW_SECTORS_PER_TRACK;
end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
DASD_RAW_SECTORS_PER_TRACK;
basedev = block->base;
if ((start_padding_sectors || end_padding_sectors) &&
(rq_data_dir(req) == WRITE)) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"raw write not track aligned (%llu,%llu) req %p",
start_padding_sectors, end_padding_sectors, req);
return ERR_PTR(-EINVAL);
}
first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
DASD_RAW_SECTORS_PER_TRACK;
trkcount = last_trk - first_trk + 1;
if (rq_data_dir(req) == READ)
cmd = DASD_ECKD_CCW_READ_TRACK;
else if (rq_data_dir(req) == WRITE)
cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
else
return ERR_PTR(-EINVAL);
/*
* Raw track based I/O needs IDAWs for each page,
* and not just for 64 bit addresses.
*/
cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
/*
* struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
* of extended parameter. This is needed for write full track.
*/
base_priv = basedev->private;
use_prefix = base_priv->features.feature[8] & 0x01;
if (use_prefix) {
cplength = 1 + trkcount;
size = sizeof(struct PFX_eckd_data) + 2;
} else {
cplength = 2 + trkcount;
size = sizeof(struct DE_eckd_data) +
sizeof(struct LRE_eckd_data) + 2;
}
size = ALIGN(size, 8);
datasize = size + cidaw * sizeof(unsigned long);
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
datasize, startdev, blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
data = cqr->data;
if (use_prefix) {
prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
startdev, 1, 0, trkcount, 0, 0);
} else {
define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
ccw[-1].flags |= CCW_FLAG_CC;
data += sizeof(struct DE_eckd_data);
locate_record_ext(ccw++, data, first_trk, 0,
trkcount, cmd, basedev, 0, 0);
}
idaws = (dma64_t *)(cqr->data + size);
len_to_track_end = 0;
if (start_padding_sectors) {
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = cmd;
/* maximum 3390 track size */
ccw->count = 57326;
/* 64k map to one track */
len_to_track_end = 65536 - start_padding_sectors * 512;
ccw->cda = virt_to_dma32(idaws);
ccw->flags |= CCW_FLAG_IDA;
ccw->flags |= CCW_FLAG_SLI;
ccw++;
for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
}
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
seg_len = bv.bv_len;
if (cmd == DASD_ECKD_CCW_READ_TRACK)
memset(dst, 0, seg_len);
if (!len_to_track_end) {
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = cmd;
/* maximum 3390 track size */
ccw->count = 57326;
/* 64k map to one track */
len_to_track_end = 65536;
ccw->cda = virt_to_dma32(idaws);
ccw->flags |= CCW_FLAG_IDA;
ccw->flags |= CCW_FLAG_SLI;
ccw++;
}
len_to_track_end -= seg_len;
idaws = idal_create_words(idaws, dst, seg_len);
}
for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ;
cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
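/*
* Free a command mode channel program. If the data was routed through
* the dasd_page_cache, copy read data back to the original request
* buffers and release the cache pages first.
*/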
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
struct dasd_eckd_private *private;
struct ccw1 *ccw;
struct req_iterator iter;
struct bio_vec bv;
char *dst, *cda;
unsigned int blksize, blk_per_trk, off;
sector_t recid;
int status;
if (!dasd_page_cache)
goto out;
private = cqr->block->base->private;
blksize = cqr->block->bp_block;
blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
ccw = cqr->cpaddr;
/* Skip over define extent & locate record. */
ccw++;
if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
ccw++;
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
/* Skip locate record. */
if (private->uses_cdl && recid <= 2*blk_per_trk)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
cda = dma64_to_virt(*((dma64_t *)dma32_to_virt(ccw->cda)));
else
cda = dma32_to_virt(ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
memcpy(dst, cda, bv.bv_len);
kmem_cache_free(dasd_page_cache,
(void *)((addr_t)cda & PAGE_MASK));
}
dst = NULL;
}
ccw++;
recid++;
}
}
out:
status = cqr->status == DASD_CQR_DONE;
dasd_sfree_request(cqr, cqr->memdev);
return status;
}
/*
* Modify ccw/tcw in cqr so it can be started on a base device.
*
* Note that this is not enough to restart the cqr!
* Either reset cqr->startdev as well (summary unit check handling)
* or restart via separate cqr (as in ERP handling).
*/
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
{
struct ccw1 *ccw;
struct PFX_eckd_data *pfxdata;
struct tcw *tcw;
struct tccb *tccb;
struct dcw *dcw;
if (cqr->cpmode == 1) {
tcw = cqr->cpaddr;
tccb = tcw_get_tccb(tcw);
dcw = (struct dcw *)&tccb->tca[0];
pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
pfxdata->validity.verify_base = 0;
pfxdata->validity.hyper_pav = 0;
} else {
ccw = cqr->cpaddr;
pfxdata = cqr->data;
if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
pfxdata->validity.verify_base = 0;
pfxdata->validity.hyper_pav = 0;
}
}
}
#define DASD_ECKD_CHANQ_MAX_SIZE 4
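/*
* Build a channel program on a suitable alias device (or on the base
* device if no alias is available) and account for it in the per-device
* channel queue counter.
*/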
static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
struct dasd_block *block,
struct request *req)
{
struct dasd_eckd_private *private;
struct dasd_device *startdev;
unsigned long flags;
struct dasd_ccw_req *cqr;
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
private = startdev->private;
if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
return ERR_PTR(-EBUSY);
spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
private->count++;
if ((base->features & DASD_FEATURE_USERAW))
cqr = dasd_eckd_build_cp_raw(startdev, block, req);
else
cqr = dasd_eckd_build_cp(startdev, block, req);
if (IS_ERR(cqr))
private->count--;
spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
return cqr;
}
static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
struct request *req)
{
struct dasd_eckd_private *private;
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
private = cqr->memdev->private;
private->count--;
spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
return dasd_eckd_free_cp(cqr, req);
}
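/*
* Fill in the dasd_information2_t structure for the DASD info ioctl with
* label block, format, device characteristics and configuration data.
*/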
static int
dasd_eckd_fill_info(struct dasd_device * device,
struct dasd_information2_t * info)
{
struct dasd_eckd_private *private = device->private;
info->label_block = 2;
info->FBA_layout = private->uses_cdl ? 0 : 1;
info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
info->characteristics_size = sizeof(private->rdc_data);
memcpy(info->characteristics, &private->rdc_data,
sizeof(private->rdc_data));
info->confdata_size = min_t(unsigned long, private->conf.len,
sizeof(info->configuration_data));
memcpy(info->configuration_data, private->conf.data,
info->confdata_size);
return 0;
}
/*
* SECTION: ioctl functions for eckd devices.
*/
/*
* Release device ioctl.
* Builds a channel program to release a previously reserved
* device (see dasd_eckd_reserve).
*/
static int
dasd_eckd_release(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
int useglobal;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
if (IS_ERR(cqr)) {
mutex_lock(&dasd_reserve_mutex);
useglobal = 1;
cqr = &dasd_reserve_req->cqr;
memset(cqr, 0, sizeof(*cqr));
memset(&dasd_reserve_req->ccw, 0,
sizeof(dasd_reserve_req->ccw));
cqr->cpaddr = &dasd_reserve_req->ccw;
cqr->data = &dasd_reserve_req->data;
cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 2; /* set retry counter to enable basic ERP */
cqr->expires = 2 * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
if (!rc)
clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
if (useglobal)
mutex_unlock(&dasd_reserve_mutex);
else
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* Reserve device ioctl.
* Options are set to 'synchronous wait for interrupt' and
* 'timeout the request'. This leads to a terminate IO if
* the interrupt is outstanding for a certain time.
*/
static int
dasd_eckd_reserve(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
int useglobal;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
if (IS_ERR(cqr)) {
mutex_lock(&dasd_reserve_mutex);
useglobal = 1;
cqr = &dasd_reserve_req->cqr;
memset(cqr, 0, sizeof(*cqr));
memset(&dasd_reserve_req->ccw, 0,
sizeof(dasd_reserve_req->ccw));
cqr->cpaddr = &dasd_reserve_req->ccw;
cqr->data = &dasd_reserve_req->data;
cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 2; /* set retry counter to enable basic ERP */
cqr->expires = 2 * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
if (!rc)
set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
if (useglobal)
mutex_unlock(&dasd_reserve_mutex);
else
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* Steal lock ioctl - unconditional reserve device.
* Builds a channel program to break a device's reservation
* (unconditional reserve).
*/
static int
dasd_eckd_steal_lock(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
int useglobal;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
if (IS_ERR(cqr)) {
mutex_lock(&dasd_reserve_mutex);
useglobal = 1;
cqr = &dasd_reserve_req->cqr;
memset(cqr, 0, sizeof(*cqr));
memset(&dasd_reserve_req->ccw, 0,
sizeof(dasd_reserve_req->ccw));
cqr->cpaddr = &dasd_reserve_req->ccw;
cqr->data = &dasd_reserve_req->data;
cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_SLCK;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 2; /* set retry counter to enable basic ERP */
cqr->expires = 2 * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
if (!rc)
set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
if (useglobal)
mutex_unlock(&dasd_reserve_mutex);
else
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* SNID - Sense Path Group ID
* This ioctl may be used in situations where I/O is stalled due to
* a reserve, so if the normal dasd_smalloc_request fails, we use the
* preallocated dasd_reserve_req.
*/
static int dasd_eckd_snid(struct dasd_device *device,
void __user *argp)
{
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
int useglobal;
struct dasd_snid_ioctl_data usrparm;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
return -EFAULT;
useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
sizeof(struct dasd_snid_data), device,
NULL);
if (IS_ERR(cqr)) {
mutex_lock(&dasd_reserve_mutex);
useglobal = 1;
cqr = &dasd_reserve_req->cqr;
memset(cqr, 0, sizeof(*cqr));
memset(&dasd_reserve_req->ccw, 0,
sizeof(dasd_reserve_req->ccw));
cqr->cpaddr = &dasd_reserve_req->ccw;
cqr->data = &dasd_reserve_req->data;
cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_SNID;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 12;
ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
cqr->retries = 5;
cqr->expires = 10 * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
cqr->lpm = usrparm.path_mask;
rc = dasd_sleep_on_immediatly(cqr);
/* verify that I/O processing didn't modify the path mask */
if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
rc = -EIO;
if (!rc) {
usrparm.data = *((struct dasd_snid_data *)cqr->data);
if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
rc = -EFAULT;
}
if (useglobal)
mutex_unlock(&dasd_reserve_mutex);
else
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* Read performance statistics
*/
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
struct dasd_psf_prssd_data *prssdp;
struct dasd_rssd_perf_stats_t *stats;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_perf_stats_t)),
device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
return PTR_ERR(cqr);
}
cqr->startdev = device;
cqr->memdev = device;
cqr->retries = 0;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->expires = 10 * HZ;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = 0x01; /* Performance Statistics */
prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - Performance Statistics */
stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
ccw->cda = virt_to_dma32(stats);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on(cqr);
if (rc == 0) {
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
if (copy_to_user(argp, stats,
sizeof(struct dasd_rssd_perf_stats_t)))
rc = -EFAULT;
}
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* Get attributes (cache operations)
* Returns the cache attributes used in Define Extent (DE).
*/
static int
dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
{
struct dasd_eckd_private *private = device->private;
struct attrib_data_t attrib = private->attrib;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!argp)
return -EINVAL;
rc = 0;
if (copy_to_user(argp, (long *) &attrib,
sizeof(struct attrib_data_t)))
rc = -EFAULT;
return rc;
}
/*
* Set attributes (cache operations)
* Stores the attributes for cache operations to be used in Define Extent (DE).
*/
static int
dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
{
struct dasd_eckd_private *private = device->private;
struct attrib_data_t attrib;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!argp)
return -EINVAL;
if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
return -EFAULT;
private->attrib = attrib;
dev_info(&device->cdev->dev,
"The DASD cache mode was set to %x (%i cylinder prestage)\n",
private->attrib.operation, private->attrib.nr_cyl);
return 0;
}
/*
* Issue syscall I/O to EMC Symmetrix array.
* CCWs are PSF and RSSD
*/
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
struct dasd_symmio_parms usrparm;
char *psf_data, *rssd_result;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
char psf0, psf1;
int rc;
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
return -EACCES;
psf0 = psf1 = 0;
/* Copy parms from caller */
rc = -EFAULT;
if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
goto out;
if (is_compat_task()) {
/* Make sure pointers are sane even on 31 bit. */
rc = -EINVAL;
if ((usrparm.psf_data >> 32) != 0)
goto out;
if ((usrparm.rssd_result >> 32) != 0)
goto out;
usrparm.psf_data &= 0x7fffffffULL;
usrparm.rssd_result &= 0x7fffffffULL;
}
/* at least 2 bytes are accessed and should be allocated */
if (usrparm.psf_data_len < 2) {
DBF_DEV_EVENT(DBF_WARNING, device,
"Symmetrix ioctl invalid data length %d",
usrparm.psf_data_len);
rc = -EINVAL;
goto out;
}
/* alloc I/O data area */
psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
if (!psf_data || !rssd_result) {
rc = -ENOMEM;
goto out_free;
}
/* get syscall header from user space */
rc = -EFAULT;
if (copy_from_user(psf_data,
(void __user *)(unsigned long) usrparm.psf_data,
usrparm.psf_data_len))
goto out_free;
psf0 = psf_data[0];
psf1 = psf_data[1];
/* setup CCWs for PSF + RSSD */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
rc = PTR_ERR(cqr);
goto out_free;
}
cqr->startdev = device;
cqr->memdev = device;
cqr->retries = 3;
cqr->expires = 10 * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Build the ccws */
ccw = cqr->cpaddr;
/* PSF ccw */
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = usrparm.psf_data_len;
ccw->flags |= CCW_FLAG_CC;
ccw->cda = virt_to_dma32(psf_data);
ccw++;
/* RSSD ccw */
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = usrparm.rssd_result_len;
ccw->flags = CCW_FLAG_SLI ;
ccw->cda = virt_to_dma32(rssd_result);
rc = dasd_sleep_on(cqr);
if (rc)
goto out_sfree;
rc = -EFAULT;
if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
rssd_result, usrparm.rssd_result_len))
goto out_sfree;
rc = 0;
out_sfree:
dasd_sfree_request(cqr, cqr->memdev);
out_free:
kfree(rssd_result);
kfree(psf_data);
out:
DBF_DEV_EVENT(DBF_WARNING, device,
"Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
(int) psf0, (int) psf1, rc);
return rc;
}
static int
dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
{
struct dasd_device *device = block->base;
switch (cmd) {
case BIODASDGATTR:
return dasd_eckd_get_attrib(device, argp);
case BIODASDSATTR:
return dasd_eckd_set_attrib(device, argp);
case BIODASDPSRD:
return dasd_eckd_performance(device, argp);
case BIODASDRLSE:
return dasd_eckd_release(device);
case BIODASDRSRV:
return dasd_eckd_reserve(device);
case BIODASDSLCK:
return dasd_eckd_steal_lock(device);
case BIODASDSNID:
return dasd_eckd_snid(device, argp);
case BIODASDSYMMIO:
return dasd_symm_io(device, argp);
default:
return -ENOTTY;
}
}
/*
* Dump the range of CCWs into the 'page' buffer
* and print it via dev_err().
*/
static void
dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from,
struct ccw1 *to, char *page)
{
int len, count;
char *datap;
len = 0;
while (from <= to) {
len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
from, ((int *) from)[0], ((int *) from)[1]);
/* get pointer to data (consider IDALs) */
if (from->flags & CCW_FLAG_IDA)
datap = dma64_to_virt(*((dma64_t *)dma32_to_virt(from->cda)));
else
datap = dma32_to_virt(from->cda);
/* dump data (max 128 bytes) */
for (count = 0; count < from->count && count < 128; count++) {
if (count % 32 == 0)
len += sprintf(page + len, "\n");
if (count % 8 == 0)
len += sprintf(page + len, " ");
if (count % 4 == 0)
len += sprintf(page + len, " ");
len += sprintf(page + len, "%02x", datap[count]);
}
len += sprintf(page + len, "\n");
from++;
}
if (len > 0)
dev_err(&device->cdev->dev, "%s", page);
}
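/*
* Write a compact representation of the subchannel status word and the
* first 32 bytes of sense data to the debug feature.
*/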
static void
dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
char *reason)
{
u64 *sense;
u64 *stat;
sense = (u64 *) dasd_get_sense(irb);
stat = (u64 *) &irb->scsw;
if (sense) {
DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
"%016llx %016llx %016llx %016llx",
reason, *stat, *((u32 *) (stat + 1)),
sense[0], sense[1], sense[2], sense[3]);
} else {
DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
reason, *stat, *((u32 *) (stat + 1)),
"NO VALID SENSE");
}
}
/*
* Print sense data and the related channel program.
* The channel program is printed in parts because the printk buffer
* is only 1024 bytes.
*/
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
struct dasd_ccw_req *req, struct irb *irb)
{
struct ccw1 *first, *last, *fail, *from, *to;
struct device *dev;
int len, sl, sct;
char *page;
dev = &device->cdev->dev;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"No memory to dump sense data\n");
return;
}
/* dump the sense data */
len = sprintf(page, "I/O status report:\n");
len += sprintf(page + len,
"in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X CS:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
req ? req->intrc : 0);
len += sprintf(page + len, "Failing CCW: %px\n",
dma32_to_virt(irb->scsw.cmd.cpa));
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, "Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
len += sprintf(page + len, " %02x",
irb->ecw[8 * sl + sct]);
}
len += sprintf(page + len, "\n");
}
if (irb->ecw[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
sprintf(page + len,
"24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
irb->ecw[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
sprintf(page + len,
"32 Byte: Format: %x Exception class %x\n",
irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
}
} else {
sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
}
dev_err(dev, "%s", page);
if (req) {
/* req == NULL for unsolicited interrupts */
/* dump the Channel Program (max 140 Bytes per line) */
/* Count the CCWs and print the first ones (maximum 7) */
first = req->cpaddr;
for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
to = min(first + 6, last);
dev_err(dev, "Related CP in req: %px\n", req);
dasd_eckd_dump_ccw_range(device, first, to, page);
/* print failing CCW area (maximum 4) */
/* scsw->cda is either valid or zero */
from = ++to;
fail = dma32_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */
dev_err(dev, "......\n");
}
to = min(fail + 1, last);
dasd_eckd_dump_ccw_range(device, from, to, page + len);
/* print last CCWs (maximum 2) */
len = 0;
from = max(from, ++to);
if (from < last - 1) {
from = last - 1; /* there is a gap - print header */
dev_err(dev, "......\n");
}
dasd_eckd_dump_ccw_range(device, from, last, page + len);
}
free_page((unsigned long) page);
}
/*
* Print sense data from a tcw.
*/
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
struct dasd_ccw_req *req, struct irb *irb)
{
char *page;
int len, sl, sct, residual;
struct tsb *tsb;
u8 *sense, *rcq;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, " %s",
"No memory to dump sense data");
return;
}
/* dump the sense data */
len = sprintf(page, "I/O status report:\n");
len += sprintf(page + len,
"in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
"CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
irb->scsw.tm.fcxs,
(irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
req ? req->intrc : 0);
len += sprintf(page + len, "Failing TCW: %px\n",
dma32_to_virt(irb->scsw.tm.tcw));
tsb = NULL;
sense = NULL;
if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw));
if (tsb) {
len += sprintf(page + len, "tsb->length %d\n", tsb->length);
len += sprintf(page + len, "tsb->flags %x\n", tsb->flags);
len += sprintf(page + len, "tsb->dcw_offset %d\n", tsb->dcw_offset);
len += sprintf(page + len, "tsb->count %d\n", tsb->count);
residual = tsb->count - 28;
len += sprintf(page + len, "residual %d\n", residual);
switch (tsb->flags & 0x07) {
case 1: /* tsa_iostat */
len += sprintf(page + len, "tsb->tsa.iostat.dev_time %d\n",
tsb->tsa.iostat.dev_time);
len += sprintf(page + len, "tsb->tsa.iostat.def_time %d\n",
tsb->tsa.iostat.def_time);
len += sprintf(page + len, "tsb->tsa.iostat.queue_time %d\n",
tsb->tsa.iostat.queue_time);
len += sprintf(page + len, "tsb->tsa.iostat.dev_busy_time %d\n",
tsb->tsa.iostat.dev_busy_time);
len += sprintf(page + len, "tsb->tsa.iostat.dev_act_time %d\n",
tsb->tsa.iostat.dev_act_time);
sense = tsb->tsa.iostat.sense;
break;
case 2: /* ts_ddpc */
len += sprintf(page + len, "tsb->tsa.ddpc.rc %d\n",
tsb->tsa.ddpc.rc);
for (sl = 0; sl < 2; sl++) {
len += sprintf(page + len,
"tsb->tsa.ddpc.rcq %2d-%2d: ",
(8 * sl), ((8 * sl) + 7));
rcq = tsb->tsa.ddpc.rcq;
for (sct = 0; sct < 8; sct++) {
len += sprintf(page + len, "%02x",
rcq[8 * sl + sct]);
}
len += sprintf(page + len, "\n");
}
sense = tsb->tsa.ddpc.sense;
break;
case 3: /* tsa_intrg */
len += sprintf(page + len,
"tsb->tsa.intrg.: not supported yet\n");
break;
}
if (sense) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len,
"Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
len += sprintf(page + len, " %02x",
sense[8 * sl + sct]);
}
len += sprintf(page + len, "\n");
}
if (sense[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
sprintf(page + len,
"24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
sense[7] >> 4, sense[7] & 0x0f,
sense[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
sprintf(page + len,
"32 Byte: Format: %x Exception class %x\n",
sense[6] & 0x0f, sense[22] >> 4);
}
} else {
sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
}
} else {
sprintf(page + len, "SORRY - NO TSB DATA AVAILABLE\n");
}
dev_err(&device->cdev->dev, "%s", page);
free_page((unsigned long) page);
}
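/*
 * Top-level sense dump: skip logging for errors that the request flags mark
 * as expected (suppress bits), then dispatch to the transport-mode (TCW) or
 * command-mode (CCW) dump routine.
 */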
static void dasd_eckd_dump_sense(struct dasd_device *device,
struct dasd_ccw_req *req, struct irb *irb)
{
u8 *sense = dasd_get_sense(irb);
/*
* In some cases certain errors might be expected and
* log messages shouldn't be written then.
* Check if the according suppress bit is set.
*/
if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
!(sense[2] & SNS2_ENV_DATA_PRESENT) &&
test_bit(DASD_CQR_SUPPRESS_IT, &req->flags))
return;
if (sense && sense[0] & SNS0_CMD_REJECT &&
test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
return;
if (sense && sense[1] & SNS1_NO_REC_FOUND &&
test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
return;
if (scsw_cstat(&irb->scsw) == 0x40 &&
test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
return;
if (scsw_is_tm(&irb->scsw))
dasd_eckd_dump_sense_tcw(device, req, irb);
else
dasd_eckd_dump_sense_ccw(device, req, irb);
}
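/*
 * Re-read the configuration data of a device, regenerate its UID and re-add
 * it to alias management. Report if the alias was reassigned to a new base
 * device (i.e. the base unit address changed).
 */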
static int dasd_eckd_reload_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
char print_uid[DASD_UID_STRLEN];
int rc, old_base;
struct dasd_uid uid;
unsigned long flags;
/*
* remove device from alias handling to prevent new requests
* from being scheduled on the wrong alias device
*/
dasd_alias_remove_device(device);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
old_base = private->uid.base_unit_addr;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/* Read Configuration Data */
rc = dasd_eckd_read_conf(device);
if (rc)
goto out_err;
dasd_eckd_read_fc_security(device);
rc = dasd_eckd_generate_uid(device);
if (rc)
goto out_err;
/*
* update unit address configuration and
* add device to alias management
*/
dasd_alias_update_add_device(device);
dasd_eckd_get_uid(device, &uid);
if (old_base != uid.base_unit_addr) {
dasd_eckd_get_uid_string(&private->conf, print_uid);
dev_info(&device->cdev->dev,
"An Alias device was reassigned to a new base device "
"with UID: %s\n", print_uid);
}
return 0;
out_err:
return -1;
}
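/*
 * Read the message buffer of the storage server via a PSF/RSSD channel
 * program (suborder 0x03) on the given path. If that fails while a path
 * mask is set, retry once with an open path mask.
 */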
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
struct dasd_rssd_messages *messages,
__u8 lpum)
{
struct dasd_rssd_messages *message_buf;
struct dasd_psf_prssd_data *prssdp;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_messages)),
device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate read message buffer request");
return PTR_ERR(cqr);
}
cqr->lpm = lpum;
retry:
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->expires = 10 * HZ;
set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/* dasd_sleep_on_immediatly does not do complex error
	 * recovery, so clear the ERP flag and set the retry counter
	 * to do basic ERP */
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->retries = 256;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = 0x03; /* Message Buffer */
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - message buffer */
message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_messages);
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = virt_to_dma32(message_buf);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
if (rc == 0) {
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
message_buf = (struct dasd_rssd_messages *)
(prssdp + 1);
memcpy(messages, message_buf,
sizeof(struct dasd_rssd_messages));
} else if (cqr->lpm) {
/*
		 * on z/VM we might not be able to do I/O on the requested path,
		 * but we can get the required information on any path,
		 * so retry with an open path mask
*/
cqr->lpm = 0;
goto retry;
} else
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading messages failed with rc=%d\n"
, rc);
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
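/*
 * Query the host access information for the device's LSS/volume via a
 * PSF/RSSD channel program. Not available for HyperPAV alias devices and
 * only if the storage server advertises the feature.
 */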
static int dasd_eckd_query_host_access(struct dasd_device *device,
struct dasd_psf_query_host_access *data)
{
struct dasd_eckd_private *private = device->private;
struct dasd_psf_query_host_access *host_access;
struct dasd_psf_prssd_data *prssdp;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
/* not available for HYPER PAV alias devices */
if (!device->block && private->lcu->pav == HYPER_PAV)
return -EOPNOTSUPP;
/* may not be supported by the storage server */
if (!(private->features.feature[14] & 0x80))
return -EOPNOTSUPP;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
sizeof(struct dasd_psf_prssd_data) + 1,
device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate read message buffer request");
return PTR_ERR(cqr);
}
host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
if (!host_access) {
dasd_sfree_request(cqr, device);
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate host_access buffer");
return -ENOMEM;
}
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10 * HZ;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_QHA; /* query host access */
/* LSS and Volume that will be queried */
prssdp->lss = private->conf.ned->ID;
prssdp->volume = private->conf.ned->unit_addr;
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - query host access */
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_psf_query_host_access);
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = virt_to_dma32(host_access);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* the command might not be supported, suppress error message */
__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
rc = dasd_sleep_on_interruptible(cqr);
if (rc == 0) {
*data = *host_access;
} else {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading host access data failed with rc=%d\n",
rc);
rc = -EOPNOTSUPP;
}
dasd_sfree_request(cqr, cqr->memdev);
kfree(host_access);
return rc;
}
/*
* return number of grouped devices
*/
static int dasd_eckd_host_access_count(struct dasd_device *device)
{
struct dasd_psf_query_host_access *access;
struct dasd_ckd_path_group_entry *entry;
struct dasd_ckd_host_information *info;
int count = 0;
int rc, i;
access = kzalloc(sizeof(*access), GFP_NOIO);
if (!access) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate access buffer");
return -ENOMEM;
}
rc = dasd_eckd_query_host_access(device, access);
if (rc) {
kfree(access);
return rc;
}
info = (struct dasd_ckd_host_information *)
access->host_access_information;
for (i = 0; i < info->entry_count; i++) {
entry = (struct dasd_ckd_path_group_entry *)
(info->entry + i * info->entry_size);
if (entry->status_flags & DASD_ECKD_PG_GROUPED)
count++;
}
kfree(access);
return count;
}
/*
* write host access information to a sequential file
*/
static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
{
struct dasd_psf_query_host_access *access;
struct dasd_ckd_path_group_entry *entry;
struct dasd_ckd_host_information *info;
char sysplex[9] = "";
int rc, i;
access = kzalloc(sizeof(*access), GFP_NOIO);
if (!access) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate access buffer");
return -ENOMEM;
}
rc = dasd_eckd_query_host_access(device, access);
if (rc) {
kfree(access);
return rc;
}
info = (struct dasd_ckd_host_information *)
access->host_access_information;
for (i = 0; i < info->entry_count; i++) {
entry = (struct dasd_ckd_path_group_entry *)
(info->entry + i * info->entry_size);
/* PGID */
seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
/* FLAGS */
seq_printf(m, "status_flags %02x\n", entry->status_flags);
/* SYSPLEX NAME */
memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
EBCASC(sysplex, sizeof(sysplex));
seq_printf(m, "sysplex_name %8s\n", sysplex);
/* SUPPORTED CYLINDER */
seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
/* TIMESTAMP */
seq_printf(m, "timestamp %lu\n", (unsigned long)
entry->timestamp);
}
kfree(access);
return 0;
}
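/*
 * Find the configured device in a copy relation that matches the given
 * bus ID, or return NULL if there is none.
 */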
static struct dasd_device
*copy_relation_find_device(struct dasd_copy_relation *copy,
char *busid)
{
int i;
for (i = 0; i < DASD_CP_ENTRIES; i++) {
if (copy->entry[i].configured &&
strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
return copy->entry[i].device;
}
return NULL;
}
/*
* set the new active/primary device
*/
static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busid,
char *old_busid)
{
int i;
for (i = 0; i < DASD_CP_ENTRIES; i++) {
if (copy->entry[i].configured &&
strncmp(copy->entry[i].busid, new_busid,
DASD_BUS_ID_SIZE) == 0) {
			copy->active = ©->entry[i];
copy->entry[i].primary = true;
} else if (copy->entry[i].configured &&
strncmp(copy->entry[i].busid, old_busid,
DASD_BUS_ID_SIZE) == 0) {
copy->entry[i].primary = false;
}
}
}
/*
* The function will swap the role of a given copy pair.
* During the swap operation the relation of the blockdevice is disconnected
* from the old primary and connected to the new.
*
* IO is paused on the block queue before swap and may be resumed afterwards.
*/
static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
char *sec_busid)
{
struct dasd_device *primary, *secondary;
struct dasd_copy_relation *copy;
struct dasd_block *block;
struct gendisk *gdp;
copy = device->copy;
if (!copy)
return DASD_COPYPAIRSWAP_INVALID;
primary = copy->active->device;
if (!primary)
return DASD_COPYPAIRSWAP_INVALID;
/* double check if swap has correct primary */
if (strncmp(dev_name(&primary->cdev->dev), prim_busid, DASD_BUS_ID_SIZE) != 0)
return DASD_COPYPAIRSWAP_PRIMARY;
secondary = copy_relation_find_device(copy, sec_busid);
if (!secondary)
return DASD_COPYPAIRSWAP_SECONDARY;
/*
	 * usually the device should already be quiesced for the swap;
	 * as a precaution, stop the device and requeue all requests again
*/
dasd_device_set_stop_bits(primary, DASD_STOPPED_PPRC);
dasd_device_set_stop_bits(secondary, DASD_STOPPED_PPRC);
dasd_generic_requeue_all_requests(primary);
/* swap DASD internal device <> block assignment */
block = primary->block;
primary->block = NULL;
secondary->block = block;
block->base = secondary;
/* set new primary device in COPY relation */
copy_pair_set_active(copy, sec_busid, prim_busid);
/* swap blocklayer device link */
gdp = block->gdp;
dasd_add_link_to_gendisk(gdp, secondary);
/* re-enable device */
dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
dasd_device_remove_stop_bits(secondary, DASD_STOPPED_PPRC);
dasd_schedule_device_bh(secondary);
return DASD_COPYPAIRSWAP_SUCCESS;
}
/*
* Perform Subsystem Function - Peer-to-Peer Remote Copy Extended Query
*/
static int dasd_eckd_query_pprc_status(struct dasd_device *device,
struct dasd_pprc_data_sc4 *data)
{
struct dasd_pprc_data_sc4 *pprc_data;
struct dasd_psf_prssd_data *prssdp;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
sizeof(*prssdp) + sizeof(*pprc_data) + 1,
device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate query PPRC status request");
return PTR_ERR(cqr);
}
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10 * HZ;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *)cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_PPRCEQ;
prssdp->varies[0] = PPRCEQ_SCOPE_4;
pprc_data = (struct dasd_pprc_data_sc4 *)(prssdp + 1);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - query host access */
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*pprc_data);
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = virt_to_dma32(pprc_data);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_interruptible(cqr);
if (rc == 0) {
*data = *pprc_data;
} else {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"PPRC Extended Query failed with rc=%d\n",
rc);
rc = -EOPNOTSUPP;
}
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* ECKD NOP - no operation
*/
static int dasd_eckd_nop(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 1, device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate NOP request");
return PTR_ERR(cqr);
}
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 1;
cqr->expires = 10 * HZ;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_NOP;
ccw->flags |= CCW_FLAG_SLI;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_interruptible(cqr);
if (rc != 0) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"NOP failed with rc=%d\n", rc);
rc = -EOPNOTSUPP;
}
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
static int dasd_eckd_device_ping(struct dasd_device *device)
{
return dasd_eckd_nop(device);
}
/*
* Perform Subsystem Function - CUIR response
*/
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
__u32 message_id, __u8 lpum)
{
struct dasd_psf_cuir_response *psf_cuir;
int pos = pathmask_to_pos(lpum);
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
sizeof(struct dasd_psf_cuir_response),
device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate PSF-CUIR request");
return PTR_ERR(cqr);
}
psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
psf_cuir->cc = response;
psf_cuir->chpid = device->path[pos].chpid;
psf_cuir->message_id = message_id;
psf_cuir->cssid = device->path[pos].cssid;
psf_cuir->ssid = device->path[pos].ssid;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->cda = virt_to_dma32(psf_cuir);
ccw->flags = CCW_FLAG_SLI;
ccw->count = sizeof(struct dasd_psf_cuir_response);
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10*HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
rc = dasd_sleep_on(cqr);
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* return configuration data that is referenced by record selector
* if a record selector is specified or per default return the
* conf_data pointer for the path specified by lpum
*/
static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
__u8 lpum,
struct dasd_cuir_message *cuir)
{
struct dasd_conf_data *conf_data;
int path, pos;
if (cuir->record_selector == 0)
goto out;
for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
conf_data = device->path[pos].conf_data;
if (conf_data->gneq.record_selector ==
cuir->record_selector)
return conf_data;
}
out:
return device->path[pathmask_to_pos(lpum)].conf_data;
}
/*
* This function determines the scope of a reconfiguration request by
* analysing the path and device selection data provided in the CUIR request.
 * Returns a path mask containing the CUIR-affected paths for the given device.
 *
 * If the CUIR request does not contain the required information, return the
 * path mask of the path the attention message for the CUIR request was
 * received on.
*/
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
struct dasd_cuir_message *cuir)
{
struct dasd_conf_data *ref_conf_data;
unsigned long bitmask = 0, mask = 0;
struct dasd_conf_data *conf_data;
unsigned int pos, path;
char *ref_gneq, *gneq;
char *ref_ned, *ned;
int tbcpm = 0;
	/* if the CUIR request does not specify the scope, use the path
	   the attention message was presented on */
if (!cuir->ned_map ||
!(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
return lpum;
/* get reference conf data */
ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
/* reference ned is determined by ned_map field */
pos = 8 - ffs(cuir->ned_map);
ref_ned = (char *)&ref_conf_data->neds[pos];
ref_gneq = (char *)&ref_conf_data->gneq;
/* transfer 24 bit neq_map to mask */
mask = cuir->neq_map[2];
mask |= cuir->neq_map[1] << 8;
mask |= cuir->neq_map[0] << 16;
for (path = 0; path < 8; path++) {
/* initialise data per path */
bitmask = mask;
conf_data = device->path[path].conf_data;
pos = 8 - ffs(cuir->ned_map);
ned = (char *) &conf_data->neds[pos];
/* compare reference ned and per path ned */
if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
continue;
gneq = (char *)&conf_data->gneq;
/* compare reference gneq and per_path gneq under
24 bit mask where mask bit 0 equals byte 7 of
the gneq and mask bit 24 equals byte 31 */
while (bitmask) {
pos = ffs(bitmask) - 1;
if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
!= 0)
break;
clear_bit(pos, &bitmask);
}
if (bitmask)
continue;
		/* device and path match the reference values;
		   add the path to the CUIR scope */
tbcpm |= 0x80 >> path;
}
return tbcpm;
}
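/*
 * Print a message for each path affected by a CUIR action: a warning when
 * a path is quiesced and an info message when it is resumed.
 */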
static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
unsigned long paths, int action)
{
int pos;
while (paths) {
/* get position of bit in mask */
pos = 8 - ffs(paths);
/* get channel path descriptor from this position */
if (action == CUIR_QUIESCE)
pr_warn("Service on the storage server caused path %x.%02x to go offline",
device->path[pos].cssid,
device->path[pos].chpid);
else if (action == CUIR_RESUME)
pr_info("Path %x.%02x is back online after service on the storage server",
device->path[pos].cssid,
device->path[pos].chpid);
clear_bit(7 - pos, &paths);
}
}
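/*
 * Remove the CUIR-affected paths from the operational path mask of a single
 * device. Returns the removed path mask, 0 if the paths are not in use, or
 * -EINVAL if the action would remove the last operational path.
 */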
static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
struct dasd_cuir_message *cuir)
{
unsigned long tbcpm;
tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
/* nothing to do if path is not in use */
if (!(dasd_path_get_opm(device) & tbcpm))
return 0;
if (!(dasd_path_get_opm(device) & ~tbcpm)) {
/* no path would be left if the CUIR action is taken
return error */
return -EINVAL;
}
/* remove device from operational path mask */
dasd_path_remove_opm(device, tbcpm);
dasd_path_add_cuirpm(device, tbcpm);
return tbcpm;
}
/*
* walk through all devices and build a path mask to quiesce them
* return an error if the last path to a device would be removed
*
 * if only some of the devices are quiesced when an error occurs,
 * no resume is necessary; the storage server will notify the
 * already quiesced devices again
*/
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
struct dasd_cuir_message *cuir)
{
struct dasd_eckd_private *private = device->private;
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_device *dev, *n;
unsigned long paths = 0;
unsigned long flags;
int tbcpm;
/* active devices */
list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
alias_list) {
spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
if (tbcpm < 0)
goto out_err;
paths |= tbcpm;
}
/* inactive devices */
list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
alias_list) {
spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
if (tbcpm < 0)
goto out_err;
paths |= tbcpm;
}
/* devices in PAV groups */
list_for_each_entry_safe(pavgroup, tempgroup,
&private->lcu->grouplist, group) {
list_for_each_entry_safe(dev, n, &pavgroup->baselist,
alias_list) {
spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
spin_unlock_irqrestore(
get_ccwdev_lock(dev->cdev), flags);
if (tbcpm < 0)
goto out_err;
paths |= tbcpm;
}
list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
alias_list) {
spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
spin_unlock_irqrestore(
get_ccwdev_lock(dev->cdev), flags);
if (tbcpm < 0)
goto out_err;
paths |= tbcpm;
}
}
/* notify user about all paths affected by CUIR action */
dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
return 0;
out_err:
return tbcpm;
}
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
struct dasd_cuir_message *cuir)
{
struct dasd_eckd_private *private = device->private;
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_device *dev, *n;
unsigned long paths = 0;
int tbcpm;
/*
	 * the path may already have been added through a generic path event;
	 * only trigger path verification if the path is not already in use
*/
list_for_each_entry_safe(dev, n,
&private->lcu->active_devices,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
if (!(dasd_path_get_opm(dev) & tbcpm)) {
dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
list_for_each_entry_safe(dev, n,
&private->lcu->inactive_devices,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
if (!(dasd_path_get_opm(dev) & tbcpm)) {
dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
/* devices in PAV groups */
list_for_each_entry_safe(pavgroup, tempgroup,
&private->lcu->grouplist,
group) {
list_for_each_entry_safe(dev, n,
&pavgroup->baselist,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
if (!(dasd_path_get_opm(dev) & tbcpm)) {
dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
list_for_each_entry_safe(dev, n,
&pavgroup->aliaslist,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
if (!(dasd_path_get_opm(dev) & tbcpm)) {
dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
}
/* notify user about all paths affected by CUIR action */
dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
return 0;
}
static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
__u8 lpum)
{
struct dasd_cuir_message *cuir = messages;
int response;
DBF_DEV_EVENT(DBF_WARNING, device,
"CUIR request: %016llx %016llx %016llx %08x",
((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
((u32 *)cuir)[3]);
if (cuir->code == CUIR_QUIESCE) {
/* quiesce */
if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
response = PSF_CUIR_LAST_PATH;
else
response = PSF_CUIR_COMPLETED;
} else if (cuir->code == CUIR_RESUME) {
/* resume */
dasd_eckd_cuir_resume(device, lpum, cuir);
response = PSF_CUIR_COMPLETED;
} else
response = PSF_CUIR_NOT_SUPPORTED;
dasd_eckd_psf_cuir_response(device, response,
cuir->message_id, lpum);
DBF_DEV_EVENT(DBF_WARNING, device,
"CUIR response: %d on message ID %08x", response,
cuir->message_id);
/* to make sure there is no attention left schedule work again */
device->discipline->check_attention(device, lpum);
}
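/*
 * Resume all devices on the LCU that were stopped because the extent pool
 * ran out of space.
 */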
static void dasd_eckd_oos_resume(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_device *dev, *n;
unsigned long flags;
spin_lock_irqsave(&private->lcu->lock, flags);
list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
alias_list) {
if (dev->stopped & DASD_STOPPED_NOSPC)
dasd_generic_space_avail(dev);
}
list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
alias_list) {
if (dev->stopped & DASD_STOPPED_NOSPC)
dasd_generic_space_avail(dev);
}
/* devices in PAV groups */
list_for_each_entry_safe(pavgroup, tempgroup,
&private->lcu->grouplist,
group) {
list_for_each_entry_safe(dev, n, &pavgroup->baselist,
alias_list) {
if (dev->stopped & DASD_STOPPED_NOSPC)
dasd_generic_space_avail(dev);
}
list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
alias_list) {
if (dev->stopped & DASD_STOPPED_NOSPC)
dasd_generic_space_avail(dev);
}
}
spin_unlock_irqrestore(&private->lcu->lock, flags);
}
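/*
 * Handle an out-of-space attention message: log the reported extent pool
 * condition, resume devices stopped for lack of space where possible, and
 * refresh the extent pool information afterwards.
 */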
static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
__u8 lpum)
{
struct dasd_oos_message *oos = messages;
switch (oos->code) {
case REPO_WARN:
case POOL_WARN:
dev_warn(&device->cdev->dev,
"Extent pool usage has reached a critical value\n");
dasd_eckd_oos_resume(device);
break;
case REPO_EXHAUST:
case POOL_EXHAUST:
dev_warn(&device->cdev->dev,
"Extent pool is exhausted\n");
break;
case REPO_RELIEVE:
case POOL_RELIEVE:
dev_info(&device->cdev->dev,
"Extent pool physical space constraint has been relieved\n");
break;
}
/* In any case, update related data */
dasd_eckd_read_ext_pool_info(device);
/* to make sure there is no attention left schedule work again */
device->discipline->check_attention(device, lpum);
}
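/*
 * Worker function: read the message buffer for the path the attention was
 * raised on and dispatch CUIR or out-of-space handling based on the message
 * length and format.
 */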
static void dasd_eckd_check_attention_work(struct work_struct *work)
{
struct check_attention_work_data *data;
struct dasd_rssd_messages *messages;
struct dasd_device *device;
int rc;
data = container_of(work, struct check_attention_work_data, worker);
device = data->device;
messages = kzalloc(sizeof(*messages), GFP_KERNEL);
if (!messages) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate attention message buffer");
goto out;
}
rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
if (rc)
goto out;
if (messages->length == ATTENTION_LENGTH_CUIR &&
messages->format == ATTENTION_FORMAT_CUIR)
dasd_eckd_handle_cuir(device, messages, data->lpum);
if (messages->length == ATTENTION_LENGTH_OOS &&
messages->format == ATTENTION_FORMAT_OOS)
dasd_eckd_handle_oos(device, messages, data->lpum);
out:
dasd_put_device(device);
kfree(messages);
kfree(data);
}
static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
{
struct check_attention_work_data *data;
data = kzalloc(sizeof(*data), GFP_ATOMIC);
if (!data)
return -ENOMEM;
INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
dasd_get_device(device);
data->device = device;
data->lpum = lpum;
schedule_work(&data->worker);
return 0;
}
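/*
 * Disable HPF on a single channel path: if other operational paths remain,
 * move the path from the operational path mask to the no-HPF mask and report
 * it. Returns 1 if the path was disabled, 0 otherwise.
 */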
static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
{
if (~lpum & dasd_path_get_opm(device)) {
dasd_path_add_nohpfpm(device, lpum);
dasd_path_remove_opm(device, lpum);
dev_err(&device->cdev->dev,
"Channel path %02X lost HPF functionality and is disabled\n",
lpum);
return 1;
}
return 0;
}
static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
dev_err(&device->cdev->dev,
"High Performance FICON disabled\n");
private->fcx_max_data = 0;
}
static int dasd_eckd_hpf_enabled(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->fcx_max_data ? 1 : 0;
}
static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
struct irb *irb)
{
struct dasd_eckd_private *private = device->private;
if (!private->fcx_max_data) {
/* sanity check for no HPF, the error makes no sense */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Trying to disable HPF for a non HPF device");
return;
}
if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
dasd_eckd_disable_hpf_device(device);
} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
return;
dasd_eckd_disable_hpf_device(device);
dasd_path_set_tbvpm(device,
dasd_path_get_hpfpm(device));
}
/*
	 * prevent any new I/O from being started on the device and schedule
	 * a requeue of the existing requests
*/
dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
dasd_schedule_requeue(device);
}
static unsigned int dasd_eckd_max_sectors(struct dasd_block *block)
{
if (block->base->features & DASD_FEATURE_USERAW) {
/*
* the max_blocks value for raw_track access is 256
* it is higher than the native ECKD value because we
* only need one ccw per track
* so the max_hw_sectors are
* 2048 x 512B = 1024kB = 16 tracks
*/
return DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
}
return DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
}
static struct ccw_driver dasd_eckd_driver = {
.driver = {
.name = "dasd-eckd",
.owner = THIS_MODULE,
.dev_groups = dasd_dev_groups,
},
.ids = dasd_eckd_ids,
.probe = dasd_eckd_probe,
.remove = dasd_generic_remove,
.set_offline = dasd_generic_set_offline,
.set_online = dasd_eckd_set_online,
.notify = dasd_generic_notify,
.path_event = dasd_generic_path_event,
.shutdown = dasd_generic_shutdown,
.uc_handler = dasd_generic_uc_handler,
.int_class = IRQIO_DAS,
};
static struct dasd_discipline dasd_eckd_discipline = {
.owner = THIS_MODULE,
.name = "ECKD",
.ebcname = "ECKD",
.check_device = dasd_eckd_check_characteristics,
.uncheck_device = dasd_eckd_uncheck_device,
.do_analysis = dasd_eckd_do_analysis,
.pe_handler = dasd_eckd_pe_handler,
.basic_to_ready = dasd_eckd_basic_to_ready,
.online_to_ready = dasd_eckd_online_to_ready,
.basic_to_known = dasd_eckd_basic_to_known,
.max_sectors = dasd_eckd_max_sectors,
.fill_geometry = dasd_eckd_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
.handle_terminated_request = dasd_eckd_handle_terminated_request,
.format_device = dasd_eckd_format_device,
.check_device_format = dasd_eckd_check_device_format,
.erp_action = dasd_eckd_erp_action,
.erp_postaction = dasd_eckd_erp_postaction,
.check_for_device_change = dasd_eckd_check_for_device_change,
.build_cp = dasd_eckd_build_alias_cp,
.free_cp = dasd_eckd_free_alias_cp,
.dump_sense = dasd_eckd_dump_sense,
.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
.fill_info = dasd_eckd_fill_info,
.ioctl = dasd_eckd_ioctl,
.reload = dasd_eckd_reload_device,
.get_uid = dasd_eckd_get_uid,
.kick_validate = dasd_eckd_kick_validate_server,
.check_attention = dasd_eckd_check_attention,
.host_access_count = dasd_eckd_host_access_count,
.hosts_print = dasd_hosts_print,
.handle_hpf_error = dasd_eckd_handle_hpf_error,
.disable_hpf = dasd_eckd_disable_hpf_device,
.hpf_enabled = dasd_eckd_hpf_enabled,
.reset_path = dasd_eckd_reset_path,
.is_ese = dasd_eckd_is_ese,
.space_allocated = dasd_eckd_space_allocated,
.space_configured = dasd_eckd_space_configured,
.logical_capacity = dasd_eckd_logical_capacity,
.release_space = dasd_eckd_release_space,
.ext_pool_id = dasd_eckd_ext_pool_id,
.ext_size = dasd_eckd_ext_size,
.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
.ext_pool_oos = dasd_eckd_ext_pool_oos,
.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
.ese_format = dasd_eckd_ese_format,
.ese_read = dasd_eckd_ese_read,
.pprc_status = dasd_eckd_query_pprc_status,
.pprc_enabled = dasd_eckd_pprc_enabled,
.copy_pair_swap = dasd_eckd_copy_pair_swap,
.device_ping = dasd_eckd_device_ping,
};
static int __init
dasd_eckd_init(void)
{
int ret;
ASCEBC(dasd_eckd_discipline.ebcname, 4);
dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
GFP_KERNEL | GFP_DMA);
if (!dasd_reserve_req)
return -ENOMEM;
dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
GFP_KERNEL | GFP_DMA);
if (!dasd_vol_info_req) {
kfree(dasd_reserve_req);
return -ENOMEM;
}
pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
GFP_KERNEL | GFP_DMA);
if (!pe_handler_worker) {
kfree(dasd_reserve_req);
kfree(dasd_vol_info_req);
return -ENOMEM;
}
rawpadpage = (void *)__get_free_page(GFP_KERNEL);
if (!rawpadpage) {
kfree(pe_handler_worker);
kfree(dasd_reserve_req);
kfree(dasd_vol_info_req);
return -ENOMEM;
}
ret = ccw_driver_register(&dasd_eckd_driver);
if (!ret)
wait_for_device_probe();
else {
kfree(pe_handler_worker);
kfree(dasd_reserve_req);
kfree(dasd_vol_info_req);
free_page((unsigned long)rawpadpage);
}
return ret;
}
static void __exit
dasd_eckd_cleanup(void)
{
ccw_driver_unregister(&dasd_eckd_driver);
kfree(pe_handler_worker);
	kfree(dasd_reserve_req);
	kfree(dasd_vol_info_req);
	free_page((unsigned long)rawpadpage);
}
module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright(c) 2021 Intel Corporation
*/
#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING)
#define trace_iwlmei_sap_data(...)
#else
#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA) || defined(TRACE_HEADER_MULTI_READ)
#ifndef __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA
enum iwl_sap_data_trace_type {
IWL_SAP_RX_DATA_TO_AIR,
IWL_SAP_TX_DATA_FROM_AIR,
IWL_SAP_RX_DATA_DROPPED_FROM_AIR,
IWL_SAP_TX_DHCP,
};
static inline size_t
iwlmei_sap_data_offset(enum iwl_sap_data_trace_type trace_type)
{
switch (trace_type) {
case IWL_SAP_RX_DATA_TO_AIR:
return 0;
case IWL_SAP_TX_DATA_FROM_AIR:
case IWL_SAP_RX_DATA_DROPPED_FROM_AIR:
return sizeof(struct iwl_sap_hdr);
case IWL_SAP_TX_DHCP:
return sizeof(struct iwl_sap_cb_data);
default:
WARN_ON_ONCE(1);
}
return 0;
}
#endif
#define __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA
#include <linux/tracepoint.h>
#include <linux/skbuff.h>
#include "sap.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlmei_sap_data
TRACE_EVENT(iwlmei_sap_data,
TP_PROTO(const struct sk_buff *skb,
enum iwl_sap_data_trace_type trace_type),
TP_ARGS(skb, trace_type),
TP_STRUCT__entry(
__dynamic_array(u8, data,
skb->len - iwlmei_sap_data_offset(trace_type))
__field(u32, trace_type)
),
TP_fast_assign(
size_t offset = iwlmei_sap_data_offset(trace_type);
__entry->trace_type = trace_type;
skb_copy_bits(skb, offset, __get_dynamic_array(data),
skb->len - offset);
),
TP_printk("sap_data:trace_type %d len %d",
__entry->trace_type, __get_dynamic_array_len(data))
);
/*
* If you add something here, add a stub in case
* !defined(CONFIG_IWLWIFI_DEVICE_TRACING)
*/
#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace-data
#include <trace/define_trace.h>
#endif /* CONFIG_IWLWIFI_DEVICE_TRACING */
|
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Zhiyuan Lv <[email protected]>
*
* Contributors:
* Terrence Xu <[email protected]>
* Changbin Du <[email protected]>
* Bing Niu <[email protected]>
* Zhi Wang <[email protected]>
*
*/
#include <drm/display/drm_dp.h>
#include "display/intel_dp_aux_regs.h"
#include "display/intel_gmbus_regs.h"
#include "gvt.h"
#include "i915_drv.h"
#include "i915_reg.h"
#define GMBUS1_TOTAL_BYTES_SHIFT 16
#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
#define gmbus1_total_byte_count(v) (((v) >> \
GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK)
#define gmbus1_target_addr(v) (((v) & 0xff) >> 1)
#define gmbus1_target_index(v) (((v) >> 8) & 0xff)
#define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7)
/* GMBUS0 bits definitions */
#define _GMBUS_PIN_SEL_MASK (0x7)
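/*
 * Return the next byte of the emulated EDID block and advance the read
 * position; returns 0 and logs an error if the vGPU is not in a valid EDID
 * read sequence or no EDID data is available.
 */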
static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
{
struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
unsigned char chr = 0;
if (edid->state == I2C_NOT_SPECIFIED || !edid->target_selected) {
gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
return 0;
}
if (edid->current_edid_read >= EDID_SIZE) {
gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
return 0;
}
if (!edid->edid_available) {
gvt_vgpu_err("Reading EDID but EDID is not available!\n");
return 0;
}
if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) {
struct intel_vgpu_edid_data *edid_data =
intel_vgpu_port(vgpu, edid->port)->edid;
chr = edid_data->edid_block[edid->current_edid_read];
edid->current_edid_read++;
} else {
gvt_vgpu_err("No EDID available during the reading?\n");
}
return chr;
}
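/*
 * The following helpers translate the GMBUS0 pin select field into a display
 * port; the mapping differs between Broxton, CNP-based PCHs (Coffee Lake/
 * Comet Lake) and older platforms. -EINVAL is returned for unknown pins.
 */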
static inline int cnp_get_port_from_gmbus0(u32 gmbus0)
{
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
int port = -EINVAL;
if (port_select == GMBUS_PIN_1_BXT)
port = PORT_B;
else if (port_select == GMBUS_PIN_2_BXT)
port = PORT_C;
else if (port_select == GMBUS_PIN_3_BXT)
port = PORT_D;
else if (port_select == GMBUS_PIN_4_CNP)
port = PORT_E;
return port;
}
static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
{
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
int port = -EINVAL;
if (port_select == GMBUS_PIN_1_BXT)
port = PORT_B;
else if (port_select == GMBUS_PIN_2_BXT)
port = PORT_C;
else if (port_select == GMBUS_PIN_3_BXT)
port = PORT_D;
return port;
}
static inline int get_port_from_gmbus0(u32 gmbus0)
{
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
int port = -EINVAL;
if (port_select == GMBUS_PIN_VGADDC)
port = PORT_E;
else if (port_select == GMBUS_PIN_DPC)
port = PORT_C;
else if (port_select == GMBUS_PIN_DPB)
port = PORT_B;
else if (port_select == GMBUS_PIN_DPD)
port = PORT_D;
return port;
}
static void reset_gmbus_controller(struct intel_vgpu *vgpu)
{
vgpu_vreg_t(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
if (!vgpu->display.i2c_edid.edid_available)
vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
}
/* GMBUS0 */
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int port, pin_select;
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
pin_select = vgpu_vreg(vgpu, offset) & _GMBUS_PIN_SEL_MASK;
intel_vgpu_init_i2c_edid(vgpu);
if (pin_select == 0)
return 0;
if (IS_BROXTON(i915))
port = bxt_get_port_from_gmbus0(pin_select);
else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
port = cnp_get_port_from_gmbus0(pin_select);
else
port = get_port_from_gmbus0(pin_select);
if (drm_WARN_ON(&i915->drm, port < 0))
return 0;
vgpu->display.i2c_edid.state = I2C_GMBUS;
vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
!intel_vgpu_port_is_dp(vgpu, port)) {
vgpu->display.i2c_edid.port = port;
vgpu->display.i2c_edid.edid_available = true;
vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
} else
vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
return 0;
}
static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
u32 target_addr;
u32 wvalue = *(u32 *)p_data;
if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) {
if (!(wvalue & GMBUS_SW_CLR_INT)) {
vgpu_vreg(vgpu, offset) &= ~GMBUS_SW_CLR_INT;
reset_gmbus_controller(vgpu);
}
/*
* TODO: "This bit is cleared to zero when an event
* causes the HW_RDY bit transition to occur "
*/
} else {
/*
* per bspec setting this bit can cause:
* 1) INT status bit cleared
* 2) HW_RDY bit asserted
*/
if (wvalue & GMBUS_SW_CLR_INT) {
vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
}
/* For virtualization, we suppose that HW is always ready,
* so GMBUS_SW_RDY should always be cleared
*/
if (wvalue & GMBUS_SW_RDY)
wvalue &= ~GMBUS_SW_RDY;
i2c_edid->gmbus.total_byte_count =
gmbus1_total_byte_count(wvalue);
target_addr = gmbus1_target_addr(wvalue);
		/* vgpu gmbus only supports EDID */
if (target_addr == EDID_ADDR) {
i2c_edid->target_selected = true;
} else if (target_addr != 0) {
gvt_dbg_dpy(
"vgpu%d: unsupported gmbus target addr(0x%x)\n"
" gmbus operations will be ignored.\n",
vgpu->id, target_addr);
}
if (wvalue & GMBUS_CYCLE_INDEX)
i2c_edid->current_edid_read =
gmbus1_target_index(wvalue);
i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue);
switch (gmbus1_bus_cycle(wvalue)) {
case GMBUS_NOCYCLE:
break;
case GMBUS_STOP:
/* From spec:
* This can only cause a STOP to be generated
* if a GMBUS cycle is generated, the GMBUS is
* currently in a data/wait/idle phase, or it is in a
* WAIT phase
*/
if (gmbus1_bus_cycle(vgpu_vreg(vgpu, offset))
!= GMBUS_NOCYCLE) {
intel_vgpu_init_i2c_edid(vgpu);
/* After the 'stop' cycle, hw state would become
* 'stop phase' and then 'idle phase' after a
* few milliseconds. In emulation, we just set
* it as 'idle phase' ('stop phase' is not
* visible in gmbus interface)
*/
i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
}
break;
case NIDX_NS_W:
case IDX_NS_W:
case NIDX_STOP:
case IDX_STOP:
/* From hw spec the GMBUS phase
* transition like this:
* START (-->INDEX) -->DATA
*/
i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
break;
default:
gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
break;
}
/*
* From hw spec the WAIT state will be
* cleared:
* (1) in a new GMBUS cycle
* (2) by generating a stop
*/
vgpu_vreg(vgpu, offset) = wvalue;
}
return 0;
}
static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
drm_WARN_ON(&i915->drm, 1);
return 0;
}
static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
int i;
unsigned char byte_data;
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
int byte_left = i2c_edid->gmbus.total_byte_count -
i2c_edid->current_edid_read;
int byte_count = byte_left;
u32 reg_data = 0;
	/* Data can only be received if the previous settings are correct */
if (vgpu_vreg_t(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
if (byte_left <= 0) {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
return 0;
}
if (byte_count > 4)
byte_count = 4;
for (i = 0; i < byte_count; i++) {
byte_data = edid_get_byte(vgpu);
reg_data |= (byte_data << (i << 3));
}
		memcpy(&vgpu_vreg(vgpu, offset), ®_data, byte_count);
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
if (byte_left <= 4) {
switch (i2c_edid->gmbus.cycle_type) {
case NIDX_STOP:
case IDX_STOP:
i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
break;
case NIDX_NS_W:
case IDX_NS_W:
default:
i2c_edid->gmbus.phase = GMBUS_WAIT_PHASE;
break;
}
intel_vgpu_init_i2c_edid(vgpu);
}
/*
* Read GMBUS3 during send operation,
* return the latest written value
*/
} else {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
}
return 0;
}
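/*
 * Return the current GMBUS2 value; if the INUSE bit was still clear it is
 * set after the read, emulating how reading GMBUS2 claims the bus.
 */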
static int gmbus2_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 value = vgpu_vreg(vgpu, offset);
if (!(vgpu_vreg(vgpu, offset) & GMBUS_INUSE))
vgpu_vreg(vgpu, offset) |= GMBUS_INUSE;
memcpy(p_data, (void *)&value, bytes);
return 0;
}
static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 wvalue = *(u32 *)p_data;
if (wvalue & GMBUS_INUSE)
vgpu_vreg(vgpu, offset) &= ~GMBUS_INUSE;
/* All other bits are read-only */
return 0;
}
/**
* intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read
* @vgpu: a vGPU
* @offset: reg offset
* @p_data: data return buffer
* @bytes: access data length
*
* This function is used to emulate gmbus register mmio read
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
return gmbus2_mmio_read(vgpu, offset, p_data, bytes);
else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
return gmbus3_mmio_read(vgpu, offset, p_data, bytes);
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
return 0;
}
/**
* intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write
* @vgpu: a vGPU
* @offset: reg offset
* @p_data: data return buffer
* @bytes: access data length
*
* This function is used to emulate gmbus register mmio write
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
return gmbus0_mmio_write(vgpu, offset, p_data, bytes);
else if (offset == i915_mmio_reg_offset(PCH_GMBUS1))
return gmbus1_mmio_write(vgpu, offset, p_data, bytes);
else if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
return gmbus2_mmio_write(vgpu, offset, p_data, bytes);
else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
return gmbus3_mmio_write(vgpu, offset, p_data, bytes);
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
return 0;
}
enum {
AUX_CH_CTL = 0,
AUX_CH_DATA1,
AUX_CH_DATA2,
AUX_CH_DATA3,
AUX_CH_DATA4,
AUX_CH_DATA5
};
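/* Map an AUX channel MMIO offset to its register index (CTL or DATA1-5). */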
static inline int get_aux_ch_reg(unsigned int offset)
{
int reg;
switch (offset & 0xff) {
case 0x10:
reg = AUX_CH_CTL;
break;
case 0x14:
reg = AUX_CH_DATA1;
break;
case 0x18:
reg = AUX_CH_DATA2;
break;
case 0x1c:
reg = AUX_CH_DATA3;
break;
case 0x20:
reg = AUX_CH_DATA4;
break;
case 0x24:
reg = AUX_CH_DATA5;
break;
default:
reg = -1;
break;
}
return reg;
}
/**
* intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write
* @vgpu: a vGPU
* @port_idx: port index
* @offset: reg offset
* @p_data: write ptr
*
* This function is used to emulate AUX channel register write
*
*/
void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
int port_idx,
unsigned int offset,
void *p_data)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
int msg_length, ret_msg_size;
int msg, addr, ctrl, op;
u32 value = *(u32 *)p_data;
int aux_data_for_write = 0;
int reg = get_aux_ch_reg(offset);
if (reg != AUX_CH_CTL) {
vgpu_vreg(vgpu, offset) = value;
return;
}
msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, value);
	/* check the msg in the DATA register */
msg = vgpu_vreg(vgpu, offset + 4);
addr = (msg >> 8) & 0xffff;
ctrl = (msg >> 24) & 0xff;
op = ctrl >> 4;
if (!(value & DP_AUX_CH_CTL_SEND_BUSY)) {
/* The ctl write to clear some states */
return;
}
/* Always set the wanted value for vms. */
ret_msg_size = (((op & 0x1) == DP_AUX_I2C_READ) ? 2 : 1);
vgpu_vreg(vgpu, offset) =
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_MESSAGE_SIZE(ret_msg_size);
if (msg_length == 3) {
if (!(op & DP_AUX_I2C_MOT)) {
/* stop */
intel_vgpu_init_i2c_edid(vgpu);
} else {
/* start or restart */
i2c_edid->aux_ch.i2c_over_aux_ch = true;
i2c_edid->aux_ch.aux_ch_mot = true;
if (addr == 0) {
/* reset the address */
intel_vgpu_init_i2c_edid(vgpu);
} else if (addr == EDID_ADDR) {
i2c_edid->state = I2C_AUX_CH;
i2c_edid->port = port_idx;
i2c_edid->target_selected = true;
if (intel_vgpu_has_monitor_on_port(vgpu,
port_idx) &&
intel_vgpu_port_is_dp(vgpu, port_idx))
i2c_edid->edid_available = true;
}
}
} else if ((op & 0x1) == DP_AUX_I2C_WRITE) {
/* TODO
* We only support EDID reading from I2C_over_AUX. And
* we do not expect the index mode to be used. Right now
* the WRITE operation is ignored. It is good enough to
* support the gfx driver to do EDID access.
*/
} else {
if (drm_WARN_ON(&i915->drm, (op & 0x1) != DP_AUX_I2C_READ))
return;
if (drm_WARN_ON(&i915->drm, msg_length != 4))
return;
if (i2c_edid->edid_available && i2c_edid->target_selected) {
unsigned char val = edid_get_byte(vgpu);
aux_data_for_write = (val << 16);
} else
aux_data_for_write = (0xff << 16);
}
/* write the return value in AUX_CH_DATA reg which includes:
* ACK of I2C_WRITE
* returned byte if it is READ
*/
aux_data_for_write |= DP_AUX_I2C_REPLY_ACK << 24;
vgpu_vreg(vgpu, offset + 4) = aux_data_for_write;
}
/**
* intel_vgpu_init_i2c_edid - initialize vGPU i2c edid emulation
* @vgpu: a vGPU
*
* This function is used to initialize vGPU i2c edid emulation stuffs
*
*/
void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu)
{
struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
edid->state = I2C_NOT_SPECIFIED;
edid->port = -1;
edid->target_selected = false;
edid->edid_available = false;
edid->current_edid_read = 0;
memset(&edid->gmbus, 0, sizeof(struct intel_vgpu_i2c_gmbus));
edid->aux_ch.i2c_over_aux_ch = false;
edid->aux_ch.aux_ch_mot = false;
}
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018 Intel Corporation */
#ifndef __IPU3_UTIL_H
#define __IPU3_UTIL_H
struct device;
struct imgu_device;
#define IPU3_CSS_POOL_SIZE 4
/**
* struct imgu_css_map - store DMA mapping info for buffer
*
* @size: size of the buffer in bytes.
* @vaddr: kernel virtual address.
* @daddr: iova dma address to access IPU3.
* @pages: pages mapped to this buffer
*/
struct imgu_css_map {
size_t size;
void *vaddr;
dma_addr_t daddr;
struct page **pages;
};
/**
* struct imgu_css_pool - circular buffer pool definition
*
* @entry: array with IPU3_CSS_POOL_SIZE elements.
* @entry.param: a &struct imgu_css_map for storing the mem mapping.
* @entry.valid: used to mark if the entry has valid data.
* @last: write pointer, initialized to IPU3_CSS_POOL_SIZE.
*/
struct imgu_css_pool {
struct {
struct imgu_css_map param;
bool valid;
} entry[IPU3_CSS_POOL_SIZE];
u32 last;
};
int imgu_css_dma_buffer_resize(struct imgu_device *imgu,
struct imgu_css_map *map, size_t size);
void imgu_css_pool_cleanup(struct imgu_device *imgu,
struct imgu_css_pool *pool);
int imgu_css_pool_init(struct imgu_device *imgu, struct imgu_css_pool *pool,
size_t size);
void imgu_css_pool_get(struct imgu_css_pool *pool);
void imgu_css_pool_put(struct imgu_css_pool *pool);
const struct imgu_css_map *imgu_css_pool_last(struct imgu_css_pool *pool,
u32 last);
#endif
|
// SPDX-License-Identifier: GPL-2.0
/* smp.c: Sparc SMP support.
*
* Copyright (C) 1996 David S. Miller ([email protected])
* Copyright (C) 1998 Jakub Jelinek ([email protected])
* Copyright (C) 2004 Keith M Wesolowski ([email protected])
*/
#include <asm/head.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/timer.h>
#include <asm/leon.h>
#include "kernel.h"
#include "irq.h"
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
cpumask_t smp_commenced_mask = CPU_MASK_NONE;
const struct sparc32_ipi_ops *sparc32_ipi_ops;
/* The only guaranteed locking primitive available on all Sparc
* processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
* places the current byte at the effective address into dest_reg and
* places 0xff there afterwards. Pretty lame locking primitive
* compared to the Alpha and the Intel no? Most Sparcs have 'swap'
* instruction which is much better...
*/
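/*
 * Record per-cpu data for the given cpu id: delay loop calibration, clock
 * tick from the PROM "clock-frequency" property, PROM node and hardware MID.
 */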
void smp_store_cpu_info(int id)
{
int cpu_node;
int mid;
cpu_data(id).udelay_val = loops_per_jiffy;
cpu_find_by_mid(id, &cpu_node);
cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
"clock-frequency", 0);
cpu_data(id).prom_node = cpu_node;
mid = cpu_get_hwmid(cpu_node);
if (mid < 0) {
		printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x\n", id, cpu_node);
mid = 0;
}
cpu_data(id).mid = mid;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
unsigned long bogosum = 0;
int cpu, num = 0;
for_each_online_cpu(cpu) {
num++;
bogosum += cpu_data(cpu).udelay_val;
}
printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
num, bogosum/(500000/HZ),
(bogosum/(5000/HZ))%100);
switch(sparc_cpu_model) {
case sun4m:
smp4m_smp_done();
break;
case sun4d:
smp4d_smp_done();
break;
case sparc_leon:
leon_smp_done();
break;
case sun4e:
printk("SUN4E\n");
BUG();
break;
case sun4u:
printk("SUN4U\n");
BUG();
break;
default:
printk("UNKNOWN!\n");
BUG();
break;
}
}
void cpu_panic(void)
{
printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
panic("SMP bolixed\n");
}
struct linux_prom_registers smp_penguin_ctable = { 0 };
void arch_smp_send_reschedule(int cpu)
{
/*
* CPU model dependent way of implementing IPI generation targeting
* a single CPU. The trap handler needs only to do trap entry/return
* to call schedule.
*/
sparc32_ipi_ops->resched(cpu);
}
void smp_send_stop(void)
{
}
void arch_send_call_function_single_ipi(int cpu)
{
/* trigger one IPI single call on one CPU */
sparc32_ipi_ops->single(cpu);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
int cpu;
/* trigger IPI mask call on each CPU */
for_each_cpu(cpu, mask)
sparc32_ipi_ops->mask_one(cpu);
}
void smp_resched_interrupt(void)
{
irq_enter();
scheduler_ipi();
local_cpu_data().irq_resched_count++;
irq_exit();
/* re-schedule routine called by interrupt return code. */
}
void smp_call_function_single_interrupt(void)
{
irq_enter();
generic_smp_call_function_single_interrupt();
local_cpu_data().irq_call_count++;
irq_exit();
}
void smp_call_function_interrupt(void)
{
irq_enter();
generic_smp_call_function_interrupt();
local_cpu_data().irq_call_count++;
irq_exit();
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int i, cpuid, extra;
printk("Entering SMP Mode...\n");
extra = 0;
for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
if (cpuid >= NR_CPUS)
extra++;
}
/* i = number of cpus */
if (extra && max_cpus > i - extra)
printk("Warning: NR_CPUS is too low to start all cpus\n");
smp_store_cpu_info(boot_cpu_id);
switch(sparc_cpu_model) {
case sun4m:
smp4m_boot_cpus();
break;
case sun4d:
smp4d_boot_cpus();
break;
case sparc_leon:
leon_boot_cpus();
break;
case sun4e:
printk("SUN4E\n");
BUG();
break;
case sun4u:
printk("SUN4U\n");
BUG();
break;
default:
printk("UNKNOWN!\n");
BUG();
break;
}
}
/* Set this up early so that things like the scheduler can init
* properly. We use the same cpu mask for both the present and
* possible cpu map.
*/
void __init smp_setup_cpu_possible_map(void)
{
int instance, mid;
instance = 0;
while (!cpu_find_by_instance(instance, NULL, &mid)) {
if (mid < NR_CPUS) {
set_cpu_possible(mid, true);
set_cpu_present(mid, true);
}
instance++;
}
}
void __init smp_prepare_boot_cpu(void)
{
int cpuid = hard_smp_processor_id();
if (cpuid >= NR_CPUS) {
prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
prom_halt();
}
if (cpuid != 0)
printk("boot cpu id != 0, this could work but is untested\n");
current_thread_info()->cpu = cpuid;
set_cpu_online(cpuid, true);
set_cpu_possible(cpuid, true);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = 0;
switch(sparc_cpu_model) {
case sun4m:
ret = smp4m_boot_one_cpu(cpu, tidle);
break;
case sun4d:
ret = smp4d_boot_one_cpu(cpu, tidle);
break;
case sparc_leon:
ret = leon_boot_one_cpu(cpu, tidle);
break;
case sun4e:
printk("SUN4E\n");
BUG();
break;
case sun4u:
printk("SUN4U\n");
BUG();
break;
default:
printk("UNKNOWN!\n");
BUG();
break;
}
if (!ret) {
cpumask_set_cpu(cpu, &smp_commenced_mask);
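		/*
		 * Wait for the secondary to mark itself online from
		 * sparc_start_secondary() before we return.
		 */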
while (!cpu_online(cpu))
mb();
}
return ret;
}
static void arch_cpu_pre_starting(void *arg)
{
local_ops->cache_all();
local_ops->tlb_all();
switch(sparc_cpu_model) {
case sun4m:
sun4m_cpu_pre_starting(arg);
break;
case sun4d:
sun4d_cpu_pre_starting(arg);
break;
case sparc_leon:
leon_cpu_pre_starting(arg);
break;
default:
BUG();
}
}
static void arch_cpu_pre_online(void *arg)
{
unsigned int cpuid = hard_smp_processor_id();
register_percpu_ce(cpuid);
calibrate_delay();
smp_store_cpu_info(cpuid);
local_ops->cache_all();
local_ops->tlb_all();
switch(sparc_cpu_model) {
case sun4m:
sun4m_cpu_pre_online(arg);
break;
case sun4d:
sun4d_cpu_pre_online(arg);
break;
case sparc_leon:
leon_cpu_pre_online(arg);
break;
default:
BUG();
}
}
static void sparc_start_secondary(void *arg)
{
unsigned int cpu;
/*
* SMP booting is extremely fragile in some architectures. So run
* the cpu initialization code first before anything else.
*/
arch_cpu_pre_starting(arg);
cpu = smp_processor_id();
notify_cpu_starting(cpu);
arch_cpu_pre_online(arg);
/* Set the CPU in the cpu_online_mask */
set_cpu_online(cpu, true);
/* Enable local interrupts now */
local_irq_enable();
wmb();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
/* We should never reach here! */
BUG();
}
void smp_callin(void)
{
sparc_start_secondary(NULL);
}
void smp_bogo(struct seq_file *m)
{
int i;
for_each_online_cpu(i) {
seq_printf(m,
"Cpu%dBogo\t: %lu.%02lu\n",
i,
cpu_data(i).udelay_val/(500000/HZ),
(cpu_data(i).udelay_val/(5000/HZ))%100);
}
}
void smp_info(struct seq_file *m)
{
int i;
seq_printf(m, "State:\n");
for_each_online_cpu(i)
seq_printf(m, "CPU%d\t\t: online\n", i);
}
|
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*/
#include "system_global.h"
#include "ia_css_types.h"
#include "ia_css_macc1_5_table.host.h"
/* Multi-Axes Color Correction table for ISP2.
 * 64 values = one 2x2 matrix for each of 16 areas, [s1.12]
 * ineffective: 16 copies of the identity 2x2 matrix {4096, 0, 0, 4096}
 */
const struct ia_css_macc1_5_table default_macc1_5_table = {
{
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096
}
};
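/*
 * Illustrative sketch only, not part of the ISP driver interface: how one
 * s1.12 2x2 matrix from the table above would transform a chroma pair.
 * Since 4096 == 1.0 in s1.12, the identity entries {4096, 0, 0, 4096}
 * leave the input unchanged. The function name is hypothetical.
 */
static inline void example_macc_apply(const int m[4], int u_in, int v_in,
				      int *u_out, int *v_out)
{
	/* (m[0] m[1]; m[2] m[3]) * (u; v), rescaled back by 2^-12. */
	*u_out = (m[0] * u_in + m[1] * v_in) >> 12;
	*v_out = (m[2] * u_in + m[3] * v_in) >> 12;
}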
|