/*
* Copyright © 2014 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef VC4_PACKET_H
#define VC4_PACKET_H
#include "vc4_regs.h" /* for VC4_MASK, VC4_GET_FIELD, VC4_SET_FIELD */
enum vc4_packet {
VC4_PACKET_HALT = 0,
VC4_PACKET_NOP = 1,
VC4_PACKET_FLUSH = 4,
VC4_PACKET_FLUSH_ALL = 5,
VC4_PACKET_START_TILE_BINNING = 6,
VC4_PACKET_INCREMENT_SEMAPHORE = 7,
VC4_PACKET_WAIT_ON_SEMAPHORE = 8,
VC4_PACKET_BRANCH = 16,
VC4_PACKET_BRANCH_TO_SUB_LIST = 17,
VC4_PACKET_STORE_MS_TILE_BUFFER = 24,
VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF = 25,
VC4_PACKET_STORE_FULL_RES_TILE_BUFFER = 26,
VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER = 27,
VC4_PACKET_STORE_TILE_BUFFER_GENERAL = 28,
VC4_PACKET_LOAD_TILE_BUFFER_GENERAL = 29,
VC4_PACKET_GL_INDEXED_PRIMITIVE = 32,
VC4_PACKET_GL_ARRAY_PRIMITIVE = 33,
VC4_PACKET_COMPRESSED_PRIMITIVE = 48,
VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE = 49,
VC4_PACKET_PRIMITIVE_LIST_FORMAT = 56,
VC4_PACKET_GL_SHADER_STATE = 64,
VC4_PACKET_NV_SHADER_STATE = 65,
VC4_PACKET_VG_SHADER_STATE = 66,
VC4_PACKET_CONFIGURATION_BITS = 96,
VC4_PACKET_FLAT_SHADE_FLAGS = 97,
VC4_PACKET_POINT_SIZE = 98,
VC4_PACKET_LINE_WIDTH = 99,
VC4_PACKET_RHT_X_BOUNDARY = 100,
VC4_PACKET_DEPTH_OFFSET = 101,
VC4_PACKET_CLIP_WINDOW = 102,
VC4_PACKET_VIEWPORT_OFFSET = 103,
VC4_PACKET_Z_CLIPPING = 104,
VC4_PACKET_CLIPPER_XY_SCALING = 105,
VC4_PACKET_CLIPPER_Z_SCALING = 106,
VC4_PACKET_TILE_BINNING_MODE_CONFIG = 112,
VC4_PACKET_TILE_RENDERING_MODE_CONFIG = 113,
VC4_PACKET_CLEAR_COLORS = 114,
VC4_PACKET_TILE_COORDINATES = 115,
/* Not an actual hardware packet -- this is what we use to put
* references to GEM BOs in the command stream, since we need the u32
* in the actual address packet in order to store the offset from the
* start of the BO.
*/
VC4_PACKET_GEM_HANDLES = 254,
} __attribute__ ((__packed__));
#define VC4_PACKET_HALT_SIZE 1
#define VC4_PACKET_NOP_SIZE 1
#define VC4_PACKET_FLUSH_SIZE 1
#define VC4_PACKET_FLUSH_ALL_SIZE 1
#define VC4_PACKET_START_TILE_BINNING_SIZE 1
#define VC4_PACKET_INCREMENT_SEMAPHORE_SIZE 1
#define VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE 1
#define VC4_PACKET_BRANCH_SIZE 5
#define VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE 5
#define VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE 1
#define VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF_SIZE 1
#define VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE 5
#define VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE 5
#define VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE 7
#define VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE 7
#define VC4_PACKET_GL_INDEXED_PRIMITIVE_SIZE 14
#define VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE 10
#define VC4_PACKET_COMPRESSED_PRIMITIVE_SIZE 1
#define VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE_SIZE 1
#define VC4_PACKET_PRIMITIVE_LIST_FORMAT_SIZE 2
#define VC4_PACKET_GL_SHADER_STATE_SIZE 5
#define VC4_PACKET_NV_SHADER_STATE_SIZE 5
#define VC4_PACKET_VG_SHADER_STATE_SIZE 5
#define VC4_PACKET_CONFIGURATION_BITS_SIZE 4
#define VC4_PACKET_FLAT_SHADE_FLAGS_SIZE 5
#define VC4_PACKET_POINT_SIZE_SIZE 5
#define VC4_PACKET_LINE_WIDTH_SIZE 5
#define VC4_PACKET_RHT_X_BOUNDARY_SIZE 3
#define VC4_PACKET_DEPTH_OFFSET_SIZE 5
#define VC4_PACKET_CLIP_WINDOW_SIZE 9
#define VC4_PACKET_VIEWPORT_OFFSET_SIZE 5
#define VC4_PACKET_Z_CLIPPING_SIZE 9
#define VC4_PACKET_CLIPPER_XY_SCALING_SIZE 9
#define VC4_PACKET_CLIPPER_Z_SCALING_SIZE 9
#define VC4_PACKET_TILE_BINNING_MODE_CONFIG_SIZE 16
#define VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE 11
#define VC4_PACKET_CLEAR_COLORS_SIZE 14
#define VC4_PACKET_TILE_COORDINATES_SIZE 3
#define VC4_PACKET_GEM_HANDLES_SIZE 9
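/* Note (inferred from the list above, e.g. BRANCH = the packet code plus
 * a u32 address): the *_SIZE values count the whole packet, including the
 * one-byte packet code itself, so code-only packets such as
 * VC4_PACKET_HALT have size 1.
 */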
/* Number of multisamples supported. */
#define VC4_MAX_SAMPLES 4
/* Size of a full resolution color or Z tile buffer load/store: 64x64
 * pixels at 4 bytes per pixel.
 */
#define VC4_TILE_BUFFER_SIZE (64 * 64 * 4)
/** @{
* Bits used by packets like VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
* VC4_PACKET_TILE_RENDERING_MODE_CONFIG.
*/
#define VC4_TILING_FORMAT_LINEAR 0
#define VC4_TILING_FORMAT_T 1
#define VC4_TILING_FORMAT_LT 2
/** @} */
/** @{
*
* low bits of VC4_PACKET_STORE_FULL_RES_TILE_BUFFER and
* VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER.
*/
#define VC4_LOADSTORE_FULL_RES_EOF BIT(3)
#define VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL BIT(2)
#define VC4_LOADSTORE_FULL_RES_DISABLE_ZS BIT(1)
#define VC4_LOADSTORE_FULL_RES_DISABLE_COLOR BIT(0)
/** @} */
/** @{
*
* byte 2 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
* VC4_PACKET_LOAD_TILE_BUFFER_GENERAL (low bits of the address)
*/
#define VC4_LOADSTORE_TILE_BUFFER_EOF BIT(3)
#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_VG_MASK BIT(2)
#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_ZS BIT(1)
#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_COLOR BIT(0)
/** @} */
/** @{
*
* byte 0-1 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
* VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
*/
#define VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR BIT(15)
#define VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR BIT(14)
#define VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR BIT(13)
#define VC4_STORE_TILE_BUFFER_DISABLE_SWAP BIT(12)
#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK VC4_MASK(9, 8)
#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT 8
#define VC4_LOADSTORE_TILE_BUFFER_RGBA8888 0
#define VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER 1
#define VC4_LOADSTORE_TILE_BUFFER_BGR565 2
/** @} */
/** @{
*
* byte 0 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
* VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
*/
#define VC4_STORE_TILE_BUFFER_MODE_MASK VC4_MASK(7, 6)
#define VC4_STORE_TILE_BUFFER_MODE_SHIFT 6
#define VC4_STORE_TILE_BUFFER_MODE_SAMPLE0 (0 << 6)
#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X4 (1 << 6)
#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X16 (2 << 6)
/** The values of the field are VC4_TILING_FORMAT_* */
#define VC4_LOADSTORE_TILE_BUFFER_TILING_MASK VC4_MASK(5, 4)
#define VC4_LOADSTORE_TILE_BUFFER_TILING_SHIFT 4
#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK VC4_MASK(2, 0)
#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_SHIFT 0
#define VC4_LOADSTORE_TILE_BUFFER_NONE 0
#define VC4_LOADSTORE_TILE_BUFFER_COLOR 1
#define VC4_LOADSTORE_TILE_BUFFER_ZS 2
#define VC4_LOADSTORE_TILE_BUFFER_Z 3
#define VC4_LOADSTORE_TILE_BUFFER_VG_MASK 4
#define VC4_LOADSTORE_TILE_BUFFER_FULL 5
/** @} */
#define VC4_INDEX_BUFFER_U8 (0 << 4)
#define VC4_INDEX_BUFFER_U16 (1 << 4)
/* This flag is only present in NV shader state. */
#define VC4_SHADER_FLAG_SHADED_CLIP_COORDS BIT(3)
#define VC4_SHADER_FLAG_ENABLE_CLIPPING BIT(2)
#define VC4_SHADER_FLAG_VS_POINT_SIZE BIT(1)
#define VC4_SHADER_FLAG_FS_SINGLE_THREAD BIT(0)
/** @{ byte 2 of config bits. */
#define VC4_CONFIG_BITS_EARLY_Z_UPDATE BIT(1)
#define VC4_CONFIG_BITS_EARLY_Z BIT(0)
/** @} */
/** @{ byte 1 of config bits. */
#define VC4_CONFIG_BITS_Z_UPDATE BIT(7)
/** same values in this 3-bit field as PIPE_FUNC_* */
#define VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT 4
#define VC4_CONFIG_BITS_COVERAGE_READ_LEAVE BIT(3)
#define VC4_CONFIG_BITS_COVERAGE_UPDATE_NONZERO (0 << 1)
#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ODD (1 << 1)
#define VC4_CONFIG_BITS_COVERAGE_UPDATE_OR (2 << 1)
#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ZERO (3 << 1)
#define VC4_CONFIG_BITS_COVERAGE_PIPE_SELECT BIT(0)
/** @} */
/** @{ byte 0 of config bits. */
#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_NONE (0 << 6)
#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X (1 << 6)
#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_16X (2 << 6)
#define VC4_CONFIG_BITS_AA_POINTS_AND_LINES BIT(4)
#define VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET BIT(3)
#define VC4_CONFIG_BITS_CW_PRIMITIVES BIT(2)
#define VC4_CONFIG_BITS_ENABLE_PRIM_BACK BIT(1)
#define VC4_CONFIG_BITS_ENABLE_PRIM_FRONT BIT(0)
/** @} */
/** @{ bits in the last u8 of VC4_PACKET_TILE_BINNING_MODE_CONFIG */
#define VC4_BIN_CONFIG_DB_NON_MS BIT(7)
#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK VC4_MASK(6, 5)
#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_SHIFT 5
#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_32 0
#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_64 1
#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128 2
#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_256 3
#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK VC4_MASK(4, 3)
#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_SHIFT 3
#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32 0
#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_64 1
#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_128 2
#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_256 3
#define VC4_BIN_CONFIG_AUTO_INIT_TSDA BIT(2)
#define VC4_BIN_CONFIG_TILE_BUFFER_64BIT BIT(1)
#define VC4_BIN_CONFIG_MS_MODE_4X BIT(0)
/** @} */
/** @{ bits in the last u16 of VC4_PACKET_TILE_RENDERING_MODE_CONFIG */
#define VC4_RENDER_CONFIG_DB_NON_MS BIT(12)
#define VC4_RENDER_CONFIG_EARLY_Z_COVERAGE_DISABLE BIT(11)
#define VC4_RENDER_CONFIG_EARLY_Z_DIRECTION_G BIT(10)
#define VC4_RENDER_CONFIG_COVERAGE_MODE BIT(9)
#define VC4_RENDER_CONFIG_ENABLE_VG_MASK BIT(8)
/** The values of the field are VC4_TILING_FORMAT_* */
#define VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK VC4_MASK(7, 6)
#define VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT 6
#define VC4_RENDER_CONFIG_DECIMATE_MODE_1X (0 << 4)
#define VC4_RENDER_CONFIG_DECIMATE_MODE_4X (1 << 4)
#define VC4_RENDER_CONFIG_DECIMATE_MODE_16X (2 << 4)
#define VC4_RENDER_CONFIG_FORMAT_MASK VC4_MASK(3, 2)
#define VC4_RENDER_CONFIG_FORMAT_SHIFT 2
#define VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED 0
#define VC4_RENDER_CONFIG_FORMAT_RGBA8888 1
#define VC4_RENDER_CONFIG_FORMAT_BGR565 2
#define VC4_RENDER_CONFIG_TILE_BUFFER_64BIT BIT(1)
#define VC4_RENDER_CONFIG_MS_MODE_4X BIT(0)
/** @} */
#define VC4_PRIMITIVE_LIST_FORMAT_16_INDEX (1 << 4)
#define VC4_PRIMITIVE_LIST_FORMAT_32_XY (3 << 4)
#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_POINTS (0 << 0)
#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_LINES (1 << 0)
#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES (2 << 0)
#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_RHT (3 << 0)
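/* Illustrative only: the format and type fields above are meant to be
 * OR'ed together into the payload byte of
 * VC4_PACKET_PRIMITIVE_LIST_FORMAT, e.g.
 *
 *	VC4_PRIMITIVE_LIST_FORMAT_16_INDEX |
 *	VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES
 */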
enum vc4_texture_data_type {
VC4_TEXTURE_TYPE_RGBA8888 = 0,
VC4_TEXTURE_TYPE_RGBX8888 = 1,
VC4_TEXTURE_TYPE_RGBA4444 = 2,
VC4_TEXTURE_TYPE_RGBA5551 = 3,
VC4_TEXTURE_TYPE_RGB565 = 4,
VC4_TEXTURE_TYPE_LUMINANCE = 5,
VC4_TEXTURE_TYPE_ALPHA = 6,
VC4_TEXTURE_TYPE_LUMALPHA = 7,
VC4_TEXTURE_TYPE_ETC1 = 8,
VC4_TEXTURE_TYPE_S16F = 9,
VC4_TEXTURE_TYPE_S8 = 10,
VC4_TEXTURE_TYPE_S16 = 11,
VC4_TEXTURE_TYPE_BW1 = 12,
VC4_TEXTURE_TYPE_A4 = 13,
VC4_TEXTURE_TYPE_A1 = 14,
VC4_TEXTURE_TYPE_RGBA64 = 15,
VC4_TEXTURE_TYPE_RGBA32R = 16,
VC4_TEXTURE_TYPE_YUV422R = 17,
};
#define VC4_TEX_P0_OFFSET_MASK VC4_MASK(31, 12)
#define VC4_TEX_P0_OFFSET_SHIFT 12
#define VC4_TEX_P0_CSWIZ_MASK VC4_MASK(11, 10)
#define VC4_TEX_P0_CSWIZ_SHIFT 10
#define VC4_TEX_P0_CMMODE_MASK VC4_MASK(9, 9)
#define VC4_TEX_P0_CMMODE_SHIFT 9
#define VC4_TEX_P0_FLIPY_MASK VC4_MASK(8, 8)
#define VC4_TEX_P0_FLIPY_SHIFT 8
#define VC4_TEX_P0_TYPE_MASK VC4_MASK(7, 4)
#define VC4_TEX_P0_TYPE_SHIFT 4
#define VC4_TEX_P0_MIPLVLS_MASK VC4_MASK(3, 0)
#define VC4_TEX_P0_MIPLVLS_SHIFT 0
#define VC4_TEX_P1_TYPE4_MASK VC4_MASK(31, 31)
#define VC4_TEX_P1_TYPE4_SHIFT 31
#define VC4_TEX_P1_HEIGHT_MASK VC4_MASK(30, 20)
#define VC4_TEX_P1_HEIGHT_SHIFT 20
#define VC4_TEX_P1_ETCFLIP_MASK VC4_MASK(19, 19)
#define VC4_TEX_P1_ETCFLIP_SHIFT 19
#define VC4_TEX_P1_WIDTH_MASK VC4_MASK(18, 8)
#define VC4_TEX_P1_WIDTH_SHIFT 8
#define VC4_TEX_P1_MAGFILT_MASK VC4_MASK(7, 7)
#define VC4_TEX_P1_MAGFILT_SHIFT 7
# define VC4_TEX_P1_MAGFILT_LINEAR 0
# define VC4_TEX_P1_MAGFILT_NEAREST 1
#define VC4_TEX_P1_MINFILT_MASK VC4_MASK(6, 4)
#define VC4_TEX_P1_MINFILT_SHIFT 4
# define VC4_TEX_P1_MINFILT_LINEAR 0
# define VC4_TEX_P1_MINFILT_NEAREST 1
# define VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR 2
# define VC4_TEX_P1_MINFILT_NEAR_MIP_LIN 3
# define VC4_TEX_P1_MINFILT_LIN_MIP_NEAR 4
# define VC4_TEX_P1_MINFILT_LIN_MIP_LIN 5
#define VC4_TEX_P1_WRAP_T_MASK VC4_MASK(3, 2)
#define VC4_TEX_P1_WRAP_T_SHIFT 2
#define VC4_TEX_P1_WRAP_S_MASK VC4_MASK(1, 0)
#define VC4_TEX_P1_WRAP_S_SHIFT 0
# define VC4_TEX_P1_WRAP_REPEAT 0
# define VC4_TEX_P1_WRAP_CLAMP 1
# define VC4_TEX_P1_WRAP_MIRROR 2
# define VC4_TEX_P1_WRAP_BORDER 3
#define VC4_TEX_P2_PTYPE_MASK VC4_MASK(31, 30)
#define VC4_TEX_P2_PTYPE_SHIFT 30
# define VC4_TEX_P2_PTYPE_IGNORED 0
# define VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE 1
# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS 2
# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS 3
/* VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE bits */
#define VC4_TEX_P2_CMST_MASK VC4_MASK(29, 12)
#define VC4_TEX_P2_CMST_SHIFT 12
#define VC4_TEX_P2_BSLOD_MASK VC4_MASK(0, 0)
#define VC4_TEX_P2_BSLOD_SHIFT 0
/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS */
#define VC4_TEX_P2_CHEIGHT_MASK VC4_MASK(22, 12)
#define VC4_TEX_P2_CHEIGHT_SHIFT 12
#define VC4_TEX_P2_CWIDTH_MASK VC4_MASK(10, 0)
#define VC4_TEX_P2_CWIDTH_SHIFT 0
/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS */
#define VC4_TEX_P2_CYOFF_MASK VC4_MASK(22, 12)
#define VC4_TEX_P2_CYOFF_SHIFT 12
#define VC4_TEX_P2_CXOFF_MASK VC4_MASK(10, 0)
#define VC4_TEX_P2_CXOFF_SHIFT 0
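/* A minimal sketch (illustrative, not from the hardware documentation)
 * of how a driver could pack the first texture parameter word with
 * VC4_SET_FIELD() from vc4_regs.h; base_addr and last_miplvl are
 * hypothetical variables for the example, and base_addr is assumed to
 * be 4096-byte aligned:
 *
 *	u32 p0 = VC4_SET_FIELD(base_addr >> 12, VC4_TEX_P0_OFFSET) |
 *		 VC4_SET_FIELD(VC4_TEXTURE_TYPE_RGBA8888, VC4_TEX_P0_TYPE) |
 *		 VC4_SET_FIELD(last_miplvl, VC4_TEX_P0_MIPLVLS);
 */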
#endif /* VC4_PACKET_H */
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2017 Free Electrons
* Maxime Ripard <[email protected]>
*/
#ifndef _SUN4I_LVDS_H_
#define _SUN4I_LVDS_H_
struct drm_device;
struct sun4i_tcon;
int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon);
#endif /* _SUN4I_LVDS_H_ */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Xillybus Ltd, http://xillybus.com
*
* Driver for the XillyUSB FPGA/host framework.
*
* This driver interfaces with a special IP core in an FPGA, setting up
* a pipe between a hardware FIFO in the programmable logic and a device
* file in the host. The number of such pipes and their attributes are
* set up on the logic. This driver detects these automatically and
* creates the device files accordingly.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/module.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/usb.h>
#include "xillybus_class.h"
MODULE_DESCRIPTION("Driver for XillyUSB FPGA IP Core");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
MODULE_ALIAS("xillyusb");
MODULE_LICENSE("GPL v2");
#define XILLY_RX_TIMEOUT (10 * HZ / 1000) /* 10 ms */
#define XILLY_RESPONSE_TIMEOUT (500 * HZ / 1000) /* 500 ms */
#define BUF_SIZE_ORDER 4 /* Each bulk buffer spans 2^4 pages */
#define BUFNUM 8 /* Buffers per bulk endpoint */
#define LOG2_IDT_FIFO_SIZE 16 /* 64 KiB FIFO for receiving the IDT */
#define LOG2_INITIAL_FIFO_BUF_SIZE 16 /* Initial log2 of a FIFO buffer's byte size */
#define MSG_EP_NUM 1 /* BULK OUT endpoint for opcode messages */
#define IN_EP_NUM 1 /* BULK IN endpoint for all upstream data */
static const char xillyname[] = "xillyusb";
static unsigned int fifo_buf_order;
static struct workqueue_struct *wakeup_wq;
#define USB_VENDOR_ID_XILINX 0x03fd
#define USB_VENDOR_ID_ALTERA 0x09fb
#define USB_PRODUCT_ID_XILLYUSB 0xebbe
static const struct usb_device_id xillyusb_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_XILINX, USB_PRODUCT_ID_XILLYUSB) },
{ USB_DEVICE(USB_VENDOR_ID_ALTERA, USB_PRODUCT_ID_XILLYUSB) },
{ }
};
MODULE_DEVICE_TABLE(usb, xillyusb_table);
struct xillyusb_dev;
struct xillyfifo {
unsigned int bufsize; /* In bytes, always a power of 2 */
unsigned int bufnum;
unsigned int size; /* Lazy: Equals bufsize * bufnum */
unsigned int buf_order;
int fill; /* Number of bytes in the FIFO */
spinlock_t lock;
wait_queue_head_t waitq;
unsigned int readpos;
unsigned int readbuf;
unsigned int writepos;
unsigned int writebuf;
char **mem;
};
struct xillyusb_channel;
struct xillyusb_endpoint {
struct xillyusb_dev *xdev;
struct mutex ep_mutex; /* serialize operations on endpoint */
struct list_head buffers;
struct list_head filled_buffers;
spinlock_t buffers_lock; /* protect these two lists */
unsigned int order;
unsigned int buffer_size;
unsigned int fill_mask;
int outstanding_urbs;
struct usb_anchor anchor;
struct xillyfifo fifo;
struct work_struct workitem;
bool shutting_down;
bool drained;
bool wake_on_drain;
u8 ep_num;
};
struct xillyusb_channel {
struct xillyusb_dev *xdev;
struct xillyfifo *in_fifo;
struct xillyusb_endpoint *out_ep;
struct mutex lock; /* protect @out_ep, @in_fifo, bit fields below */
struct mutex in_mutex; /* serialize fops on FPGA to host stream */
struct mutex out_mutex; /* serialize fops on host to FPGA stream */
wait_queue_head_t flushq;
int chan_idx;
u32 in_consumed_bytes;
u32 in_current_checkpoint;
u32 out_bytes;
unsigned int in_log2_element_size;
unsigned int out_log2_element_size;
unsigned int in_log2_fifo_size;
unsigned int out_log2_fifo_size;
unsigned int read_data_ok; /* EOF not arrived (yet) */
unsigned int poll_used;
unsigned int flushing;
unsigned int flushed;
unsigned int canceled;
/* Bit fields protected by @lock except for initialization */
unsigned readable:1;
unsigned writable:1;
unsigned open_for_read:1;
unsigned open_for_write:1;
unsigned in_synchronous:1;
unsigned out_synchronous:1;
unsigned in_seekable:1;
unsigned out_seekable:1;
};
struct xillybuffer {
struct list_head entry;
struct xillyusb_endpoint *ep;
void *buf;
unsigned int len;
};
struct xillyusb_dev {
struct xillyusb_channel *channels;
struct usb_device *udev;
struct device *dev; /* For dev_err() and such */
struct kref kref;
struct workqueue_struct *workq;
int error;
spinlock_t error_lock; /* protect @error */
struct work_struct wakeup_workitem;
int num_channels;
struct xillyusb_endpoint *msg_ep;
struct xillyusb_endpoint *in_ep;
struct mutex msg_mutex; /* serialize opcode transmission */
int in_bytes_left;
int leftover_chan_num;
unsigned int in_counter;
struct mutex process_in_mutex; /* synchronize wakeup_all() */
};
/*
* kref_mutex is used in xillyusb_open() to prevent the xillyusb_dev
* struct from being freed during the gap between being found by
* xillybus_find_inode() and having its reference count incremented.
*/
static DEFINE_MUTEX(kref_mutex);
/* FPGA to host opcodes */
enum {
OPCODE_DATA = 0,
OPCODE_QUIESCE_ACK = 1,
OPCODE_EOF = 2,
OPCODE_REACHED_CHECKPOINT = 3,
OPCODE_CANCELED_CHECKPOINT = 4,
};
/* Host to FPGA opcodes */
enum {
OPCODE_QUIESCE = 0,
OPCODE_REQ_IDT = 1,
OPCODE_SET_CHECKPOINT = 2,
OPCODE_CLOSE = 3,
OPCODE_SET_PUSH = 4,
OPCODE_UPDATE_PUSH = 5,
OPCODE_CANCEL_CHECKPOINT = 6,
OPCODE_SET_ADDR = 7,
};
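/*
 * A host to FPGA message, as assembled by xillyusb_send_opcode() below,
 * consists of two little-endian 32-bit words: the first carries the
 * channel number in bits 11:0 and the opcode in bits 27:24, the second
 * carries the opcode's data argument. For example, setting a checkpoint
 * of 0x100 on channel 3 sends { 0x02000003, 0x00000100 }
 * (OPCODE_SET_CHECKPOINT being 2).
 */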
/*
 * fifo_write() and fifo_read() are NOT reentrant (i.e. multiple concurrent
 * calls to either on the same FIFO are not allowed), however it's OK for
 * one thread to call fifo_write() while another calls fifo_read() on the
 * same FIFO at the same time (single producer, single consumer).
 */
static int fifo_write(struct xillyfifo *fifo,
const void *data, unsigned int len,
int (*copier)(void *, const void *, int))
{
unsigned int done = 0;
unsigned int todo = len;
unsigned int nmax;
unsigned int writepos = fifo->writepos;
unsigned int writebuf = fifo->writebuf;
unsigned long flags;
int rc;
nmax = fifo->size - READ_ONCE(fifo->fill);
while (1) {
unsigned int nrail = fifo->bufsize - writepos;
unsigned int n = min(todo, nmax);
if (n == 0) {
spin_lock_irqsave(&fifo->lock, flags);
fifo->fill += done;
spin_unlock_irqrestore(&fifo->lock, flags);
fifo->writepos = writepos;
fifo->writebuf = writebuf;
return done;
}
if (n > nrail)
n = nrail;
rc = (*copier)(fifo->mem[writebuf] + writepos, data + done, n);
if (rc)
return rc;
done += n;
todo -= n;
writepos += n;
nmax -= n;
if (writepos == fifo->bufsize) {
writepos = 0;
writebuf++;
if (writebuf == fifo->bufnum)
writebuf = 0;
}
}
}
static int fifo_read(struct xillyfifo *fifo,
void *data, unsigned int len,
int (*copier)(void *, const void *, int))
{
unsigned int done = 0;
unsigned int todo = len;
unsigned int fill;
unsigned int readpos = fifo->readpos;
unsigned int readbuf = fifo->readbuf;
unsigned long flags;
int rc;
/*
 * The spinlock here is necessary, because otherwise fifo->fill
 * could have been increased by fifo_write() after writing data
 * to the buffer, while that data was potentially not yet visible
 * to this thread at the time the updated fifo->fill was. That
 * could lead to reading invalid data.
 */
spin_lock_irqsave(&fifo->lock, flags);
fill = fifo->fill;
spin_unlock_irqrestore(&fifo->lock, flags);
while (1) {
unsigned int nrail = fifo->bufsize - readpos;
unsigned int n = min(todo, fill);
if (n == 0) {
spin_lock_irqsave(&fifo->lock, flags);
fifo->fill -= done;
spin_unlock_irqrestore(&fifo->lock, flags);
fifo->readpos = readpos;
fifo->readbuf = readbuf;
return done;
}
if (n > nrail)
n = nrail;
rc = (*copier)(data + done, fifo->mem[readbuf] + readpos, n);
if (rc)
return rc;
done += n;
todo -= n;
readpos += n;
fill -= n;
if (readpos == fifo->bufsize) {
readpos = 0;
readbuf++;
if (readbuf == fifo->bufnum)
readbuf = 0;
}
}
}
/*
* These three wrapper functions are used as the @copier argument to
* fifo_write() and fifo_read(), so that they can work directly with
* user memory as well.
*/
static int xilly_copy_from_user(void *dst, const void *src, int n)
{
if (copy_from_user(dst, (const void __user *)src, n))
return -EFAULT;
return 0;
}
static int xilly_copy_to_user(void *dst, const void *src, int n)
{
if (copy_to_user((void __user *)dst, src, n))
return -EFAULT;
return 0;
}
static int xilly_memcpy(void *dst, const void *src, int n)
{
memcpy(dst, src, n);
return 0;
}
static int fifo_init(struct xillyfifo *fifo,
unsigned int log2_size)
{
unsigned int log2_bufnum;
unsigned int buf_order;
int i;
unsigned int log2_fifo_buf_size;
retry:
log2_fifo_buf_size = fifo_buf_order + PAGE_SHIFT;
if (log2_size > log2_fifo_buf_size) {
log2_bufnum = log2_size - log2_fifo_buf_size;
buf_order = fifo_buf_order;
fifo->bufsize = 1 << log2_fifo_buf_size;
} else {
log2_bufnum = 0;
buf_order = (log2_size > PAGE_SHIFT) ?
log2_size - PAGE_SHIFT : 0;
fifo->bufsize = 1 << log2_size;
}
fifo->bufnum = 1 << log2_bufnum;
fifo->size = fifo->bufnum * fifo->bufsize;
fifo->buf_order = buf_order;
fifo->mem = kmalloc_array(fifo->bufnum, sizeof(void *), GFP_KERNEL);
if (!fifo->mem)
return -ENOMEM;
for (i = 0; i < fifo->bufnum; i++) {
fifo->mem[i] = (void *)
__get_free_pages(GFP_KERNEL, buf_order);
if (!fifo->mem[i])
goto memfail;
}
fifo->fill = 0;
fifo->readpos = 0;
fifo->readbuf = 0;
fifo->writepos = 0;
fifo->writebuf = 0;
spin_lock_init(&fifo->lock);
init_waitqueue_head(&fifo->waitq);
return 0;
memfail:
for (i--; i >= 0; i--)
free_pages((unsigned long)fifo->mem[i], buf_order);
kfree(fifo->mem);
fifo->mem = NULL;
if (fifo_buf_order) {
fifo_buf_order--;
goto retry;
} else {
return -ENOMEM;
}
}
static void fifo_mem_release(struct xillyfifo *fifo)
{
int i;
if (!fifo->mem)
return;
for (i = 0; i < fifo->bufnum; i++)
free_pages((unsigned long)fifo->mem[i], fifo->buf_order);
kfree(fifo->mem);
}
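/*
 * A minimal kernel-space usage sketch of the FIFO API above
 * (illustrative only, error handling omitted):
 *
 *	struct xillyfifo fifo;
 *	char out[16];
 *	int n;
 *
 *	fifo_init(&fifo, 12); // 4 KiB FIFO
 *	fifo_write(&fifo, "hello", 5, xilly_memcpy);
 *	n = fifo_read(&fifo, out, sizeof(out), xilly_memcpy);
 *	// n == 5: fifo_read() returns what's available, without blocking
 *	fifo_mem_release(&fifo);
 */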
/*
* When endpoint_quiesce() returns, the endpoint has no URBs submitted,
* won't accept any new URB submissions, and its related work item doesn't
* and won't run anymore.
*/
static void endpoint_quiesce(struct xillyusb_endpoint *ep)
{
mutex_lock(&ep->ep_mutex);
ep->shutting_down = true;
mutex_unlock(&ep->ep_mutex);
usb_kill_anchored_urbs(&ep->anchor);
cancel_work_sync(&ep->workitem);
}
/*
* Note that endpoint_dealloc() also frees fifo memory (if allocated), even
* though endpoint_alloc doesn't allocate that memory.
*/
static void endpoint_dealloc(struct xillyusb_endpoint *ep)
{
struct list_head *this, *next;
fifo_mem_release(&ep->fifo);
/* Join @filled_buffers with @buffers to free these entries too */
list_splice(&ep->filled_buffers, &ep->buffers);
list_for_each_safe(this, next, &ep->buffers) {
struct xillybuffer *xb =
list_entry(this, struct xillybuffer, entry);
free_pages((unsigned long)xb->buf, ep->order);
kfree(xb);
}
kfree(ep);
}
static struct xillyusb_endpoint
*endpoint_alloc(struct xillyusb_dev *xdev,
u8 ep_num,
void (*work)(struct work_struct *),
unsigned int order,
int bufnum)
{
int i;
struct xillyusb_endpoint *ep;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (!ep)
return NULL;
INIT_LIST_HEAD(&ep->buffers);
INIT_LIST_HEAD(&ep->filled_buffers);
spin_lock_init(&ep->buffers_lock);
mutex_init(&ep->ep_mutex);
init_usb_anchor(&ep->anchor);
INIT_WORK(&ep->workitem, work);
ep->order = order;
ep->buffer_size = 1 << (PAGE_SHIFT + order);
ep->outstanding_urbs = 0;
ep->drained = true;
ep->wake_on_drain = false;
ep->xdev = xdev;
ep->ep_num = ep_num;
ep->shutting_down = false;
for (i = 0; i < bufnum; i++) {
struct xillybuffer *xb;
unsigned long addr;
xb = kzalloc(sizeof(*xb), GFP_KERNEL);
if (!xb) {
endpoint_dealloc(ep);
return NULL;
}
addr = __get_free_pages(GFP_KERNEL, order);
if (!addr) {
kfree(xb);
endpoint_dealloc(ep);
return NULL;
}
xb->buf = (void *)addr;
xb->ep = ep;
list_add_tail(&xb->entry, &ep->buffers);
}
return ep;
}
static void cleanup_dev(struct kref *kref)
{
struct xillyusb_dev *xdev =
container_of(kref, struct xillyusb_dev, kref);
if (xdev->in_ep)
endpoint_dealloc(xdev->in_ep);
if (xdev->msg_ep)
endpoint_dealloc(xdev->msg_ep);
if (xdev->workq)
destroy_workqueue(xdev->workq);
usb_put_dev(xdev->udev);
kfree(xdev->channels); /* Argument may be NULL, and that's fine */
kfree(xdev);
}
/*
 * @process_in_mutex is taken to ensure that bulk_in_work() won't call
 * process_bulk_in() after wakeup_all()'s execution: The latter zeroes all
 * @read_data_ok entries, which would make process_bulk_in() report false
 * errors if executed. The mechanism relies on xdev->error being assigned
 * a non-zero value by report_io_error() prior to queueing wakeup_all(),
 * which prevents bulk_in_work() from calling process_bulk_in().
 */
static void wakeup_all(struct work_struct *work)
{
int i;
struct xillyusb_dev *xdev = container_of(work, struct xillyusb_dev,
wakeup_workitem);
mutex_lock(&xdev->process_in_mutex);
for (i = 0; i < xdev->num_channels; i++) {
struct xillyusb_channel *chan = &xdev->channels[i];
mutex_lock(&chan->lock);
if (chan->in_fifo) {
/*
* Fake an EOF: Even if such arrives, it won't be
* processed.
*/
chan->read_data_ok = 0;
wake_up_interruptible(&chan->in_fifo->waitq);
}
if (chan->out_ep)
wake_up_interruptible(&chan->out_ep->fifo.waitq);
mutex_unlock(&chan->lock);
wake_up_interruptible(&chan->flushq);
}
mutex_unlock(&xdev->process_in_mutex);
wake_up_interruptible(&xdev->msg_ep->fifo.waitq);
kref_put(&xdev->kref, cleanup_dev);
}
static void report_io_error(struct xillyusb_dev *xdev,
int errcode)
{
unsigned long flags;
bool do_once = false;
spin_lock_irqsave(&xdev->error_lock, flags);
if (!xdev->error) {
xdev->error = errcode;
do_once = true;
}
spin_unlock_irqrestore(&xdev->error_lock, flags);
if (do_once) {
kref_get(&xdev->kref); /* xdev is used by work item */
queue_work(wakeup_wq, &xdev->wakeup_workitem);
}
}
/*
* safely_assign_in_fifo() changes the value of chan->in_fifo and ensures
* the previous pointer is never used after its return.
*/
static void safely_assign_in_fifo(struct xillyusb_channel *chan,
struct xillyfifo *fifo)
{
mutex_lock(&chan->lock);
chan->in_fifo = fifo;
mutex_unlock(&chan->lock);
flush_work(&chan->xdev->in_ep->workitem);
}
static void bulk_in_completer(struct urb *urb)
{
struct xillybuffer *xb = urb->context;
struct xillyusb_endpoint *ep = xb->ep;
unsigned long flags;
if (urb->status) {
if (!(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN))
report_io_error(ep->xdev, -EIO);
spin_lock_irqsave(&ep->buffers_lock, flags);
list_add_tail(&xb->entry, &ep->buffers);
ep->outstanding_urbs--;
spin_unlock_irqrestore(&ep->buffers_lock, flags);
return;
}
xb->len = urb->actual_length;
spin_lock_irqsave(&ep->buffers_lock, flags);
list_add_tail(&xb->entry, &ep->filled_buffers);
spin_unlock_irqrestore(&ep->buffers_lock, flags);
if (!ep->shutting_down)
queue_work(ep->xdev->workq, &ep->workitem);
}
static void bulk_out_completer(struct urb *urb)
{
struct xillybuffer *xb = urb->context;
struct xillyusb_endpoint *ep = xb->ep;
unsigned long flags;
if (urb->status &&
(!(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN)))
report_io_error(ep->xdev, -EIO);
spin_lock_irqsave(&ep->buffers_lock, flags);
list_add_tail(&xb->entry, &ep->buffers);
ep->outstanding_urbs--;
spin_unlock_irqrestore(&ep->buffers_lock, flags);
if (!ep->shutting_down)
queue_work(ep->xdev->workq, &ep->workitem);
}
static void try_queue_bulk_in(struct xillyusb_endpoint *ep)
{
struct xillyusb_dev *xdev = ep->xdev;
struct xillybuffer *xb;
struct urb *urb;
int rc;
unsigned long flags;
unsigned int bufsize = ep->buffer_size;
mutex_lock(&ep->ep_mutex);
if (ep->shutting_down || xdev->error)
goto done;
while (1) {
spin_lock_irqsave(&ep->buffers_lock, flags);
if (list_empty(&ep->buffers)) {
spin_unlock_irqrestore(&ep->buffers_lock, flags);
goto done;
}
xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
list_del(&xb->entry);
ep->outstanding_urbs++;
spin_unlock_irqrestore(&ep->buffers_lock, flags);
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
report_io_error(xdev, -ENOMEM);
goto relist;
}
usb_fill_bulk_urb(urb, xdev->udev,
usb_rcvbulkpipe(xdev->udev, ep->ep_num),
xb->buf, bufsize, bulk_in_completer, xb);
usb_anchor_urb(urb, &ep->anchor);
rc = usb_submit_urb(urb, GFP_KERNEL);
if (rc) {
report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
-EIO);
goto unanchor;
}
usb_free_urb(urb); /* This just decrements reference count */
}
unanchor:
usb_unanchor_urb(urb);
usb_free_urb(urb);
relist:
spin_lock_irqsave(&ep->buffers_lock, flags);
list_add_tail(&xb->entry, &ep->buffers);
ep->outstanding_urbs--;
spin_unlock_irqrestore(&ep->buffers_lock, flags);
done:
mutex_unlock(&ep->ep_mutex);
}
static void try_queue_bulk_out(struct xillyusb_endpoint *ep)
{
struct xillyfifo *fifo = &ep->fifo;
struct xillyusb_dev *xdev = ep->xdev;
struct xillybuffer *xb;
struct urb *urb;
int rc;
unsigned int fill;
unsigned long flags;
bool do_wake = false;
mutex_lock(&ep->ep_mutex);
if (ep->shutting_down || xdev->error)
goto done;
fill = READ_ONCE(fifo->fill) & ep->fill_mask;
while (1) {
int count;
unsigned int max_read;
spin_lock_irqsave(&ep->buffers_lock, flags);
/*
* Race conditions might have the FIFO filled while the
* endpoint is marked as drained here. That doesn't matter,
* because the sole purpose of @drained is to ensure that
* certain data has been sent on the USB channel before
* shutting it down. Hence knowing that the FIFO appears
* to be empty with no outstanding URBs at some moment
* is good enough.
*/
if (!fill) {
ep->drained = !ep->outstanding_urbs;
if (ep->drained && ep->wake_on_drain)
do_wake = true;
spin_unlock_irqrestore(&ep->buffers_lock, flags);
goto done;
}
ep->drained = false;
if ((fill < ep->buffer_size && ep->outstanding_urbs) ||
list_empty(&ep->buffers)) {
spin_unlock_irqrestore(&ep->buffers_lock, flags);
goto done;
}
xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
list_del(&xb->entry);
ep->outstanding_urbs++;
spin_unlock_irqrestore(&ep->buffers_lock, flags);
max_read = min(fill, ep->buffer_size);
count = fifo_read(&ep->fifo, xb->buf, max_read, xilly_memcpy);
/*
* xilly_memcpy always returns 0 => fifo_read can't fail =>
* count > 0
*/
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
report_io_error(xdev, -ENOMEM);
goto relist;
}
usb_fill_bulk_urb(urb, xdev->udev,
usb_sndbulkpipe(xdev->udev, ep->ep_num),
xb->buf, count, bulk_out_completer, xb);
usb_anchor_urb(urb, &ep->anchor);
rc = usb_submit_urb(urb, GFP_KERNEL);
if (rc) {
report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
-EIO);
goto unanchor;
}
usb_free_urb(urb); /* This just decrements reference count */
fill -= count;
do_wake = true;
}
unanchor:
usb_unanchor_urb(urb);
usb_free_urb(urb);
relist:
spin_lock_irqsave(&ep->buffers_lock, flags);
list_add_tail(&xb->entry, &ep->buffers);
ep->outstanding_urbs--;
spin_unlock_irqrestore(&ep->buffers_lock, flags);
done:
mutex_unlock(&ep->ep_mutex);
if (do_wake)
wake_up_interruptible(&fifo->waitq);
}
static void bulk_out_work(struct work_struct *work)
{
struct xillyusb_endpoint *ep = container_of(work,
struct xillyusb_endpoint,
workitem);
try_queue_bulk_out(ep);
}
static int process_in_opcode(struct xillyusb_dev *xdev,
int opcode,
int chan_num)
{
struct xillyusb_channel *chan;
struct device *dev = xdev->dev;
int chan_idx = chan_num >> 1;
if (chan_idx >= xdev->num_channels) {
dev_err(dev, "Received illegal channel ID %d from FPGA\n",
chan_num);
return -EIO;
}
chan = &xdev->channels[chan_idx];
switch (opcode) {
case OPCODE_EOF:
if (!chan->read_data_ok) {
dev_err(dev, "Received unexpected EOF for channel %d\n",
chan_num);
return -EIO;
}
/*
* A write memory barrier ensures that the FIFO's fill level
* is visible before read_data_ok turns zero, so the data in
* the FIFO isn't missed by the consumer.
*/
smp_wmb();
WRITE_ONCE(chan->read_data_ok, 0);
wake_up_interruptible(&chan->in_fifo->waitq);
break;
case OPCODE_REACHED_CHECKPOINT:
chan->flushing = 0;
wake_up_interruptible(&chan->flushq);
break;
case OPCODE_CANCELED_CHECKPOINT:
chan->canceled = 1;
wake_up_interruptible(&chan->flushq);
break;
default:
dev_err(dev, "Received illegal opcode %d from FPGA\n",
opcode);
return -EIO;
}
return 0;
}
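/*
 * The BULK IN stream, as parsed by process_bulk_in() below, is a
 * sequence of little-endian 32-bit control words: channel number in
 * bits 11:0, a count in bits 21:12 and an opcode in bits 27:24. An
 * OPCODE_DATA control word is followed by @count + 1 data bytes, padded
 * to a 32-bit boundary; for all other opcodes, @count is a running
 * message counter, used to detect lost or reordered messages.
 */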
static int process_bulk_in(struct xillybuffer *xb)
{
struct xillyusb_endpoint *ep = xb->ep;
struct xillyusb_dev *xdev = ep->xdev;
struct device *dev = xdev->dev;
int dws = xb->len >> 2;
__le32 *p = xb->buf;
u32 ctrlword;
struct xillyusb_channel *chan;
struct xillyfifo *fifo;
int chan_num = 0, opcode;
int chan_idx;
int bytes, count, dwconsume;
int in_bytes_left = 0;
int rc;
if ((dws << 2) != xb->len) {
dev_err(dev, "Received BULK IN transfer with %d bytes, not a multiple of 4\n",
xb->len);
return -EIO;
}
if (xdev->in_bytes_left) {
bytes = min(xdev->in_bytes_left, dws << 2);
in_bytes_left = xdev->in_bytes_left - bytes;
chan_num = xdev->leftover_chan_num;
goto resume_leftovers;
}
while (dws) {
ctrlword = le32_to_cpu(*p++);
dws--;
chan_num = ctrlword & 0xfff;
count = (ctrlword >> 12) & 0x3ff;
opcode = (ctrlword >> 24) & 0xf;
if (opcode != OPCODE_DATA) {
unsigned int in_counter = xdev->in_counter++ & 0x3ff;
if (count != in_counter) {
dev_err(dev, "Expected opcode counter %d, got %d\n",
in_counter, count);
return -EIO;
}
rc = process_in_opcode(xdev, opcode, chan_num);
if (rc)
return rc;
continue;
}
bytes = min(count + 1, dws << 2);
in_bytes_left = count + 1 - bytes;
resume_leftovers:
chan_idx = chan_num >> 1;
if (!(chan_num & 1) || chan_idx >= xdev->num_channels ||
!xdev->channels[chan_idx].read_data_ok) {
dev_err(dev, "Received illegal channel ID %d from FPGA\n",
chan_num);
return -EIO;
}
chan = &xdev->channels[chan_idx];
fifo = chan->in_fifo;
if (unlikely(!fifo))
return -EIO; /* We got really unexpected data */
if (bytes != fifo_write(fifo, p, bytes, xilly_memcpy)) {
dev_err(dev, "Misbehaving FPGA overflowed an upstream FIFO!\n");
return -EIO;
}
wake_up_interruptible(&fifo->waitq);
dwconsume = (bytes + 3) >> 2;
dws -= dwconsume;
p += dwconsume;
}
xdev->in_bytes_left = in_bytes_left;
xdev->leftover_chan_num = chan_num;
return 0;
}
static void bulk_in_work(struct work_struct *work)
{
struct xillyusb_endpoint *ep =
container_of(work, struct xillyusb_endpoint, workitem);
struct xillyusb_dev *xdev = ep->xdev;
unsigned long flags;
struct xillybuffer *xb;
bool consumed = false;
int rc = 0;
mutex_lock(&xdev->process_in_mutex);
spin_lock_irqsave(&ep->buffers_lock, flags);
while (1) {
if (rc || list_empty(&ep->filled_buffers)) {
spin_unlock_irqrestore(&ep->buffers_lock, flags);
mutex_unlock(&xdev->process_in_mutex);
if (rc)
report_io_error(xdev, rc);
else if (consumed)
try_queue_bulk_in(ep);
return;
}
xb = list_first_entry(&ep->filled_buffers, struct xillybuffer,
entry);
list_del(&xb->entry);
spin_unlock_irqrestore(&ep->buffers_lock, flags);
consumed = true;
if (!xdev->error)
rc = process_bulk_in(xb);
spin_lock_irqsave(&ep->buffers_lock, flags);
list_add_tail(&xb->entry, &ep->buffers);
ep->outstanding_urbs--;
}
}
static int xillyusb_send_opcode(struct xillyusb_dev *xdev,
int chan_num, char opcode, u32 data)
{
struct xillyusb_endpoint *ep = xdev->msg_ep;
struct xillyfifo *fifo = &ep->fifo;
__le32 msg[2];
int rc = 0;
msg[0] = cpu_to_le32((chan_num & 0xfff) |
((opcode & 0xf) << 24));
msg[1] = cpu_to_le32(data);
mutex_lock(&xdev->msg_mutex);
/*
* The wait queue is woken with the interruptible variant, so the
* wait function matches, however returning because of an interrupt
* will mess things up considerably, in particular when the caller is
* the release method. And the xdev->error part prevents being stuck
* forever in the event of a bizarre hardware bug: Pull the USB plug.
*/
while (wait_event_interruptible(fifo->waitq,
fifo->fill <= (fifo->size - 8) ||
xdev->error))
; /* Empty loop */
if (xdev->error) {
rc = xdev->error;
goto unlock_done;
}
fifo_write(fifo, (void *)msg, 8, xilly_memcpy);
try_queue_bulk_out(ep);
unlock_done:
mutex_unlock(&xdev->msg_mutex);
return rc;
}
/*
* Note that flush_downstream() merely waits for the data to arrive to
* the application logic at the FPGA -- unlike PCIe Xillybus' counterpart,
* it does nothing to make it happen (and neither is it necessary).
*
* This function is not reentrant for the same @chan, but this is covered
* by the fact that for any given @chan, it's called only by the open,
* write, llseek and flush fops methods, which can't run in parallel (and the
* write + flush and llseek method handlers are protected with out_mutex).
*
* chan->flushed is there to avoid multiple flushes at the same position,
* in particular as a result of programs that close the file descriptor
* e.g. after a dup2() for redirection.
*/
static int flush_downstream(struct xillyusb_channel *chan,
long timeout,
bool interruptible)
{
struct xillyusb_dev *xdev = chan->xdev;
int chan_num = chan->chan_idx << 1;
long deadline, left_to_sleep;
int rc;
if (chan->flushed)
return 0;
deadline = jiffies + 1 + timeout;
if (chan->flushing) {
long cancel_deadline = jiffies + 1 + XILLY_RESPONSE_TIMEOUT;
chan->canceled = 0;
rc = xillyusb_send_opcode(xdev, chan_num,
OPCODE_CANCEL_CHECKPOINT, 0);
if (rc)
return rc; /* Only real error, never -EINTR */
/* Ignoring interrupts. Cancellation must be handled */
while (!chan->canceled) {
left_to_sleep = cancel_deadline - ((long)jiffies);
if (left_to_sleep <= 0) {
report_io_error(xdev, -EIO);
return -EIO;
}
rc = wait_event_interruptible_timeout(chan->flushq,
chan->canceled ||
xdev->error,
left_to_sleep);
if (xdev->error)
return xdev->error;
}
}
chan->flushing = 1;
/*
* The checkpoint is given in terms of data elements, not bytes. As
* a result, if less than an element's worth of data is stored in the
* FIFO, it's not flushed, including the flush before closing, which
* means that such data is lost. This is consistent with PCIe Xillybus.
*/
rc = xillyusb_send_opcode(xdev, chan_num,
OPCODE_SET_CHECKPOINT,
chan->out_bytes >>
chan->out_log2_element_size);
if (rc)
return rc; /* Only real error, never -EINTR */
if (!timeout) {
while (chan->flushing) {
rc = wait_event_interruptible(chan->flushq,
!chan->flushing ||
xdev->error);
if (xdev->error)
return xdev->error;
if (interruptible && rc)
return -EINTR;
}
goto done;
}
while (chan->flushing) {
left_to_sleep = deadline - ((long)jiffies);
if (left_to_sleep <= 0)
return -ETIMEDOUT;
rc = wait_event_interruptible_timeout(chan->flushq,
!chan->flushing ||
xdev->error,
left_to_sleep);
if (xdev->error)
return xdev->error;
if (interruptible && rc < 0)
return -EINTR;
}
done:
chan->flushed = 1;
return 0;
}
/* request_read_anything(): Ask the FPGA for any little amount of data */
static int request_read_anything(struct xillyusb_channel *chan,
char opcode)
{
struct xillyusb_dev *xdev = chan->xdev;
unsigned int sh = chan->in_log2_element_size;
int chan_num = (chan->chan_idx << 1) | 1;
u32 mercy = chan->in_consumed_bytes + (2 << sh) - 1;
return xillyusb_send_opcode(xdev, chan_num, opcode, mercy >> sh);
}
static int xillyusb_open(struct inode *inode, struct file *filp)
{
struct xillyusb_dev *xdev;
struct xillyusb_channel *chan;
struct xillyfifo *in_fifo = NULL;
struct xillyusb_endpoint *out_ep = NULL;
int rc;
int index;
mutex_lock(&kref_mutex);
rc = xillybus_find_inode(inode, (void **)&xdev, &index);
if (rc) {
mutex_unlock(&kref_mutex);
return rc;
}
kref_get(&xdev->kref);
mutex_unlock(&kref_mutex);
chan = &xdev->channels[index];
filp->private_data = chan;
mutex_lock(&chan->lock);
rc = -ENODEV;
if (xdev->error)
goto unmutex_fail;
if (((filp->f_mode & FMODE_READ) && !chan->readable) ||
((filp->f_mode & FMODE_WRITE) && !chan->writable))
goto unmutex_fail;
if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_READ) &&
chan->in_synchronous) {
dev_err(xdev->dev,
"open() failed: O_NONBLOCK not allowed for read on this device\n");
goto unmutex_fail;
}
if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_WRITE) &&
chan->out_synchronous) {
dev_err(xdev->dev,
"open() failed: O_NONBLOCK not allowed for write on this device\n");
goto unmutex_fail;
}
rc = -EBUSY;
if (((filp->f_mode & FMODE_READ) && chan->open_for_read) ||
((filp->f_mode & FMODE_WRITE) && chan->open_for_write))
goto unmutex_fail;
if (filp->f_mode & FMODE_READ)
chan->open_for_read = 1;
if (filp->f_mode & FMODE_WRITE)
chan->open_for_write = 1;
mutex_unlock(&chan->lock);
if (filp->f_mode & FMODE_WRITE) {
out_ep = endpoint_alloc(xdev,
(chan->chan_idx + 2) | USB_DIR_OUT,
bulk_out_work, BUF_SIZE_ORDER, BUFNUM);
if (!out_ep) {
rc = -ENOMEM;
goto unopen;
}
rc = fifo_init(&out_ep->fifo, chan->out_log2_fifo_size);
if (rc)
goto late_unopen;
out_ep->fill_mask = -(1 << chan->out_log2_element_size);
chan->out_bytes = 0;
chan->flushed = 0;
/*
* Sending a flush request to a previously closed stream
* effectively opens it, and also waits until the command is
* confirmed by the FPGA. The latter is necessary because the
* data is sent through a separate BULK OUT endpoint, and the
* xHCI controller is free to reorder transmissions.
*
* This can't go wrong unless there's a serious hardware error
* (or the computer is stuck for 500 ms?)
*/
rc = flush_downstream(chan, XILLY_RESPONSE_TIMEOUT, false);
if (rc == -ETIMEDOUT) {
rc = -EIO;
report_io_error(xdev, rc);
}
if (rc)
goto late_unopen;
}
if (filp->f_mode & FMODE_READ) {
in_fifo = kzalloc(sizeof(*in_fifo), GFP_KERNEL);
if (!in_fifo) {
rc = -ENOMEM;
goto late_unopen;
}
rc = fifo_init(in_fifo, chan->in_log2_fifo_size);
if (rc) {
kfree(in_fifo);
goto late_unopen;
}
}
mutex_lock(&chan->lock);
if (in_fifo) {
chan->in_fifo = in_fifo;
chan->read_data_ok = 1;
}
if (out_ep)
chan->out_ep = out_ep;
mutex_unlock(&chan->lock);
if (in_fifo) {
u32 in_checkpoint = 0;
if (!chan->in_synchronous)
in_checkpoint = in_fifo->size >>
chan->in_log2_element_size;
chan->in_consumed_bytes = 0;
chan->poll_used = 0;
chan->in_current_checkpoint = in_checkpoint;
rc = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
OPCODE_SET_CHECKPOINT,
in_checkpoint);
if (rc) /* Failure guarantees that opcode wasn't sent */
goto unfifo;
/*
* In non-blocking mode, request the FPGA to send any data it
* has right away. Otherwise, the first read() will always
* return -EAGAIN, which is OK strictly speaking, but ugly.
* Checking and unrolling if this fails isn't worth the
* effort -- the error is propagated to the first read()
* anyhow.
*/
if (filp->f_flags & O_NONBLOCK)
request_read_anything(chan, OPCODE_SET_PUSH);
}
return 0;
unfifo:
chan->read_data_ok = 0;
safely_assign_in_fifo(chan, NULL);
fifo_mem_release(in_fifo);
kfree(in_fifo);
if (out_ep) {
mutex_lock(&chan->lock);
chan->out_ep = NULL;
mutex_unlock(&chan->lock);
}
late_unopen:
if (out_ep)
endpoint_dealloc(out_ep);
unopen:
mutex_lock(&chan->lock);
if (filp->f_mode & FMODE_READ)
chan->open_for_read = 0;
if (filp->f_mode & FMODE_WRITE)
chan->open_for_write = 0;
mutex_unlock(&chan->lock);
kref_put(&xdev->kref, cleanup_dev);
return rc;
unmutex_fail:
kref_put(&xdev->kref, cleanup_dev);
mutex_unlock(&chan->lock);
return rc;
}
static ssize_t xillyusb_read(struct file *filp, char __user *userbuf,
size_t count, loff_t *f_pos)
{
struct xillyusb_channel *chan = filp->private_data;
struct xillyusb_dev *xdev = chan->xdev;
struct xillyfifo *fifo = chan->in_fifo;
int chan_num = (chan->chan_idx << 1) | 1;
long deadline, left_to_sleep;
int bytes_done = 0;
bool sent_set_push = false;
int rc;
deadline = jiffies + 1 + XILLY_RX_TIMEOUT;
rc = mutex_lock_interruptible(&chan->in_mutex);
if (rc)
return rc;
while (1) {
u32 fifo_checkpoint_bytes, complete_checkpoint_bytes;
u32 complete_checkpoint, fifo_checkpoint;
u32 checkpoint;
s32 diff, leap;
unsigned int sh = chan->in_log2_element_size;
bool checkpoint_for_complete;
rc = fifo_read(fifo, (__force void *)userbuf + bytes_done,
count - bytes_done, xilly_copy_to_user);
if (rc < 0)
break;
bytes_done += rc;
chan->in_consumed_bytes += rc;
left_to_sleep = deadline - ((long)jiffies);
/*
* Some 32-bit arithmetic that may wrap. Note that
* complete_checkpoint is rounded up to the closest element
* boundary, because the read() can't be completed otherwise.
* fifo_checkpoint_bytes is rounded down, because it protects
* in_fifo from overflowing.
*/
fifo_checkpoint_bytes = chan->in_consumed_bytes + fifo->size;
complete_checkpoint_bytes =
chan->in_consumed_bytes + count - bytes_done;
fifo_checkpoint = fifo_checkpoint_bytes >> sh;
complete_checkpoint =
(complete_checkpoint_bytes + (1 << sh) - 1) >> sh;
diff = (fifo_checkpoint - complete_checkpoint) << sh;
if (chan->in_synchronous && diff >= 0) {
checkpoint = complete_checkpoint;
checkpoint_for_complete = true;
} else {
checkpoint = fifo_checkpoint;
checkpoint_for_complete = false;
}
leap = (checkpoint - chan->in_current_checkpoint) << sh;
/*
* To prevent flooding of OPCODE_SET_CHECKPOINT commands as
* data is consumed, it's issued only if it moves the
* checkpoint by at least an 8th of the FIFO's size, or if
* it's necessary to complete the number of bytes requested by
* the read() call.
*
* chan->read_data_ok is checked to spare an unnecessary
* submission after receiving EOF, however it's harmless if
* such slips away.
*/
if (chan->read_data_ok &&
(leap > (fifo->size >> 3) ||
(checkpoint_for_complete && leap > 0))) {
chan->in_current_checkpoint = checkpoint;
rc = xillyusb_send_opcode(xdev, chan_num,
OPCODE_SET_CHECKPOINT,
checkpoint);
if (rc)
break;
}
if (bytes_done == count ||
(left_to_sleep <= 0 && bytes_done))
break;
/*
* Reaching here means that the FIFO was empty when
* fifo_read() returned, but not necessarily right now. Error
* and EOF are checked and reported only now, so that no data
* that managed its way to the FIFO is lost.
*/
if (!READ_ONCE(chan->read_data_ok)) { /* FPGA has sent EOF */
/* Has data slipped into the FIFO since fifo_read()? */
smp_rmb();
if (READ_ONCE(fifo->fill))
continue;
rc = 0;
break;
}
if (xdev->error) {
rc = xdev->error;
break;
}
if (filp->f_flags & O_NONBLOCK) {
rc = -EAGAIN;
break;
}
if (!sent_set_push) {
rc = xillyusb_send_opcode(xdev, chan_num,
OPCODE_SET_PUSH,
complete_checkpoint);
if (rc)
break;
sent_set_push = true;
}
if (left_to_sleep > 0) {
/*
* Note that when xdev->error is set (e.g. when the
* device is unplugged), read_data_ok turns zero and
* fifo->waitq is woken, so xdev->error needs no
* special attention here.
*/
rc = wait_event_interruptible_timeout
(fifo->waitq,
fifo->fill || !chan->read_data_ok,
left_to_sleep);
} else { /* bytes_done == 0 */
/* Tell FPGA to send anything it has */
rc = request_read_anything(chan, OPCODE_UPDATE_PUSH);
if (rc)
break;
rc = wait_event_interruptible
(fifo->waitq,
fifo->fill || !chan->read_data_ok);
}
if (rc < 0) {
rc = -EINTR;
break;
}
}
if (((filp->f_flags & O_NONBLOCK) || chan->poll_used) &&
!READ_ONCE(fifo->fill))
request_read_anything(chan, OPCODE_SET_PUSH);
mutex_unlock(&chan->in_mutex);
if (bytes_done)
return bytes_done;
return rc;
}
static int xillyusb_flush(struct file *filp, fl_owner_t id)
{
struct xillyusb_channel *chan = filp->private_data;
int rc;
if (!(filp->f_mode & FMODE_WRITE))
return 0;
rc = mutex_lock_interruptible(&chan->out_mutex);
if (rc)
return rc;
/*
* One second's timeout on flushing. Interrupts are ignored, because if
* the user pressed CTRL-C, that interrupt will still be in flight by
* the time we reach here, and the opportunity to flush is lost.
*/
rc = flush_downstream(chan, HZ, false);
mutex_unlock(&chan->out_mutex);
if (rc == -ETIMEDOUT) {
/* The things you do to use dev_warn() and not pr_warn() */
struct xillyusb_dev *xdev = chan->xdev;
mutex_lock(&chan->lock);
if (!xdev->error)
dev_warn(xdev->dev,
"Timed out while flushing. Output data may be lost.\n");
mutex_unlock(&chan->lock);
}
return rc;
}
static ssize_t xillyusb_write(struct file *filp, const char __user *userbuf,
size_t count, loff_t *f_pos)
{
struct xillyusb_channel *chan = filp->private_data;
struct xillyusb_dev *xdev = chan->xdev;
struct xillyfifo *fifo = &chan->out_ep->fifo;
int rc;
rc = mutex_lock_interruptible(&chan->out_mutex);
if (rc)
return rc;
while (1) {
if (xdev->error) {
rc = xdev->error;
break;
}
if (count == 0)
break;
rc = fifo_write(fifo, (__force void *)userbuf, count,
xilly_copy_from_user);
if (rc != 0)
break;
if (filp->f_flags & O_NONBLOCK) {
rc = -EAGAIN;
break;
}
if (wait_event_interruptible
(fifo->waitq,
fifo->fill != fifo->size || xdev->error)) {
rc = -EINTR;
break;
}
}
if (rc < 0)
goto done;
chan->out_bytes += rc;
if (rc) {
try_queue_bulk_out(chan->out_ep);
chan->flushed = 0;
}
if (chan->out_synchronous) {
int flush_rc = flush_downstream(chan, 0, true);
if (flush_rc && !rc)
rc = flush_rc;
}
done:
mutex_unlock(&chan->out_mutex);
return rc;
}
static int xillyusb_release(struct inode *inode, struct file *filp)
{
struct xillyusb_channel *chan = filp->private_data;
struct xillyusb_dev *xdev = chan->xdev;
int rc_read = 0, rc_write = 0;
if (filp->f_mode & FMODE_READ) {
struct xillyfifo *in_fifo = chan->in_fifo;
rc_read = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
OPCODE_CLOSE, 0);
/*
* If rc_read is nonzero, xdev->error indicates a global
* device error. The error is reported later, so that
* resources are freed.
*
* Looping on wait_event_interruptible() kinda breaks the idea
* of being interruptible, and this should have been
* wait_event(). Only it's being woken with
* wake_up_interruptible() for the sake of other uses. If
* there's a global device error, chan->read_data_ok is
* deasserted and the wait queue is woken, so this is covered.
*/
while (wait_event_interruptible(in_fifo->waitq,
!chan->read_data_ok))
; /* Empty loop */
safely_assign_in_fifo(chan, NULL);
fifo_mem_release(in_fifo);
kfree(in_fifo);
mutex_lock(&chan->lock);
chan->open_for_read = 0;
mutex_unlock(&chan->lock);
}
if (filp->f_mode & FMODE_WRITE) {
struct xillyusb_endpoint *ep = chan->out_ep;
/*
* chan->flushing isn't zeroed. If the pre-release flush timed
* out, a cancel request will be sent before the next
* OPCODE_SET_CHECKPOINT (i.e. when the file is opened again).
* This is despite that the FPGA forgets about the checkpoint
* request as the file closes. Still, in an exceptional race
* condition, the FPGA could send an OPCODE_REACHED_CHECKPOINT
* just before closing that would reach the host after the
* file has re-opened.
*/
mutex_lock(&chan->lock);
chan->out_ep = NULL;
mutex_unlock(&chan->lock);
endpoint_quiesce(ep);
endpoint_dealloc(ep);
/* See comments on rc_read above */
rc_write = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
OPCODE_CLOSE, 0);
mutex_lock(&chan->lock);
chan->open_for_write = 0;
mutex_unlock(&chan->lock);
}
kref_put(&xdev->kref, cleanup_dev);
return rc_read ? rc_read : rc_write;
}
/*
* Xillybus' API allows device nodes to be seekable, giving the user
* application access to a RAM array on the FPGA (or logic emulating it).
*/
static loff_t xillyusb_llseek(struct file *filp, loff_t offset, int whence)
{
struct xillyusb_channel *chan = filp->private_data;
struct xillyusb_dev *xdev = chan->xdev;
loff_t pos = filp->f_pos;
int rc = 0;
unsigned int log2_element_size = chan->readable ?
chan->in_log2_element_size : chan->out_log2_element_size;
/*
* Take both mutexes not allowing interrupts, since it seems like
* common applications don't expect an -EINTR here. Besides, multiple
* access to a single file descriptor on seekable devices is a mess
* anyhow.
*/
mutex_lock(&chan->out_mutex);
mutex_lock(&chan->in_mutex);
switch (whence) {
case SEEK_SET:
pos = offset;
break;
case SEEK_CUR:
pos += offset;
break;
case SEEK_END:
pos = offset; /* Going to the end => to the beginning */
break;
default:
rc = -EINVAL;
goto end;
}
/* In any case, we must finish on an element boundary */
if (pos & ((1 << log2_element_size) - 1)) {
rc = -EINVAL;
goto end;
}
rc = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
OPCODE_SET_ADDR,
pos >> log2_element_size);
if (rc)
goto end;
if (chan->writable) {
chan->flushed = 0;
rc = flush_downstream(chan, HZ, false);
}
end:
mutex_unlock(&chan->out_mutex);
mutex_unlock(&chan->in_mutex);
if (rc) /* Return error after releasing mutexes */
return rc;
filp->f_pos = pos;
return pos;
}
static __poll_t xillyusb_poll(struct file *filp, poll_table *wait)
{
struct xillyusb_channel *chan = filp->private_data;
__poll_t mask = 0;
if (chan->in_fifo)
poll_wait(filp, &chan->in_fifo->waitq, wait);
if (chan->out_ep)
poll_wait(filp, &chan->out_ep->fifo.waitq, wait);
/*
* If this is the first time poll() is called, and the file is
* readable, set the relevant flag. Also tell the FPGA to send all it
* has, to kickstart the mechanism that ensures there's always some
* data in in_fifo unless the stream is dry end-to-end. Note that the
* first poll() may not return an EPOLLIN, even if there's data on the
* FPGA. Rather, the data will arrive soon, and trigger the relevant
* wait queue.
*/
if (!chan->poll_used && chan->in_fifo) {
chan->poll_used = 1;
request_read_anything(chan, OPCODE_SET_PUSH);
}
/*
 * poll() won't play ball regarding read() channels which
 * are synchronous. Allowing that would create situations where data
 * has been delivered at the FPGA, with users expecting select() to
 * wake up, which it may not. So it's made to never work for them.
 */
if (chan->in_fifo && !chan->in_synchronous &&
(READ_ONCE(chan->in_fifo->fill) || !chan->read_data_ok))
mask |= EPOLLIN | EPOLLRDNORM;
if (chan->out_ep &&
(READ_ONCE(chan->out_ep->fifo.fill) != chan->out_ep->fifo.size))
mask |= EPOLLOUT | EPOLLWRNORM;
if (chan->xdev->error)
mask |= EPOLLERR;
return mask;
}
static const struct file_operations xillyusb_fops = {
.owner = THIS_MODULE,
.read = xillyusb_read,
.write = xillyusb_write,
.open = xillyusb_open,
.flush = xillyusb_flush,
.release = xillyusb_release,
.llseek = xillyusb_llseek,
.poll = xillyusb_poll,
};
static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
{
struct usb_device *udev = xdev->udev;
/* Verify that device has the two fundamental bulk in/out endpoints */
if (usb_pipe_type_check(udev, usb_sndbulkpipe(udev, MSG_EP_NUM)) ||
usb_pipe_type_check(udev, usb_rcvbulkpipe(udev, IN_EP_NUM)))
return -ENODEV;
xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
bulk_out_work, 1, 2);
if (!xdev->msg_ep)
return -ENOMEM;
if (fifo_init(&xdev->msg_ep->fifo, 13)) /* 8 kiB */
goto dealloc;
xdev->msg_ep->fill_mask = -8; /* 8 bytes granularity */
xdev->in_ep = endpoint_alloc(xdev, IN_EP_NUM | USB_DIR_IN,
bulk_in_work, BUF_SIZE_ORDER, BUFNUM);
if (!xdev->in_ep)
goto dealloc;
try_queue_bulk_in(xdev->in_ep);
return 0;
dealloc:
endpoint_dealloc(xdev->msg_ep); /* Also frees FIFO mem if allocated */
xdev->msg_ep = NULL;
return -ENOMEM;
}
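/*
 * Each channel is described by a pair of little-endian 16-bit words in
 * the IDT (upstream first, then downstream), decoded by setup_channels()
 * below: bit 7 marks the entry as valid, bit 6 synchronous mode, bit 5
 * seekability, bits 3:0 the log2 of the element size, and bits 12:8 the
 * log2 of the FIFO size minus 16.
 */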
static int setup_channels(struct xillyusb_dev *xdev,
__le16 *chandesc,
int num_channels)
{
struct usb_device *udev = xdev->udev;
struct xillyusb_channel *chan, *new_channels;
int i;
chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
if (!chan)
return -ENOMEM;
new_channels = chan;
for (i = 0; i < num_channels; i++, chan++) {
unsigned int in_desc = le16_to_cpu(*chandesc++);
unsigned int out_desc = le16_to_cpu(*chandesc++);
chan->xdev = xdev;
mutex_init(&chan->in_mutex);
mutex_init(&chan->out_mutex);
mutex_init(&chan->lock);
init_waitqueue_head(&chan->flushq);
chan->chan_idx = i;
if (in_desc & 0x80) { /* Entry is valid */
chan->readable = 1;
chan->in_synchronous = !!(in_desc & 0x40);
chan->in_seekable = !!(in_desc & 0x20);
chan->in_log2_element_size = in_desc & 0x0f;
chan->in_log2_fifo_size = ((in_desc >> 8) & 0x1f) + 16;
}
/*
* A downstream channel should never exist above index 13,
* as it would request a nonexistent BULK endpoint > 15.
* In the peculiar case that it does, it's ignored silently.
*/
if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
if (usb_pipe_type_check(udev,
usb_sndbulkpipe(udev, i + 2))) {
dev_err(xdev->dev,
"Missing BULK OUT endpoint %d\n",
i + 2);
kfree(new_channels);
return -ENODEV;
}
chan->writable = 1;
chan->out_synchronous = !!(out_desc & 0x40);
chan->out_seekable = !!(out_desc & 0x20);
chan->out_log2_element_size = out_desc & 0x0f;
chan->out_log2_fifo_size =
((out_desc >> 8) & 0x1f) + 16;
}
}
xdev->channels = new_channels;
return 0;
}
static int xillyusb_discovery(struct usb_interface *interface)
{
int rc;
struct xillyusb_dev *xdev = usb_get_intfdata(interface);
__le16 bogus_chandesc[2];
struct xillyfifo idt_fifo;
struct xillyusb_channel *chan;
unsigned int idt_len, names_offset;
unsigned char *idt;
int num_channels;
rc = xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);
if (rc) {
dev_err(&interface->dev, "Failed to send quiesce request. Aborting.\n");
return rc;
}
/* Phase I: Set up one fake upstream channel and obtain IDT */
/* Set up a fake IDT with one async IN stream */
bogus_chandesc[0] = cpu_to_le16(0x80);
bogus_chandesc[1] = cpu_to_le16(0);
rc = setup_channels(xdev, bogus_chandesc, 1);
if (rc)
return rc;
rc = fifo_init(&idt_fifo, LOG2_IDT_FIFO_SIZE);
if (rc)
return rc;
chan = xdev->channels;
chan->in_fifo = &idt_fifo;
chan->read_data_ok = 1;
xdev->num_channels = 1;
rc = xillyusb_send_opcode(xdev, ~0, OPCODE_REQ_IDT, 0);
if (rc) {
dev_err(&interface->dev, "Failed to send IDT request. Aborting.\n");
goto unfifo;
}
rc = wait_event_interruptible_timeout(idt_fifo.waitq,
!chan->read_data_ok,
XILLY_RESPONSE_TIMEOUT);
if (xdev->error) {
rc = xdev->error;
goto unfifo;
}
if (rc < 0) {
rc = -EINTR; /* Interrupt on probe method? Interesting. */
goto unfifo;
}
if (chan->read_data_ok) {
rc = -ETIMEDOUT;
dev_err(&interface->dev, "No response from FPGA. Aborting.\n");
goto unfifo;
}
idt_len = READ_ONCE(idt_fifo.fill);
idt = kmalloc(idt_len, GFP_KERNEL);
if (!idt) {
rc = -ENOMEM;
goto unfifo;
}
fifo_read(&idt_fifo, idt, idt_len, xilly_memcpy);
if (crc32_le(~0, idt, idt_len) != 0) {
dev_err(&interface->dev, "IDT failed CRC check. Aborting.\n");
rc = -ENODEV;
goto unidt;
}
if (*idt > 0x90) {
dev_err(&interface->dev, "No support for IDT version 0x%02x. Maybe the xillyusb driver needs an upgrade. Aborting.\n",
(int)*idt);
rc = -ENODEV;
goto unidt;
}
/* Phase II: Set up the streams as defined in IDT */
num_channels = le16_to_cpu(*((__le16 *)(idt + 1)));
names_offset = 3 + num_channels * 4;
idt_len -= 4; /* Exclude CRC */
if (idt_len < names_offset) {
dev_err(&interface->dev, "IDT too short. This is exceptionally weird, because its CRC is OK\n");
rc = -ENODEV;
goto unidt;
}
rc = setup_channels(xdev, (void *)idt + 3, num_channels);
if (rc)
goto unidt;
/*
* Except for wildly misbehaving hardware, or if it was disconnected
* just after responding with the IDT, there is no reason for any
* work item to be running now. To be sure that xdev->channels
* is visible to anything that might run in parallel, flush the
* device's workqueue and the wakeup work item. This rarely
* does anything.
*/
flush_workqueue(xdev->workq);
flush_work(&xdev->wakeup_workitem);
xdev->num_channels = num_channels;
fifo_mem_release(&idt_fifo);
kfree(chan);
rc = xillybus_init_chrdev(&interface->dev, &xillyusb_fops,
THIS_MODULE, xdev,
idt + names_offset,
idt_len - names_offset,
num_channels,
xillyname, true);
kfree(idt);
return rc;
unidt:
kfree(idt);
unfifo:
safely_assign_in_fifo(chan, NULL);
fifo_mem_release(&idt_fifo);
return rc;
}
static int xillyusb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct xillyusb_dev *xdev;
int rc;
xdev = kzalloc(sizeof(*xdev), GFP_KERNEL);
if (!xdev)
return -ENOMEM;
kref_init(&xdev->kref);
mutex_init(&xdev->process_in_mutex);
mutex_init(&xdev->msg_mutex);
xdev->udev = usb_get_dev(interface_to_usbdev(interface));
xdev->dev = &interface->dev;
xdev->error = 0;
spin_lock_init(&xdev->error_lock);
xdev->in_counter = 0;
xdev->in_bytes_left = 0;
xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI, 0);
if (!xdev->workq) {
dev_err(&interface->dev, "Failed to allocate work queue\n");
rc = -ENOMEM;
goto fail;
}
INIT_WORK(&xdev->wakeup_workitem, wakeup_all);
usb_set_intfdata(interface, xdev);
rc = xillyusb_setup_base_eps(xdev);
if (rc)
goto fail;
rc = xillyusb_discovery(interface);
if (rc)
goto latefail;
return 0;
latefail:
endpoint_quiesce(xdev->in_ep);
endpoint_quiesce(xdev->msg_ep);
fail:
usb_set_intfdata(interface, NULL);
kref_put(&xdev->kref, cleanup_dev);
return rc;
}
static void xillyusb_disconnect(struct usb_interface *interface)
{
struct xillyusb_dev *xdev = usb_get_intfdata(interface);
struct xillyusb_endpoint *msg_ep = xdev->msg_ep;
struct xillyfifo *fifo = &msg_ep->fifo;
int rc;
int i;
xillybus_cleanup_chrdev(xdev, &interface->dev);
/*
* Try to send OPCODE_QUIESCE, which will fail silently if the device
* was disconnected, but makes sense on module unload.
*/
msg_ep->wake_on_drain = true;
xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);
/*
* If the device has been disconnected, sending the opcode sets
* xdev->error (a global device error), unless such an error occurred
* earlier. Hence timing out means that the USB link is fine, but
* somehow the message wasn't sent. This should never happen.
*/
rc = wait_event_interruptible_timeout(fifo->waitq,
msg_ep->drained || xdev->error,
XILLY_RESPONSE_TIMEOUT);
if (!rc)
dev_err(&interface->dev,
"Weird timeout condition on sending quiesce request.\n");
report_io_error(xdev, -ENODEV); /* Discourage further activity */
/*
* This device driver is declared with soft_unbind set, or else
* sending OPCODE_QUIESCE above would always fail. The price is
* that the USB framework didn't kill outstanding URBs, so it has
* to be done explicitly before returning from this call.
*/
for (i = 0; i < xdev->num_channels; i++) {
struct xillyusb_channel *chan = &xdev->channels[i];
/*
* Lock taken to prevent chan->out_ep from changing. It also
* ensures xillyusb_open() and xillyusb_flush() don't access
* xdev->dev after being nullified below.
*/
mutex_lock(&chan->lock);
if (chan->out_ep)
endpoint_quiesce(chan->out_ep);
mutex_unlock(&chan->lock);
}
endpoint_quiesce(xdev->in_ep);
endpoint_quiesce(xdev->msg_ep);
usb_set_intfdata(interface, NULL);
xdev->dev = NULL;
mutex_lock(&kref_mutex);
kref_put(&xdev->kref, cleanup_dev);
mutex_unlock(&kref_mutex);
}
static struct usb_driver xillyusb_driver = {
.name = xillyname,
.id_table = xillyusb_table,
.probe = xillyusb_probe,
.disconnect = xillyusb_disconnect,
.soft_unbind = 1,
};
static int __init xillyusb_init(void)
{
int rc = 0;
wakeup_wq = alloc_workqueue(xillyname, 0, 0);
if (!wakeup_wq)
return -ENOMEM;
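/*
 * fifo_buf_order is the page allocation order used for FIFO buffers:
 * zero when the initial buffer size fits in one page, otherwise the
 * number of doublings beyond PAGE_SHIFT, as computed below.
 */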
if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
else
fifo_buf_order = 0;
rc = usb_register(&xillyusb_driver);
if (rc)
destroy_workqueue(wakeup_wq);
return rc;
}
static void __exit xillyusb_exit(void)
{
usb_deregister(&xillyusb_driver);
destroy_workqueue(wakeup_wq);
}
module_init(xillyusb_init);
module_exit(xillyusb_exit);
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _POWERPC_XMON_DIS_ASM_H
#define _POWERPC_XMON_DIS_ASM_H
/*
* Copyright (C) 2006 Michael Ellerman, IBM Corporation.
*/
extern void print_address (unsigned long memaddr);
#ifdef CONFIG_XMON_DISASSEMBLY
extern int print_insn_powerpc(unsigned long insn, unsigned long memaddr);
extern int print_insn_spu(unsigned long insn, unsigned long memaddr);
#else
static inline int print_insn_powerpc(unsigned long insn, unsigned long memaddr)
{
printf("%.8lx", insn);
return 0;
}
static inline int print_insn_spu(unsigned long insn, unsigned long memaddr)
{
printf("%.8lx", insn);
return 0;
}
#endif
#endif /* _POWERPC_XMON_DIS_ASM_H */
|
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include "spectrum.h"
struct mlxsw_sp_kvdl {
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
struct mutex kvdl_lock; /* Protects kvdl allocations */
unsigned long priv[];
/* priv has to be always the last item */
};
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_sp_kvdl_ops *kvdl_ops = mlxsw_sp->kvdl_ops;
struct mlxsw_sp_kvdl *kvdl;
int err;
kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl) + kvdl_ops->priv_size,
GFP_KERNEL);
if (!kvdl)
return -ENOMEM;
mutex_init(&kvdl->kvdl_lock);
kvdl->kvdl_ops = kvdl_ops;
mlxsw_sp->kvdl = kvdl;
err = kvdl_ops->init(mlxsw_sp, kvdl->priv);
if (err)
goto err_init;
return 0;
err_init:
mutex_destroy(&kvdl->kvdl_lock);
kfree(kvdl);
return err;
}
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
mutex_destroy(&kvdl->kvdl_lock);
kfree(kvdl);
}
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count, u32 *p_entry_index)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
int err;
mutex_lock(&kvdl->kvdl_lock);
err = kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
entry_count, p_entry_index);
mutex_unlock(&kvdl->kvdl_lock);
return err;
}
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count, int entry_index)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
mutex_lock(&kvdl->kvdl_lock);
kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type,
entry_count, entry_index);
mutex_unlock(&kvdl->kvdl_lock);
}
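/*
 * Illustrative sketch, not part of the driver: a hypothetical caller
 * pairing mlxsw_sp_kvdl_alloc() with mlxsw_sp_kvdl_free(). The entry
 * type and count here are assumptions for demonstration only; real
 * callers pass the type matching the hardware object they program.
 */
static int __maybe_unused
mlxsw_sp_kvdl_usage_example(struct mlxsw_sp *mlxsw_sp)
{
u32 kvdl_index;
int err;
/* Reserve four consecutive adjacency entries */
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4, &kvdl_index);
if (err)
return err;
/* ... program hardware using kvdl_index ... */
/* Release with the same type and count that were allocated */
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4, kvdl_index);
return 0;
}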
int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_kvdl_entry_type type,
unsigned int entry_count,
unsigned int *p_alloc_count)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv, type,
entry_count, p_alloc_count);
}
|
/*
* Copyright (C) 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _hdp_4_0_OFFSET_HEADER
#define _hdp_4_0_OFFSET_HEADER
// addressBlock: hdp_hdpdec
// base address: 0x3c80
#define mmHDP_MMHUB_TLVL 0x0000
#define mmHDP_MMHUB_TLVL_BASE_IDX 0
#define mmHDP_MMHUB_UNITID 0x0001
#define mmHDP_MMHUB_UNITID_BASE_IDX 0
#define mmHDP_NONSURFACE_BASE 0x0040
#define mmHDP_NONSURFACE_BASE_BASE_IDX 0
#define mmHDP_NONSURFACE_INFO 0x0041
#define mmHDP_NONSURFACE_INFO_BASE_IDX 0
#define mmHDP_NONSURFACE_BASE_HI 0x0042
#define mmHDP_NONSURFACE_BASE_HI_BASE_IDX 0
#define mmHDP_NONSURF_FLAGS 0x00c8
#define mmHDP_NONSURF_FLAGS_BASE_IDX 0
#define mmHDP_NONSURF_FLAGS_CLR 0x00c9
#define mmHDP_NONSURF_FLAGS_CLR_BASE_IDX 0
#define mmHDP_HOST_PATH_CNTL 0x00cc
#define mmHDP_HOST_PATH_CNTL_BASE_IDX 0
#define mmHDP_SW_SEMAPHORE 0x00cd
#define mmHDP_SW_SEMAPHORE_BASE_IDX 0
#define mmHDP_DEBUG0 0x00ce
#define mmHDP_DEBUG0_BASE_IDX 0
#define mmHDP_LAST_SURFACE_HIT 0x00d0
#define mmHDP_LAST_SURFACE_HIT_BASE_IDX 0
#define mmHDP_READ_CACHE_INVALIDATE 0x00d1
#define mmHDP_READ_CACHE_INVALIDATE_BASE_IDX 0
#define mmHDP_OUTSTANDING_REQ 0x00d2
#define mmHDP_OUTSTANDING_REQ_BASE_IDX 0
#define mmHDP_MISC_CNTL 0x00d3
#define mmHDP_MISC_CNTL_BASE_IDX 0
#define mmHDP_MEM_POWER_LS 0x00d4
#define mmHDP_MEM_POWER_LS_BASE_IDX 0
#define mmHDP_MMHUB_CNTL 0x00d5
#define mmHDP_MMHUB_CNTL_BASE_IDX 0
#define mmHDP_EDC_CNT 0x00d6
#define mmHDP_EDC_CNT_BASE_IDX 0
#define mmHDP_VERSION 0x00d7
#define mmHDP_VERSION_BASE_IDX 0
#define mmHDP_CLK_CNTL 0x00d8
#define mmHDP_CLK_CNTL_BASE_IDX 0
#define mmHDP_MEMIO_CNTL 0x00f6
#define mmHDP_MEMIO_CNTL_BASE_IDX 0
#define mmHDP_MEMIO_ADDR 0x00f7
#define mmHDP_MEMIO_ADDR_BASE_IDX 0
#define mmHDP_MEMIO_STATUS 0x00f8
#define mmHDP_MEMIO_STATUS_BASE_IDX 0
#define mmHDP_MEMIO_WR_DATA 0x00f9
#define mmHDP_MEMIO_WR_DATA_BASE_IDX 0
#define mmHDP_MEMIO_RD_DATA 0x00fa
#define mmHDP_MEMIO_RD_DATA_BASE_IDX 0
#define mmHDP_XDP_DIRECT2HDP_FIRST 0x0100
#define mmHDP_XDP_DIRECT2HDP_FIRST_BASE_IDX 0
#define mmHDP_XDP_D2H_FLUSH 0x0101
#define mmHDP_XDP_D2H_FLUSH_BASE_IDX 0
#define mmHDP_XDP_D2H_BAR_UPDATE 0x0102
#define mmHDP_XDP_D2H_BAR_UPDATE_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_3 0x0103
#define mmHDP_XDP_D2H_RSVD_3_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_4 0x0104
#define mmHDP_XDP_D2H_RSVD_4_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_5 0x0105
#define mmHDP_XDP_D2H_RSVD_5_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_6 0x0106
#define mmHDP_XDP_D2H_RSVD_6_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_7 0x0107
#define mmHDP_XDP_D2H_RSVD_7_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_8 0x0108
#define mmHDP_XDP_D2H_RSVD_8_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_9 0x0109
#define mmHDP_XDP_D2H_RSVD_9_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_10 0x010a
#define mmHDP_XDP_D2H_RSVD_10_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_11 0x010b
#define mmHDP_XDP_D2H_RSVD_11_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_12 0x010c
#define mmHDP_XDP_D2H_RSVD_12_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_13 0x010d
#define mmHDP_XDP_D2H_RSVD_13_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_14 0x010e
#define mmHDP_XDP_D2H_RSVD_14_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_15 0x010f
#define mmHDP_XDP_D2H_RSVD_15_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_16 0x0110
#define mmHDP_XDP_D2H_RSVD_16_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_17 0x0111
#define mmHDP_XDP_D2H_RSVD_17_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_18 0x0112
#define mmHDP_XDP_D2H_RSVD_18_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_19 0x0113
#define mmHDP_XDP_D2H_RSVD_19_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_20 0x0114
#define mmHDP_XDP_D2H_RSVD_20_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_21 0x0115
#define mmHDP_XDP_D2H_RSVD_21_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_22 0x0116
#define mmHDP_XDP_D2H_RSVD_22_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_23 0x0117
#define mmHDP_XDP_D2H_RSVD_23_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_24 0x0118
#define mmHDP_XDP_D2H_RSVD_24_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_25 0x0119
#define mmHDP_XDP_D2H_RSVD_25_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_26 0x011a
#define mmHDP_XDP_D2H_RSVD_26_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_27 0x011b
#define mmHDP_XDP_D2H_RSVD_27_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_28 0x011c
#define mmHDP_XDP_D2H_RSVD_28_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_29 0x011d
#define mmHDP_XDP_D2H_RSVD_29_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_30 0x011e
#define mmHDP_XDP_D2H_RSVD_30_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_31 0x011f
#define mmHDP_XDP_D2H_RSVD_31_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_32 0x0120
#define mmHDP_XDP_D2H_RSVD_32_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_33 0x0121
#define mmHDP_XDP_D2H_RSVD_33_BASE_IDX 0
#define mmHDP_XDP_D2H_RSVD_34 0x0122
#define mmHDP_XDP_D2H_RSVD_34_BASE_IDX 0
#define mmHDP_XDP_DIRECT2HDP_LAST 0x0123
#define mmHDP_XDP_DIRECT2HDP_LAST_BASE_IDX 0
#define mmHDP_XDP_P2P_BAR_CFG 0x0124
#define mmHDP_XDP_P2P_BAR_CFG_BASE_IDX 0
#define mmHDP_XDP_P2P_MBX_OFFSET 0x0125
#define mmHDP_XDP_P2P_MBX_OFFSET_BASE_IDX 0
#define mmHDP_XDP_P2P_MBX_ADDR0 0x0126
#define mmHDP_XDP_P2P_MBX_ADDR0_BASE_IDX 0
#define mmHDP_XDP_P2P_MBX_ADDR1 0x0127
#define mmHDP_XDP_P2P_MBX_ADDR1_BASE_IDX 0
#define mmHDP_XDP_P2P_MBX_ADDR2 0x0128
#define mmHDP_XDP_P2P_MBX_ADDR2_BASE_IDX 0
#define mmHDP_XDP_P2P_MBX_ADDR3 0x0129
#define mmHDP_XDP_P2P_MBX_ADDR3_BASE_IDX 0
#define mmHDP_XDP_P2P_MBX_ADDR4 0x012a
#define mmHDP_XDP_P2P_MBX_ADDR4_BASE_IDX 0
#define mmHDP_XDP_P2P_MBX_ADDR5 0x012b
#define mmHDP_XDP_P2P_MBX_ADDR5_BASE_IDX 0
#define mmHDP_XDP_P2P_MBX_ADDR6 0x012c
#define mmHDP_XDP_P2P_MBX_ADDR6_BASE_IDX 0
#define mmHDP_XDP_HDP_MBX_MC_CFG 0x012d
#define mmHDP_XDP_HDP_MBX_MC_CFG_BASE_IDX 0
#define mmHDP_XDP_HDP_MC_CFG 0x012e
#define mmHDP_XDP_HDP_MC_CFG_BASE_IDX 0
#define mmHDP_XDP_HST_CFG 0x012f
#define mmHDP_XDP_HST_CFG_BASE_IDX 0
#define mmHDP_XDP_HDP_IPH_CFG 0x0131
#define mmHDP_XDP_HDP_IPH_CFG_BASE_IDX 0
#define mmHDP_XDP_P2P_BAR0 0x0134
#define mmHDP_XDP_P2P_BAR0_BASE_IDX 0
#define mmHDP_XDP_P2P_BAR1 0x0135
#define mmHDP_XDP_P2P_BAR1_BASE_IDX 0
#define mmHDP_XDP_P2P_BAR2 0x0136
#define mmHDP_XDP_P2P_BAR2_BASE_IDX 0
#define mmHDP_XDP_P2P_BAR3 0x0137
#define mmHDP_XDP_P2P_BAR3_BASE_IDX 0
#define mmHDP_XDP_P2P_BAR4 0x0138
#define mmHDP_XDP_P2P_BAR4_BASE_IDX 0
#define mmHDP_XDP_P2P_BAR5 0x0139
#define mmHDP_XDP_P2P_BAR5_BASE_IDX 0
#define mmHDP_XDP_P2P_BAR6 0x013a
#define mmHDP_XDP_P2P_BAR6_BASE_IDX 0
#define mmHDP_XDP_P2P_BAR7 0x013b
#define mmHDP_XDP_P2P_BAR7_BASE_IDX 0
#define mmHDP_XDP_FLUSH_ARMED_STS 0x013c
#define mmHDP_XDP_FLUSH_ARMED_STS_BASE_IDX 0
#define mmHDP_XDP_FLUSH_CNTR0_STS 0x013d
#define mmHDP_XDP_FLUSH_CNTR0_STS_BASE_IDX 0
#define mmHDP_XDP_BUSY_STS 0x013e
#define mmHDP_XDP_BUSY_STS_BASE_IDX 0
#define mmHDP_XDP_STICKY 0x013f
#define mmHDP_XDP_STICKY_BASE_IDX 0
#define mmHDP_XDP_CHKN 0x0140
#define mmHDP_XDP_CHKN_BASE_IDX 0
#define mmHDP_XDP_BARS_ADDR_39_36 0x0144
#define mmHDP_XDP_BARS_ADDR_39_36_BASE_IDX 0
#define mmHDP_XDP_MC_VM_FB_LOCATION_BASE 0x0145
#define mmHDP_XDP_MC_VM_FB_LOCATION_BASE_BASE_IDX 0
#define mmHDP_XDP_GPU_IOV_VIOLATION_LOG 0x0148
#define mmHDP_XDP_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
#define mmHDP_XDP_MMHUB_ERROR 0x0149
#define mmHDP_XDP_MMHUB_ERROR_BASE_IDX 0
#endif
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "cgrp_kfunc_common.h"
char _license[] SEC("license") = "GPL";
int err, pid, invocations;
/* Prototype for all of the program trace events below:
*
* TRACE_EVENT(cgroup_mkdir,
* TP_PROTO(struct cgroup *cgrp, const char *path),
* TP_ARGS(cgrp, path)
*/
static bool is_test_kfunc_task(void)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
bool same = pid == cur_pid;
if (same)
__sync_fetch_and_add(&invocations, 1);
return same;
}
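/*
 * Note: the userspace side of this test is expected to set the global
 * 'pid' (through the skeleton's data section) to its own PID before
 * attaching, so that only cgroup events triggered by the test process
 * run the checks below.
 */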
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_acquire_release_argument, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
if (!is_test_kfunc_task())
return 0;
acquired = bpf_cgroup_acquire(cgrp);
if (!acquired)
err = 1;
else
bpf_cgroup_release(acquired);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_acquire_leave_in_map, struct cgroup *cgrp, const char *path)
{
long status;
if (!is_test_kfunc_task())
return 0;
status = cgrps_kfunc_map_insert(cgrp);
if (status)
err = 1;
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_xchg_release, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr, *cg;
struct __cgrps_kfunc_map_value *v;
long status;
if (!is_test_kfunc_task())
return 0;
status = cgrps_kfunc_map_insert(cgrp);
if (status) {
err = 1;
return 0;
}
v = cgrps_kfunc_map_value_lookup(cgrp);
if (!v) {
err = 2;
return 0;
}
kptr = v->cgrp;
if (!kptr) {
err = 4;
return 0;
}
cg = bpf_cgroup_ancestor(kptr, 1);
if (cg) /* verifier-only check */
bpf_cgroup_release(cg);
kptr = bpf_kptr_xchg(&v->cgrp, NULL);
if (!kptr) {
err = 3;
return 0;
}
bpf_cgroup_release(kptr);
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_get_release, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
struct __cgrps_kfunc_map_value *v;
long status;
if (!is_test_kfunc_task())
return 0;
status = cgrps_kfunc_map_insert(cgrp);
if (status) {
err = 1;
return 0;
}
v = cgrps_kfunc_map_value_lookup(cgrp);
if (!v) {
err = 2;
return 0;
}
bpf_rcu_read_lock();
kptr = v->cgrp;
if (!kptr)
err = 3;
bpf_rcu_read_unlock();
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_get_ancestors, struct cgroup *cgrp, const char *path)
{
struct cgroup *self, *ancestor1, *invalid;
if (!is_test_kfunc_task())
return 0;
self = bpf_cgroup_ancestor(cgrp, cgrp->level);
if (!self) {
err = 1;
return 0;
}
if (self->self.id != cgrp->self.id) {
bpf_cgroup_release(self);
err = 2;
return 0;
}
bpf_cgroup_release(self);
ancestor1 = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
if (!ancestor1) {
err = 3;
return 0;
}
bpf_cgroup_release(ancestor1);
invalid = bpf_cgroup_ancestor(cgrp, 10000);
if (invalid) {
bpf_cgroup_release(invalid);
err = 4;
return 0;
}
invalid = bpf_cgroup_ancestor(cgrp, -1);
if (invalid) {
bpf_cgroup_release(invalid);
err = 5;
return 0;
}
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_from_id, struct cgroup *cgrp, const char *path)
{
struct cgroup *parent, *res;
u64 parent_cgid;
if (!is_test_kfunc_task())
return 0;
/* @cgrp's ID is not visible yet, let's test with the parent */
parent = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
if (!parent) {
err = 1;
return 0;
}
parent_cgid = parent->kn->id;
bpf_cgroup_release(parent);
res = bpf_cgroup_from_id(parent_cgid);
if (!res) {
err = 2;
return 0;
}
bpf_cgroup_release(res);
if (res != parent) {
err = 3;
return 0;
}
res = bpf_cgroup_from_id((u64)-1);
if (res) {
bpf_cgroup_release(res);
err = 4;
return 0;
}
return 0;
}
|
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gpu_commands.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
#include "selftest_rc6.h"
#include "selftests/i915_random.h"
#include "selftests/librapl.h"
static u64 rc6_residency(struct intel_rc6 *rc6)
{
u64 result;
/* XXX VLV_GT_MEDIA_RC6? */
result = intel_rc6_residency_ns(rc6, INTEL_RC6_RES_RC6);
if (HAS_RC6p(rc6_to_i915(rc6)))
result += intel_rc6_residency_ns(rc6, INTEL_RC6_RES_RC6p);
if (HAS_RC6pp(rc6_to_i915(rc6)))
result += intel_rc6_residency_ns(rc6, INTEL_RC6_RES_RC6pp);
return result;
}
int live_rc6_manual(void *arg)
{
struct intel_gt *gt = arg;
struct intel_rc6 *rc6 = &gt->rc6;
u64 rc0_power, rc6_power;
intel_wakeref_t wakeref;
bool has_power;
ktime_t dt;
u64 res[2];
int err = 0;
/*
* Our claim is that we can "encourage" the GPU to enter rc6 at will.
* Let's try it!
*/
if (!rc6->enabled)
return 0;
/* bsw/byt use a PCU and decouple RC6 from our manual control */
if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
return 0;
has_power = librapl_supported(gt->i915);
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
/* Force RC6 off for starters */
__intel_rc6_disable(rc6);
msleep(1); /* wakeup is not immediate, takes about 100us on icl */
res[0] = rc6_residency(rc6);
dt = ktime_get();
rc0_power = librapl_energy_uJ();
msleep(1000);
rc0_power = librapl_energy_uJ() - rc0_power;
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
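/*
 * Residency is accumulated in nanoseconds; the >> 10 below roughly
 * converts to microseconds (divide by 1024), so less than ~1us of
 * drift while RC6 is disabled is tolerated.
 */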
if ((res[1] - res[0]) >> 10) {
pr_err("RC6 residency increased by %lldus while disabled for 1000ms!\n",
(res[1] - res[0]) >> 10);
err = -EINVAL;
goto out_unlock;
}
if (has_power) {
rc0_power = div64_u64(NSEC_PER_SEC * rc0_power,
ktime_to_ns(dt));
if (!rc0_power) {
pr_err("No power measured while in RC0\n");
err = -EINVAL;
goto out_unlock;
}
}
/* Manually enter RC6 */
intel_rc6_park(rc6);
res[0] = rc6_residency(rc6);
intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
dt = ktime_get();
rc6_power = librapl_energy_uJ();
msleep(100);
rc6_power = librapl_energy_uJ() - rc6_power;
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
if (res[1] == res[0]) {
pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL),
res[0]);
err = -EINVAL;
}
if (has_power) {
rc6_power = div64_u64(NSEC_PER_SEC * rc6_power,
ktime_to_ns(dt));
pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
rc0_power, rc6_power);
if (2 * rc6_power > rc0_power) {
pr_err("GPU leaked energy while in RC6!\n");
err = -EINVAL;
goto out_unlock;
}
}
/* Restore what should have been the original state! */
intel_rc6_unpark(rc6);
out_unlock:
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return err;
}
static const u32 *__live_rc6_ctx(struct intel_context *ce)
{
struct i915_request *rq;
const u32 *result;
u32 cmd;
u32 *cs;
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return ERR_CAST(rq);
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
i915_request_add(rq);
return cs;
}
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
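/*
 * On gen8+ the MMIO address is 64 bits wide, making the command one
 * dword longer; bumping the opcode's embedded length field (cmd++)
 * accounts for that extra dword.
 */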
if (GRAPHICS_VER(rq->i915) >= 8)
cmd++;
*cs++ = cmd;
*cs++ = i915_mmio_reg_offset(GEN8_RC6_CTX_INFO);
*cs++ = ce->timeline->hwsp_offset + 8;
*cs++ = 0;
intel_ring_advance(rq, cs);
result = rq->hwsp_seqno + 2;
i915_request_add(rq);
return result;
}
static struct intel_engine_cs **
randomised_engines(struct intel_gt *gt,
struct rnd_state *prng,
unsigned int *count)
{
struct intel_engine_cs *engine, **engines;
enum intel_engine_id id;
int n;
n = 0;
for_each_engine(engine, gt, id)
n++;
if (!n)
return NULL;
engines = kmalloc_array(n, sizeof(*engines), GFP_KERNEL);
if (!engines)
return NULL;
n = 0;
for_each_engine(engine, gt, id)
engines[n++] = engine;
i915_prandom_shuffle(engines, sizeof(*engines), n, prng);
*count = n;
return engines;
}
int live_rc6_ctx_wa(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs **engines;
unsigned int n, count;
I915_RND_STATE(prng);
int err = 0;
/* A read of CTX_INFO upsets rc6. Poke the bear! */
if (GRAPHICS_VER(gt->i915) < 8)
return 0;
engines = randomised_engines(gt, &prng, &count);
if (!engines)
return 0;
for (n = 0; n < count; n++) {
struct intel_engine_cs *engine = engines[n];
int pass;
for (pass = 0; pass < 2; pass++) {
struct i915_gpu_error *error = &gt->i915->gpu_error;
struct intel_context *ce;
unsigned int resets =
i915_reset_engine_count(error, engine);
const u32 *res;
/* Use a sacrificial context */
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
}
intel_engine_pm_get(engine);
res = __live_rc6_ctx(ce);
intel_engine_pm_put(engine);
intel_context_put(ce);
if (IS_ERR(res)) {
err = PTR_ERR(res);
goto out;
}
if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
intel_gt_set_wedged(gt);
err = -ETIME;
goto out;
}
intel_gt_pm_wait_for_idle(gt);
pr_debug("%s: CTX_INFO=%0x\n",
engine->name, READ_ONCE(*res));
if (resets !=
i915_reset_engine_count(error, engine)) {
pr_err("%s: GPU reset required\n",
engine->name);
add_taint_for_CI(gt->i915, TAINT_WARN);
err = -EIO;
goto out;
}
}
}
out:
kfree(engines);
return err;
}
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* drmem.h: Power specific logical memory block representation
*
* Copyright 2017 IBM Corporation
*/
#ifndef _ASM_POWERPC_LMB_H
#define _ASM_POWERPC_LMB_H
#include <linux/sched.h>
struct drmem_lmb {
u64 base_addr;
u32 drc_index;
u32 aa_index;
u32 flags;
};
struct drmem_lmb_info {
struct drmem_lmb *lmbs;
int n_lmbs;
u64 lmb_size;
};
struct device_node;
struct property;
extern struct drmem_lmb_info *drmem_info;
static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
const struct drmem_lmb *start)
{
/*
* DLPAR code paths can take several milliseconds per element
* when interacting with firmware. Ensure that we don't
* unfairly monopolize the CPU.
*/
if (((++lmb - start) % 16) == 0)
cond_resched();
return lmb;
}
#define for_each_drmem_lmb_in_range(lmb, start, end) \
for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))
#define for_each_drmem_lmb(lmb) \
for_each_drmem_lmb_in_range((lmb), \
&drmem_info->lmbs[0], \
&drmem_info->lmbs[drmem_info->n_lmbs])
/*
* The of_drconf_cell_v1 struct defines the layout of the LMB data
* specified in the ibm,dynamic-memory device tree property.
* The property itself is a 32-bit value specifying the number of
* LMBs followed by an array of of_drconf_cell_v1 entries, one
* per LMB.
*/
struct of_drconf_cell_v1 {
__be64 base_addr;
__be32 drc_index;
__be32 reserved;
__be32 aa_index;
__be32 flags;
};
/*
* Version 2 of the ibm,dynamic-memory property is defined as a
* 32-bit value specifying the number of LMB sets followed by an
* array of of_drconf_cell_v2 entries, one per LMB set.
*/
struct of_drconf_cell_v2 {
u32 seq_lmbs;
u64 base_addr;
u32 drc_index;
u32 aa_index;
u32 flags;
} __packed;
#define DRCONF_MEM_ASSIGNED 0x00000008
#define DRCONF_MEM_AI_INVALID 0x00000040
#define DRCONF_MEM_RESERVED 0x00000080
#define DRCONF_MEM_HOTREMOVABLE 0x00000100
static inline u64 drmem_lmb_size(void)
{
return drmem_info->lmb_size;
}
#define DRMEM_LMB_RESERVED 0x80000000
static inline void drmem_mark_lmb_reserved(struct drmem_lmb *lmb)
{
lmb->flags |= DRMEM_LMB_RESERVED;
}
static inline void drmem_remove_lmb_reservation(struct drmem_lmb *lmb)
{
lmb->flags &= ~DRMEM_LMB_RESERVED;
}
static inline bool drmem_lmb_reserved(struct drmem_lmb *lmb)
{
return lmb->flags & DRMEM_LMB_RESERVED;
}
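/*
 * Illustrative sketch, not part of this header: walking every LMB with
 * the iterator above and counting the ones currently marked reserved.
 * The helper name is hypothetical; it uses only helpers defined in
 * this file.
 */
static inline int drmem_count_reserved_lmbs(void)
{
struct drmem_lmb *lmb;
int count = 0;
for_each_drmem_lmb(lmb)
if (drmem_lmb_reserved(lmb))
count++;
return count;
}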
u64 drmem_lmb_memory_max(void);
int walk_drmem_lmbs(struct device_node *dn, void *data,
int (*func)(struct drmem_lmb *, const __be32 **, void *));
int drmem_update_dt(void);
#ifdef CONFIG_PPC_PSERIES
int __init
walk_drmem_lmbs_early(unsigned long node, void *data,
int (*func)(struct drmem_lmb *, const __be32 **, void *));
void drmem_update_lmbs(struct property *prop);
#endif
static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
{
lmb->aa_index = 0xffffffff;
}
#endif /* _ASM_POWERPC_LMB_H */
|
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
//
// tegra210_dmic.c - Tegra210 DMIC driver
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <sound/core.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "tegra210_dmic.h"
#include "tegra_cif.h"
static const struct reg_default tegra210_dmic_reg_defaults[] = {
{ TEGRA210_DMIC_TX_INT_MASK, 0x00000001 },
{ TEGRA210_DMIC_TX_CIF_CTRL, 0x00007700 },
{ TEGRA210_DMIC_CG, 0x1 },
{ TEGRA210_DMIC_CTRL, 0x00000301 },
/* Below enables all filters - DCR, LP and SC */
{ TEGRA210_DMIC_DBG_CTRL, 0xe },
/* Below as per latest POR value */
{ TEGRA210_DMIC_DCR_BIQUAD_0_COEF_4, 0x0 },
/* LP filter is configured for pass through and used to apply gain */
{ TEGRA210_DMIC_LP_BIQUAD_0_COEF_0, 0x00800000 },
{ TEGRA210_DMIC_LP_BIQUAD_0_COEF_1, 0x0 },
{ TEGRA210_DMIC_LP_BIQUAD_0_COEF_2, 0x0 },
{ TEGRA210_DMIC_LP_BIQUAD_0_COEF_3, 0x0 },
{ TEGRA210_DMIC_LP_BIQUAD_0_COEF_4, 0x0 },
{ TEGRA210_DMIC_LP_BIQUAD_1_COEF_0, 0x00800000 },
{ TEGRA210_DMIC_LP_BIQUAD_1_COEF_1, 0x0 },
{ TEGRA210_DMIC_LP_BIQUAD_1_COEF_2, 0x0 },
{ TEGRA210_DMIC_LP_BIQUAD_1_COEF_3, 0x0 },
{ TEGRA210_DMIC_LP_BIQUAD_1_COEF_4, 0x0 },
};
static int __maybe_unused tegra210_dmic_runtime_suspend(struct device *dev)
{
struct tegra210_dmic *dmic = dev_get_drvdata(dev);
regcache_cache_only(dmic->regmap, true);
regcache_mark_dirty(dmic->regmap);
clk_disable_unprepare(dmic->clk_dmic);
return 0;
}
static int __maybe_unused tegra210_dmic_runtime_resume(struct device *dev)
{
struct tegra210_dmic *dmic = dev_get_drvdata(dev);
int err;
err = clk_prepare_enable(dmic->clk_dmic);
if (err) {
dev_err(dev, "failed to enable DMIC clock, err: %d\n", err);
return err;
}
regcache_cache_only(dmic->regmap, false);
regcache_sync(dmic->regmap);
return 0;
}
static int tegra210_dmic_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct tegra210_dmic *dmic = snd_soc_dai_get_drvdata(dai);
unsigned int srate, clk_rate, channels;
struct tegra_cif_conf cif_conf;
unsigned long long gain_q23 = DEFAULT_GAIN_Q23;
int err;
memset(&cif_conf, 0, sizeof(struct tegra_cif_conf));
channels = params_channels(params);
cif_conf.audio_ch = channels;
switch (dmic->ch_select) {
case DMIC_CH_SELECT_LEFT:
case DMIC_CH_SELECT_RIGHT:
cif_conf.client_ch = 1;
break;
case DMIC_CH_SELECT_STEREO:
cif_conf.client_ch = 2;
break;
default:
dev_err(dai->dev, "invalid DMIC client channels\n");
return -EINVAL;
}
srate = params_rate(params);
/*
* DMIC clock rate is the product of the 'Over Sampling Ratio' and
* the 'Sample Rate'. The supported OSR values are 64, 128 and 256.
*/
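/*
 * Worked example (illustrative, assuming DMIC_OSR_FACTOR is 64, the
 * lowest supported OSR): with OSR_64 (osr_val = 0) and a 48 kHz
 * stream, clk_rate = (64 << 0) * 48000 = 3.072 MHz.
 */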
clk_rate = (DMIC_OSR_FACTOR << dmic->osr_val) * srate;
err = clk_set_rate(dmic->clk_dmic, clk_rate);
if (err) {
dev_err(dai->dev, "can't set DMIC clock rate %u, err: %d\n",
clk_rate, err);
return err;
}
regmap_update_bits(dmic->regmap,
/* Reg */
TEGRA210_DMIC_CTRL,
/* Mask */
TEGRA210_DMIC_CTRL_LRSEL_POLARITY_MASK |
TEGRA210_DMIC_CTRL_OSR_MASK |
TEGRA210_DMIC_CTRL_CHANNEL_SELECT_MASK,
/* Value */
(dmic->lrsel << LRSEL_POL_SHIFT) |
(dmic->osr_val << OSR_SHIFT) |
((dmic->ch_select + 1) << CH_SEL_SHIFT));
/*
* Use LP filter gain register to apply boost.
* Boost Gain Volume control has 100x factor.
*/
if (dmic->boost_gain)
gain_q23 = div_u64(gain_q23 * dmic->boost_gain, 100);
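/*
 * Illustrative math, assuming DEFAULT_GAIN_Q23 encodes 1.0 in Q23
 * fixed point (0x00800000): a boost_gain of 200 yields 2.0x, i.e.
 * 0x01000000 is written below.
 */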
regmap_write(dmic->regmap, TEGRA210_DMIC_LP_FILTER_GAIN,
(unsigned int)gain_q23);
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
cif_conf.audio_bits = TEGRA_ACIF_BITS_16;
break;
case SNDRV_PCM_FORMAT_S24_LE:
case SNDRV_PCM_FORMAT_S32_LE:
cif_conf.audio_bits = TEGRA_ACIF_BITS_32;
break;
default:
dev_err(dai->dev, "unsupported format!\n");
return -EOPNOTSUPP;
}
cif_conf.client_bits = TEGRA_ACIF_BITS_24;
cif_conf.mono_conv = dmic->mono_to_stereo;
cif_conf.stereo_conv = dmic->stereo_to_mono;
tegra_set_cif(dmic->regmap, TEGRA210_DMIC_TX_CIF_CTRL, &cif_conf);
return 0;
}
static int tegra210_dmic_get_boost_gain(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
ucontrol->value.integer.value[0] = dmic->boost_gain;
return 0;
}
static int tegra210_dmic_put_boost_gain(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
int value = ucontrol->value.integer.value[0];
if (value == dmic->boost_gain)
return 0;
dmic->boost_gain = value;
return 1;
}
static int tegra210_dmic_get_ch_select(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
ucontrol->value.enumerated.item[0] = dmic->ch_select;
return 0;
}
static int tegra210_dmic_put_ch_select(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
unsigned int value = ucontrol->value.enumerated.item[0];
if (value == dmic->ch_select)
return 0;
dmic->ch_select = value;
return 1;
}
static int tegra210_dmic_get_mono_to_stereo(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
ucontrol->value.enumerated.item[0] = dmic->mono_to_stereo;
return 0;
}
static int tegra210_dmic_put_mono_to_stereo(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
unsigned int value = ucontrol->value.enumerated.item[0];
if (value == dmic->mono_to_stereo)
return 0;
dmic->mono_to_stereo = value;
return 1;
}
static int tegra210_dmic_get_stereo_to_mono(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
ucontrol->value.enumerated.item[0] = dmic->stereo_to_mono;
return 0;
}
static int tegra210_dmic_put_stereo_to_mono(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
unsigned int value = ucontrol->value.enumerated.item[0];
if (value == dmic->stereo_to_mono)
return 0;
dmic->stereo_to_mono = value;
return 1;
}
static int tegra210_dmic_get_osr_val(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
ucontrol->value.enumerated.item[0] = dmic->osr_val;
return 0;
}
static int tegra210_dmic_put_osr_val(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
unsigned int value = ucontrol->value.enumerated.item[0];
if (value == dmic->osr_val)
return 0;
dmic->osr_val = value;
return 1;
}
static int tegra210_dmic_get_pol_sel(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
ucontrol->value.enumerated.item[0] = dmic->lrsel;
return 0;
}
static int tegra210_dmic_put_pol_sel(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
unsigned int value = ucontrol->value.enumerated.item[0];
if (value == dmic->lrsel)
return 0;
dmic->lrsel = value;
return 1;
}
static const struct snd_soc_dai_ops tegra210_dmic_dai_ops = {
.hw_params = tegra210_dmic_hw_params,
};
static struct snd_soc_dai_driver tegra210_dmic_dais[] = {
{
.name = "DMIC-CIF",
.capture = {
.stream_name = "CIF-Capture",
.channels_min = 1,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
},
},
{
.name = "DMIC-DAP",
.capture = {
.stream_name = "DAP-Capture",
.channels_min = 1,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
},
.ops = &tegra210_dmic_dai_ops,
.symmetric_rate = 1,
},
};
static const struct snd_soc_dapm_widget tegra210_dmic_widgets[] = {
SND_SOC_DAPM_AIF_OUT("TX", NULL, 0, TEGRA210_DMIC_ENABLE, 0, 0),
SND_SOC_DAPM_MIC("MIC", NULL),
};
static const struct snd_soc_dapm_route tegra210_dmic_routes[] = {
{ "XBAR-RX", NULL, "XBAR-Capture" },
{ "XBAR-Capture", NULL, "CIF-Capture" },
{ "CIF-Capture", NULL, "TX" },
{ "TX", NULL, "DAP-Capture" },
{ "DAP-Capture", NULL, "MIC" },
};
static const char * const tegra210_dmic_ch_select[] = {
"Left", "Right", "Stereo",
};
static const struct soc_enum tegra210_dmic_ch_enum =
SOC_ENUM_SINGLE(0, 0, ARRAY_SIZE(tegra210_dmic_ch_select),
tegra210_dmic_ch_select);
static const char * const tegra210_dmic_mono_conv_text[] = {
"Zero", "Copy",
};
static const char * const tegra210_dmic_stereo_conv_text[] = {
"CH0", "CH1", "AVG",
};
static const struct soc_enum tegra210_dmic_mono_conv_enum =
SOC_ENUM_SINGLE(0, 0, ARRAY_SIZE(tegra210_dmic_mono_conv_text),
tegra210_dmic_mono_conv_text);
static const struct soc_enum tegra210_dmic_stereo_conv_enum =
SOC_ENUM_SINGLE(0, 0, ARRAY_SIZE(tegra210_dmic_stereo_conv_text),
tegra210_dmic_stereo_conv_text);
static const char * const tegra210_dmic_osr_text[] = {
"OSR_64", "OSR_128", "OSR_256",
};
static const struct soc_enum tegra210_dmic_osr_enum =
SOC_ENUM_SINGLE(0, 0, ARRAY_SIZE(tegra210_dmic_osr_text),
tegra210_dmic_osr_text);
static const char * const tegra210_dmic_lrsel_text[] = {
"Left", "Right",
};
static const struct soc_enum tegra210_dmic_lrsel_enum =
SOC_ENUM_SINGLE(0, 0, ARRAY_SIZE(tegra210_dmic_lrsel_text),
tegra210_dmic_lrsel_text);
static const struct snd_kcontrol_new tegra210_dmic_controls[] = {
SOC_SINGLE_EXT("Boost Gain Volume", 0, 0, MAX_BOOST_GAIN, 0,
tegra210_dmic_get_boost_gain,
tegra210_dmic_put_boost_gain),
SOC_ENUM_EXT("Channel Select", tegra210_dmic_ch_enum,
tegra210_dmic_get_ch_select, tegra210_dmic_put_ch_select),
SOC_ENUM_EXT("Mono To Stereo",
tegra210_dmic_mono_conv_enum,
tegra210_dmic_get_mono_to_stereo,
tegra210_dmic_put_mono_to_stereo),
SOC_ENUM_EXT("Stereo To Mono",
tegra210_dmic_stereo_conv_enum,
tegra210_dmic_get_stereo_to_mono,
tegra210_dmic_put_stereo_to_mono),
SOC_ENUM_EXT("OSR Value", tegra210_dmic_osr_enum,
tegra210_dmic_get_osr_val, tegra210_dmic_put_osr_val),
SOC_ENUM_EXT("LR Polarity Select", tegra210_dmic_lrsel_enum,
tegra210_dmic_get_pol_sel, tegra210_dmic_put_pol_sel),
};
static const struct snd_soc_component_driver tegra210_dmic_compnt = {
.dapm_widgets = tegra210_dmic_widgets,
.num_dapm_widgets = ARRAY_SIZE(tegra210_dmic_widgets),
.dapm_routes = tegra210_dmic_routes,
.num_dapm_routes = ARRAY_SIZE(tegra210_dmic_routes),
.controls = tegra210_dmic_controls,
.num_controls = ARRAY_SIZE(tegra210_dmic_controls),
};
static bool tegra210_dmic_wr_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case TEGRA210_DMIC_TX_INT_MASK ... TEGRA210_DMIC_TX_CIF_CTRL:
case TEGRA210_DMIC_ENABLE ... TEGRA210_DMIC_CG:
case TEGRA210_DMIC_CTRL:
case TEGRA210_DMIC_DBG_CTRL:
case TEGRA210_DMIC_DCR_BIQUAD_0_COEF_4 ... TEGRA210_DMIC_LP_BIQUAD_1_COEF_4:
return true;
default:
return false;
}
}
static bool tegra210_dmic_rd_reg(struct device *dev, unsigned int reg)
{
if (tegra210_dmic_wr_reg(dev, reg))
return true;
switch (reg) {
case TEGRA210_DMIC_TX_STATUS:
case TEGRA210_DMIC_TX_INT_STATUS:
case TEGRA210_DMIC_STATUS:
case TEGRA210_DMIC_INT_STATUS:
return true;
default:
return false;
}
}
static bool tegra210_dmic_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case TEGRA210_DMIC_TX_STATUS:
case TEGRA210_DMIC_TX_INT_STATUS:
case TEGRA210_DMIC_TX_INT_SET:
case TEGRA210_DMIC_SOFT_RESET:
case TEGRA210_DMIC_STATUS:
case TEGRA210_DMIC_INT_STATUS:
return true;
default:
return false;
}
}
static const struct regmap_config tegra210_dmic_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = TEGRA210_DMIC_LP_BIQUAD_1_COEF_4,
.writeable_reg = tegra210_dmic_wr_reg,
.readable_reg = tegra210_dmic_rd_reg,
.volatile_reg = tegra210_dmic_volatile_reg,
.reg_defaults = tegra210_dmic_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(tegra210_dmic_reg_defaults),
.cache_type = REGCACHE_FLAT,
};
static int tegra210_dmic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra210_dmic *dmic;
void __iomem *regs;
int err;
dmic = devm_kzalloc(dev, sizeof(*dmic), GFP_KERNEL);
if (!dmic)
return -ENOMEM;
dmic->osr_val = DMIC_OSR_64;
dmic->ch_select = DMIC_CH_SELECT_STEREO;
dmic->lrsel = DMIC_LRSEL_LEFT;
dmic->boost_gain = 0;
dmic->stereo_to_mono = 0; /* "CH0" */
dev_set_drvdata(dev, dmic);
dmic->clk_dmic = devm_clk_get(dev, "dmic");
if (IS_ERR(dmic->clk_dmic)) {
dev_err(dev, "can't retrieve DMIC clock\n");
return PTR_ERR(dmic->clk_dmic);
}
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
dmic->regmap = devm_regmap_init_mmio(dev, regs,
&tegra210_dmic_regmap_config);
if (IS_ERR(dmic->regmap)) {
dev_err(dev, "regmap init failed\n");
return PTR_ERR(dmic->regmap);
}
regcache_cache_only(dmic->regmap, true);
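/*
 * Keep the regmap in cache-only mode for now: the device is runtime
 * suspended at this point, so register writes land in the cache and
 * get synced to hardware by the runtime resume handler above.
 */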
err = devm_snd_soc_register_component(dev, &tegra210_dmic_compnt,
tegra210_dmic_dais,
ARRAY_SIZE(tegra210_dmic_dais));
if (err) {
dev_err(dev, "can't register DMIC component, err: %d\n", err);
return err;
}
pm_runtime_enable(dev);
return 0;
}
static void tegra210_dmic_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
}
static const struct dev_pm_ops tegra210_dmic_pm_ops = {
SET_RUNTIME_PM_OPS(tegra210_dmic_runtime_suspend,
tegra210_dmic_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
static const struct of_device_id tegra210_dmic_of_match[] = {
{ .compatible = "nvidia,tegra210-dmic" },
{},
};
MODULE_DEVICE_TABLE(of, tegra210_dmic_of_match);
static struct platform_driver tegra210_dmic_driver = {
.driver = {
.name = "tegra210-dmic",
.of_match_table = tegra210_dmic_of_match,
.pm = &tegra210_dmic_pm_ops,
},
.probe = tegra210_dmic_probe,
.remove = tegra210_dmic_remove,
};
module_platform_driver(tegra210_dmic_driver);
MODULE_AUTHOR("Rahul Mittal <[email protected]>");
MODULE_DESCRIPTION("Tegra210 ASoC DMIC driver");
MODULE_LICENSE("GPL v2");
|
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2015 Freescale Semiconductor, Inc.
/dts-v1/;
#include "imx6ul.dtsi"
#include "imx6ul-14x14-evk.dtsi"
/ {
model = "Freescale i.MX6 UltraLite 14x14 EVK Board";
compatible = "fsl,imx6ul-14x14-evk", "fsl,imx6ul";
};
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2016 Texas Instruments Incorporated - https://www.ti.com/
*/
#include "am57xx-beagle-x15-common.dtsi"
/ {
/* NOTE: This describes the "original" pre-production A2 revision */
model = "TI AM5728 BeagleBoard-X15";
};
&tpd12s015 {
gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>, /* gpio7_10, CT CP HPD */
<&gpio6 28 GPIO_ACTIVE_HIGH>, /* gpio6_28, LS OE */
<&gpio7 12 GPIO_ACTIVE_HIGH>; /* gpio7_12/sp1_cs2, HPD */
};
&mmc1 {
pinctrl-names = "default", "hs";
pinctrl-0 = <&mmc1_pins_default>;
pinctrl-1 = <&mmc1_pins_hs>;
vmmc-supply = <&ldo1_reg>;
no-1-8-v;
};
&mmc2 {
pinctrl-names = "default", "hs", "ddr_3_3v";
pinctrl-0 = <&mmc2_pins_default>;
pinctrl-1 = <&mmc2_pins_hs>;
pinctrl-2 = <&mmc2_pins_ddr_3_3v_rev11 &mmc2_iodelay_ddr_3_3v_rev11_conf>;
};
/* errata i880 "Ethernet RGMII2 Limited to 10/100 Mbps" */
&phy1 {
max-speed = <100>;
};
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ASIX AX88172A based USB 2.0 Ethernet Devices
* Copyright (C) 2012 OMICRON electronics GmbH
*
* Supports external PHYs via phylib. Based on the driver for the
* AX88772. Original copyrights follow:
*
* Copyright (C) 2003-2006 David Hollis <[email protected]>
* Copyright (C) 2005 Phil Chang <[email protected]>
* Copyright (C) 2006 James Painter <[email protected]>
* Copyright (c) 2002-2003 TiVo Inc.
*/
#include "asix.h"
#include <linux/phy.h>
struct ax88172a_private {
struct mii_bus *mdio;
struct phy_device *phydev;
char phy_name[20];
u16 phy_addr;
u16 oldmode;
int use_embdphy;
struct asix_rx_fixup_info rx_fixup_info;
};
/* set MAC link settings according to information from phylib */
static void ax88172a_adjust_link(struct net_device *netdev)
{
struct phy_device *phydev = netdev->phydev;
struct usbnet *dev = netdev_priv(netdev);
struct ax88172a_private *priv = dev->driver_priv;
u16 mode = 0;
if (phydev->link) {
mode = AX88772_MEDIUM_DEFAULT;
if (phydev->duplex == DUPLEX_HALF)
mode &= ~AX_MEDIUM_FD;
if (phydev->speed != SPEED_100)
mode &= ~AX_MEDIUM_PS;
}
if (mode != priv->oldmode) {
asix_write_medium_mode(dev, mode, 0);
priv->oldmode = mode;
netdev_dbg(netdev, "speed %u duplex %d, setting mode to 0x%04x\n",
phydev->speed, phydev->duplex, mode);
phy_print_status(phydev);
}
}
static void ax88172a_status(struct usbnet *dev, struct urb *urb)
{
/* link changes are detected by polling the phy */
}
/* use phylib infrastructure */
static int ax88172a_init_mdio(struct usbnet *dev)
{
struct ax88172a_private *priv = dev->driver_priv;
int ret;
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
netdev_err(dev->net, "Could not allocate MDIO bus\n");
return -ENOMEM;
}
priv->mdio->priv = (void *)dev;
priv->mdio->read = &asix_mdio_bus_read;
priv->mdio->write = &asix_mdio_bus_write;
priv->mdio->name = "Asix MDIO Bus";
/* mii bus id is usb-<usb bus number>:<usb device number> */
snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum);
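/* e.g. bus 1, device 4 yields the id "usb-001:004" */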
ret = mdiobus_register(priv->mdio);
if (ret) {
netdev_err(dev->net, "Could not register MDIO bus\n");
goto mfree;
}
netdev_info(dev->net, "registered mdio bus %s\n", priv->mdio->id);
return 0;
mfree:
mdiobus_free(priv->mdio);
return ret;
}
static void ax88172a_remove_mdio(struct usbnet *dev)
{
struct ax88172a_private *priv = dev->driver_priv;
netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id);
mdiobus_unregister(priv->mdio);
mdiobus_free(priv->mdio);
}
static const struct net_device_ops ax88172a_netdev_ops = {
.ndo_open = usbnet_open,
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = asix_set_multicast,
};
static const struct ethtool_ops ax88172a_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.get_wol = asix_get_wol,
.set_wol = asix_set_wol,
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
.set_eeprom = asix_set_eeprom,
.nway_reset = phy_ethtool_nway_reset,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int ax88172a_reset_phy(struct usbnet *dev, int embd_phy)
{
int ret;
ret = asix_sw_reset(dev, AX_SWRESET_IPPD, 0);
if (ret < 0)
goto err;
msleep(150);
ret = asix_sw_reset(dev, AX_SWRESET_CLEAR, 0);
if (ret < 0)
goto err;
msleep(150);
ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_IPPD,
0);
if (ret < 0)
goto err;
return 0;
err:
return ret;
}
static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret;
u8 buf[ETH_ALEN];
struct ax88172a_private *priv;
ret = usbnet_get_endpoints(dev, intf);
if (ret)
return ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dev->driver_priv = priv;
/* Get the MAC address */
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
if (ret < ETH_ALEN) {
netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
ret = -EIO;
goto free;
}
eth_hw_addr_set(dev->net, buf);
dev->net->netdev_ops = &ax88172a_netdev_ops;
dev->net->ethtool_ops = &ax88172a_ethtool_ops;
/* are we using the internal or the external phy? */
ret = asix_read_cmd(dev, AX_CMD_SW_PHY_STATUS, 0, 0, 1, buf, 0);
if (ret < 0) {
netdev_err(dev->net, "Failed to read software interface selection register: %d\n",
ret);
goto free;
}
netdev_dbg(dev->net, "AX_CMD_SW_PHY_STATUS = 0x%02x\n", buf[0]);
switch (buf[0] & AX_PHY_SELECT_MASK) {
case AX_PHY_SELECT_INTERNAL:
netdev_dbg(dev->net, "use internal phy\n");
priv->use_embdphy = 1;
break;
case AX_PHY_SELECT_EXTERNAL:
netdev_dbg(dev->net, "use external phy\n");
priv->use_embdphy = 0;
break;
default:
netdev_err(dev->net, "Interface mode not supported by driver\n");
ret = -ENOTSUPP;
goto free;
}
ret = asix_read_phy_addr(dev, priv->use_embdphy);
if (ret < 0)
goto free;
priv->phy_addr = ret;
ax88172a_reset_phy(dev, priv->use_embdphy);
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
if (dev->driver_info->flags & FLAG_FRAMING_AX) {
/* hard_mtu is still the default - the device does not support
* jumbo eth frames
*/
dev->rx_urb_size = 2048;
}
/* init MDIO bus */
ret = ax88172a_init_mdio(dev);
if (ret)
goto free;
return 0;
free:
kfree(priv);
return ret;
}
static int ax88172a_stop(struct usbnet *dev)
{
struct ax88172a_private *priv = dev->driver_priv;
netdev_dbg(dev->net, "Stopping interface\n");
if (priv->phydev) {
netdev_info(dev->net, "Disconnecting from phy %s\n",
priv->phy_name);
phy_stop(priv->phydev);
phy_disconnect(priv->phydev);
}
return 0;
}
static void ax88172a_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct ax88172a_private *priv = dev->driver_priv;
ax88172a_remove_mdio(dev);
kfree(priv);
}
static int ax88172a_reset(struct usbnet *dev)
{
struct asix_data *data = (struct asix_data *)&dev->data;
struct ax88172a_private *priv = dev->driver_priv;
int ret;
u16 rx_ctl;
ax88172a_reset_phy(dev, priv->use_embdphy);
msleep(150);
rx_ctl = asix_read_rx_ctl(dev, 0);
netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
ret = asix_write_rx_ctl(dev, 0x0000, 0);
if (ret < 0)
goto out;
rx_ctl = asix_read_rx_ctl(dev, 0);
netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
msleep(150);
ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
AX88772_IPG2_DEFAULT, 0, NULL, 0);
if (ret < 0) {
netdev_err(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
goto out;
}
/* Rewrite MAC address */
memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
data->mac_addr, 0);
if (ret < 0)
goto out;
/* Set RX_CTL to default values with 2k buffer, and enable cactus */
ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, 0);
if (ret < 0)
goto out;
rx_ctl = asix_read_rx_ctl(dev, 0);
netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
rx_ctl);
rx_ctl = asix_read_medium_status(dev, 0);
netdev_dbg(dev->net, "Medium Status is 0x%04x after all initializations\n",
rx_ctl);
/* Connect to PHY */
snprintf(priv->phy_name, 20, PHY_ID_FMT,
priv->mdio->id, priv->phy_addr);
priv->phydev = phy_connect(dev->net, priv->phy_name,
&ax88172a_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phydev)) {
netdev_err(dev->net, "Could not connect to PHY device %s\n",
priv->phy_name);
ret = PTR_ERR(priv->phydev);
goto out;
}
netdev_info(dev->net, "Connected to phy %s\n", priv->phy_name);
/* During power-up, the AX88172A sets the power-down (BMCR_PDOWN)
* bit of the PHY. Bring the PHY up again.
*/
genphy_resume(priv->phydev);
phy_start(priv->phydev);
return 0;
out:
return ret;
}
static int ax88172a_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
struct ax88172a_private *dp = dev->driver_priv;
struct asix_rx_fixup_info *rx = &dp->rx_fixup_info;
return asix_rx_fixup_internal(dev, skb, rx);
}
const struct driver_info ax88172a_info = {
.description = "ASIX AX88172A USB 2.0 Ethernet",
.bind = ax88172a_bind,
.reset = ax88172a_reset,
.stop = ax88172a_stop,
.unbind = ax88172a_unbind,
.status = ax88172a_status,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
FLAG_MULTI_PACKET,
.rx_fixup = ax88172a_rx_fixup,
.tx_fixup = asix_tx_fixup,
};
|
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (c) 2021 MediaTek Inc.
// Author: Chun-Jie Chen <[email protected]>
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "clk-mtk.h"
#include "clk-gate.h"
#include <dt-bindings/clock/mt8192-clk.h>
static const struct mtk_gate_regs vdec0_cg_regs = {
.set_ofs = 0x0,
.clr_ofs = 0x4,
.sta_ofs = 0x0,
};
static const struct mtk_gate_regs vdec1_cg_regs = {
.set_ofs = 0x200,
.clr_ofs = 0x204,
.sta_ofs = 0x200,
};
static const struct mtk_gate_regs vdec2_cg_regs = {
.set_ofs = 0x8,
.clr_ofs = 0xc,
.sta_ofs = 0x8,
};
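/*
 * Each bank uses write-1-to-set / write-1-to-clear registers, with the
 * status register sharing the set offset. The gates use inverted
 * polarity (a set bit means the clock is running), hence the
 * _setclr_inv ops in the macros below.
 */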
#define GATE_VDEC0(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
#define GATE_VDEC1(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
#define GATE_VDEC2(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdec2_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate vdec_clks[] = {
/* VDEC0 */
GATE_VDEC0(CLK_VDEC_VDEC, "vdec_vdec", "vdec_sel", 0),
GATE_VDEC0(CLK_VDEC_ACTIVE, "vdec_active", "vdec_sel", 4),
/* VDEC1 */
GATE_VDEC1(CLK_VDEC_LAT, "vdec_lat", "vdec_sel", 0),
GATE_VDEC1(CLK_VDEC_LAT_ACTIVE, "vdec_lat_active", "vdec_sel", 4),
/* VDEC2 */
GATE_VDEC2(CLK_VDEC_LARB1, "vdec_larb1", "vdec_sel", 0),
};
static const struct mtk_gate vdec_soc_clks[] = {
/* VDEC_SOC0 */
GATE_VDEC0(CLK_VDEC_SOC_VDEC, "vdec_soc_vdec", "vdec_sel", 0),
GATE_VDEC0(CLK_VDEC_SOC_VDEC_ACTIVE, "vdec_soc_vdec_active", "vdec_sel", 4),
/* VDEC_SOC1 */
GATE_VDEC1(CLK_VDEC_SOC_LAT, "vdec_soc_lat", "vdec_sel", 0),
GATE_VDEC1(CLK_VDEC_SOC_LAT_ACTIVE, "vdec_soc_lat_active", "vdec_sel", 4),
/* VDEC_SOC2 */
GATE_VDEC2(CLK_VDEC_SOC_LARB1, "vdec_soc_larb1", "vdec_sel", 0),
};
static const struct mtk_clk_desc vdec_desc = {
.clks = vdec_clks,
.num_clks = ARRAY_SIZE(vdec_clks),
};
static const struct mtk_clk_desc vdec_soc_desc = {
.clks = vdec_soc_clks,
.num_clks = ARRAY_SIZE(vdec_soc_clks),
};
static const struct of_device_id of_match_clk_mt8192_vdec[] = {
{
.compatible = "mediatek,mt8192-vdecsys",
.data = &vdec_desc,
}, {
.compatible = "mediatek,mt8192-vdecsys_soc",
.data = &vdec_soc_desc,
}, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_vdec);
static struct platform_driver clk_mt8192_vdec_drv = {
.probe = mtk_clk_simple_probe,
.remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-vdec",
.of_match_table = of_match_clk_mt8192_vdec,
},
};
module_platform_driver(clk_mt8192_vdec_drv);
MODULE_DESCRIPTION("MediaTek MT8192 Video Decoders clocks driver");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2008,2009 Ben Herrenschmidt <[email protected]>
* IBM Corp.
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas ([email protected])
*
* Modifications by Paul Mackerras (PowerMac) ([email protected])
* and Cort Dougan (PReP) ([email protected])
* Copyright (C) 1996 Paul Mackerras
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/text-patching.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
/* The variables below are currently only used on 64-bit Book3E
* though this will probably be made common with other nohash
* implementations at some point
*/
static int mmu_pte_psize; /* Page size used for PTE pages */
int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
unsigned long linear_map_top; /* Top of linear mapping */
/*
* Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
* exceptions. This is used for bolted and e6500 TLB miss handlers which
* do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
* this is set to zero.
*/
int extlb_level_exc;
/*
* Handling of virtual linear page tables or indirect TLB entries
* flushing when PTE pages are freed
*/
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
int tsize = mmu_psize_defs[mmu_pte_psize].shift - 10;
if (book3e_htw_mode != PPC_HTW_NONE) {
unsigned long start = address & PMD_MASK;
unsigned long end = address + PMD_SIZE;
unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
		/* This isn't the most optimal; ideally we would factor out the
		 * whole preempt & CPU mask mucking around, or even the IPI, but
		 * it will do for now.
		 */
while (start < end) {
__flush_tlb_page(tlb->mm, start, tsize, 1);
start += size;
}
} else {
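		/* No HW tablewalk: instead invalidate the virtual linear
		 * page table entry covering this PTE page.  The VPTE
		 * address keeps the region of the mapped address, sets
		 * bit 60, and is indexed by address >> (PAGE_SHIFT - 3),
		 * i.e. 8 bytes of PTE per page mapped.
		 */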
unsigned long rmask = 0xf000000000000000ul;
unsigned long rid = (address & rmask) | 0x1000000000000000ul;
unsigned long vpte = address & ~rmask;
vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
vpte |= rid;
__flush_tlb_page(tlb->mm, vpte, tsize, 0);
}
}
static void __init setup_page_sizes(void)
{
unsigned int tlb0cfg;
unsigned int eptcfg;
int psize;
unsigned int mmucfg = mfspr(SPRN_MMUCFG);
if ((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
unsigned int min_pg, max_pg;
min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def;
unsigned int shift;
def = &mmu_psize_defs[psize];
shift = def->shift;
if (shift == 0 || shift & 1)
continue;
/* adjust to be in terms of 4^shift Kb */
shift = (shift - 10) >> 1;
if ((shift >= min_pg) && (shift <= max_pg))
def->flags |= MMU_PAGE_SIZE_DIRECT;
}
goto out;
}
if ((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
u32 tlb1cfg, tlb1ps;
tlb0cfg = mfspr(SPRN_TLB0CFG);
tlb1cfg = mfspr(SPRN_TLB1CFG);
tlb1ps = mfspr(SPRN_TLB1PS);
eptcfg = mfspr(SPRN_EPTCFG);
if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
book3e_htw_mode = PPC_HTW_E6500;
/*
* We expect 4K subpage size and unrestricted indirect size.
* The lack of a restriction on indirect size is a Freescale
* extension, indicated by PSn = 0 but SPSn != 0.
*/
if (eptcfg != 2)
book3e_htw_mode = PPC_HTW_NONE;
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def = &mmu_psize_defs[psize];
if (!def->shift)
continue;
if (tlb1ps & (1U << (def->shift - 10))) {
def->flags |= MMU_PAGE_SIZE_DIRECT;
if (book3e_htw_mode && psize == MMU_PAGE_2M)
def->flags |= MMU_PAGE_SIZE_INDIRECT;
}
}
goto out;
}
out:
/* Cleanup array and print summary */
pr_info("MMU: Supported page sizes\n");
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def = &mmu_psize_defs[psize];
const char *__page_type_names[] = {
"unsupported",
"direct",
"indirect",
"direct & indirect"
};
if (def->flags == 0) {
def->shift = 0;
continue;
}
pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10),
__page_type_names[def->flags & 0x3]);
}
}
/*
* Early initialization of the MMU TLB code
*/
static void early_init_this_mmu(void)
{
unsigned int mas4;
/* Set MAS4 based on page table setting */
mas4 = 0x4 << MAS4_WIMGED_SHIFT;
switch (book3e_htw_mode) {
case PPC_HTW_E6500:
mas4 |= MAS4_INDD;
mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
mas4 |= MAS4_TLBSELD(1);
mmu_pte_psize = MMU_PAGE_2M;
break;
case PPC_HTW_NONE:
mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
mmu_pte_psize = mmu_virtual_psize;
break;
}
mtspr(SPRN_MAS4, mas4);
unsigned int num_cams;
bool map = true;
/* use a quarter of the TLBCAM for bolted linear map */
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
/*
* Only do the mapping once per core, or else the
* transient mapping would cause problems.
*/
#ifdef CONFIG_SMP
if (hweight32(get_tensr()) > 1)
map = false;
#endif
if (map)
linear_map_top = map_mem_in_cams(linear_map_top,
num_cams, false, true);
/* A sync won't hurt us after mucking around with
* the MMU configuration
*/
mb();
}
static void __init early_init_mmu_global(void)
{
/*
* Freescale booke only supports 4K pages in TLB0, so use that.
*/
mmu_vmemmap_psize = MMU_PAGE_4K;
/* XXX This code only checks for TLB 0 capabilities and doesn't
* check what page size combos are supported by the HW. It
* also doesn't handle the case where a separate array holds
* the IND entries from the array loaded by the PT.
*/
/* Look for supported page sizes */
setup_page_sizes();
/*
* If we want to use HW tablewalk, enable it by patching the TLB miss
* handlers to branch to the one dedicated to it.
*/
extlb_level_exc = EX_TLB_SIZE;
switch (book3e_htw_mode) {
case PPC_HTW_E6500:
patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
break;
}
pr_info("MMU: Book3E HW tablewalk %s\n",
book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
/* Set the global containing the top of the linear mapping
* for use by the TLB miss code
*/
linear_map_top = memblock_end_of_DRAM();
ioremap_bot = IOREMAP_BASE;
}
static void __init early_mmu_set_memory_limit(void)
{
/*
	 * Limit memory so we don't have linear faults.
* Unlike memblock_set_current_limit, which limits
* memory available during early boot, this permanently
* reduces the memory available to Linux. We need to
* do this because highmem is not supported on 64-bit.
*/
memblock_enforce_memory_limit(linear_map_top);
memblock_set_current_limit(linear_map_top);
}
/* boot cpu only */
void __init early_init_mmu(void)
{
early_init_mmu_global();
early_init_this_mmu();
early_mmu_set_memory_limit();
}
void early_init_mmu_secondary(void)
{
early_init_this_mmu();
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
/*
* On FSL Embedded 64-bit, usually all RAM is bolted, but with
* unusual memory sizes it's possible for some RAM to not be mapped
* (such RAM is not used at all by Linux, since we don't support
* highmem on 64-bit). We limit ppc64_rma_size to what would be
* mappable if this memblock is the only one. Additional memblocks
* can only increase, not decrease, the amount that ends up getting
* mapped. We still limit max to 1G even if we'll eventually map
* more. This is due to what the early init code is set up to do.
*
* We crop it to the size of the first MEMBLOCK to
* avoid going over total available memory just in case...
*/
unsigned long linear_sz;
unsigned int num_cams;
/* use a quarter of the TLBCAM for bolted linear map */
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
linear_sz = map_mem_in_cams(first_memblock_size, num_cams, true, true);
ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
/* Finally limit subsequent allocations */
memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Remote processor framework
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
*
* Ohad Ben-Cohen <[email protected]>
* Brian Swetland <[email protected]>
*/
#ifndef REMOTEPROC_INTERNAL_H
#define REMOTEPROC_INTERNAL_H
#include <linux/irqreturn.h>
#include <linux/firmware.h>
struct rproc;
struct rproc_debug_trace {
struct rproc *rproc;
struct dentry *tfile;
struct list_head node;
struct rproc_mem_entry trace_mem;
};
/**
* struct rproc_vdev_data - remoteproc virtio device data
* @rsc_offset: offset of the vdev's resource entry
* @id: virtio device id (as in virtio_ids.h)
* @index: vdev position versus other vdev declared in resource table
* @rsc: pointer to the vdev resource entry. Valid only during vdev init as
* the resource can be cached by rproc.
*/
struct rproc_vdev_data {
u32 rsc_offset;
unsigned int id;
u32 index;
struct fw_rsc_vdev *rsc;
};
static inline bool rproc_has_feature(struct rproc *rproc, unsigned int feature)
{
return test_bit(feature, rproc->features);
}
static inline int rproc_set_feature(struct rproc *rproc, unsigned int feature)
{
if (feature >= RPROC_MAX_FEATURES)
return -EINVAL;
set_bit(feature, rproc->features);
return 0;
}
/* from remoteproc_core.c */
void rproc_release(struct kref *kref);
int rproc_of_parse_firmware(struct device *dev, int index,
const char **fw_name);
/* from remoteproc_virtio.c */
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
/* from remoteproc_debugfs.c */
void rproc_remove_trace_file(struct dentry *tfile);
struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
struct rproc_debug_trace *trace);
void rproc_delete_debug_dir(struct rproc *rproc);
void rproc_create_debug_dir(struct rproc *rproc);
void rproc_init_debugfs(void);
void rproc_exit_debugfs(void);
/* from remoteproc_sysfs.c */
extern const struct class rproc_class;
int rproc_init_sysfs(void);
void rproc_exit_sysfs(void);
#ifdef CONFIG_REMOTEPROC_CDEV
void rproc_init_cdev(void);
void rproc_exit_cdev(void);
int rproc_char_device_add(struct rproc *rproc);
void rproc_char_device_remove(struct rproc *rproc);
#else
static inline void rproc_init_cdev(void)
{
}
static inline void rproc_exit_cdev(void)
{
}
/*
 * The character device interface is an optional feature; when it is
 * not enabled, these stubs must not return an error.
 */
static inline int rproc_char_device_add(struct rproc *rproc)
{
return 0;
}
static inline void rproc_char_device_remove(struct rproc *rproc)
{
}
#endif
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
int rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i);
phys_addr_t rproc_va_to_pa(void *cpu_addr);
int rproc_trigger_recovery(struct rproc *rproc);
int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw);
u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw);
struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
const struct firmware *fw);
struct rproc_mem_entry *
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...);
void rproc_add_rvdev(struct rproc *rproc, struct rproc_vdev *rvdev);
void rproc_remove_rvdev(struct rproc_vdev *rvdev);
static inline int rproc_prepare_device(struct rproc *rproc)
{
if (rproc->ops->prepare)
return rproc->ops->prepare(rproc);
return 0;
}
static inline int rproc_unprepare_device(struct rproc *rproc)
{
if (rproc->ops->unprepare)
return rproc->ops->unprepare(rproc);
return 0;
}
static inline int rproc_attach_device(struct rproc *rproc)
{
if (rproc->ops->attach)
return rproc->ops->attach(rproc);
return 0;
}
static inline
int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
{
if (rproc->ops->sanity_check)
return rproc->ops->sanity_check(rproc, fw);
return 0;
}
static inline
u64 rproc_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
{
if (rproc->ops->get_boot_addr)
return rproc->ops->get_boot_addr(rproc, fw);
return 0;
}
static inline
int rproc_load_segments(struct rproc *rproc, const struct firmware *fw)
{
if (rproc->ops->load)
return rproc->ops->load(rproc, fw);
return -EINVAL;
}
static inline int rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
if (rproc->ops->parse_fw)
return rproc->ops->parse_fw(rproc, fw);
return 0;
}
static inline
int rproc_handle_rsc(struct rproc *rproc, u32 rsc_type, void *rsc, int offset,
int avail)
{
if (rproc->ops->handle_rsc)
return rproc->ops->handle_rsc(rproc, rsc_type, rsc, offset,
avail);
return RSC_IGNORED;
}
static inline
struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc,
const struct firmware *fw)
{
if (rproc->ops->find_loaded_rsc_table)
return rproc->ops->find_loaded_rsc_table(rproc, fw);
return NULL;
}
static inline
struct resource_table *rproc_get_loaded_rsc_table(struct rproc *rproc,
size_t *size)
{
if (rproc->ops->get_loaded_rsc_table)
return rproc->ops->get_loaded_rsc_table(rproc, size);
return NULL;
}
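/*
 * Usage sketch (illustrative only): a platform driver fills in just the
 * ops it needs, often reusing the generic ELF helpers declared above;
 * struct rproc_ops itself comes from <linux/remoteproc.h>.
 *
 *	static const struct rproc_ops example_rproc_ops = {
 *		.load			= rproc_elf_load_segments,
 *		.parse_fw		= rproc_elf_load_rsc_table,
 *		.find_loaded_rsc_table	= rproc_elf_find_loaded_rsc_table,
 *		.get_boot_addr		= rproc_elf_get_boot_addr,
 *		.sanity_check		= rproc_elf_sanity_check,
 *	};
 *
 * Any op left NULL falls back to the wrapper defaults above, e.g.
 * rproc_load_segments() returns -EINVAL when no .load is provided.
 */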
static inline
bool rproc_u64_fit_in_size_t(u64 val)
{
if (sizeof(size_t) == sizeof(u64))
return true;
return (val <= (size_t) -1);
}
#endif /* REMOTEPROC_INTERNAL_H */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2010-2013 Bluecherry, LLC <https://www.bluecherrydvr.com>
*
* Original author:
* Ben Collins <[email protected]>
*
* Additional work by:
* John Brooks <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/videobuf2-dma-sg.h>
#include "solo6x10.h"
#include "solo6x10-tw28.h"
#include "solo6x10-jpeg.h"
#define MIN_VID_BUFFERS 2
#define FRAME_BUF_SIZE (400 * 1024)
#define MP4_QS 16
#define DMA_ALIGN 4096
/* 6010 M4V */
static u8 vop_6010_ntsc_d1[] = {
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20,
0x02, 0x48, 0x1d, 0xc0, 0x00, 0x40, 0x00, 0x40,
0x00, 0x40, 0x00, 0x80, 0x00, 0x97, 0x53, 0x04,
0x1f, 0x4c, 0x58, 0x10, 0xf0, 0x71, 0x18, 0x3f,
};
static u8 vop_6010_ntsc_cif[] = {
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20,
0x02, 0x48, 0x1d, 0xc0, 0x00, 0x40, 0x00, 0x40,
0x00, 0x40, 0x00, 0x80, 0x00, 0x97, 0x53, 0x04,
0x1f, 0x4c, 0x2c, 0x10, 0x78, 0x51, 0x18, 0x3f,
};
static u8 vop_6010_pal_d1[] = {
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20,
0x02, 0x48, 0x15, 0xc0, 0x00, 0x40, 0x00, 0x40,
0x00, 0x40, 0x00, 0x80, 0x00, 0x97, 0x53, 0x04,
0x1f, 0x4c, 0x58, 0x11, 0x20, 0x71, 0x18, 0x3f,
};
static u8 vop_6010_pal_cif[] = {
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20,
0x02, 0x48, 0x15, 0xc0, 0x00, 0x40, 0x00, 0x40,
0x00, 0x40, 0x00, 0x80, 0x00, 0x97, 0x53, 0x04,
0x1f, 0x4c, 0x2c, 0x10, 0x90, 0x51, 0x18, 0x3f,
};
/* 6110 h.264 */
static u8 vop_6110_ntsc_d1[] = {
0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 0x1e,
0x9a, 0x74, 0x05, 0x81, 0xec, 0x80, 0x00, 0x00,
0x00, 0x01, 0x68, 0xce, 0x32, 0x28, 0x00, 0x00,
};
static u8 vop_6110_ntsc_cif[] = {
0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 0x1e,
0x9a, 0x74, 0x0b, 0x0f, 0xc8, 0x00, 0x00, 0x00,
0x01, 0x68, 0xce, 0x32, 0x28, 0x00, 0x00, 0x00,
};
static u8 vop_6110_pal_d1[] = {
0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 0x1e,
0x9a, 0x74, 0x05, 0x80, 0x93, 0x20, 0x00, 0x00,
0x00, 0x01, 0x68, 0xce, 0x32, 0x28, 0x00, 0x00,
};
static u8 vop_6110_pal_cif[] = {
0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 0x1e,
0x9a, 0x74, 0x0b, 0x04, 0xb2, 0x00, 0x00, 0x00,
0x01, 0x68, 0xce, 0x32, 0x28, 0x00, 0x00, 0x00,
};
typedef __le32 vop_header[16];
struct solo_enc_buf {
enum solo_enc_types type;
const vop_header *vh;
int motion;
};
static int solo_is_motion_on(struct solo_enc_dev *solo_enc)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
return (solo_dev->motion_mask >> solo_enc->ch) & 1;
}
static int solo_motion_detected(struct solo_enc_dev *solo_enc)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
unsigned long flags;
u32 ch_mask = 1 << solo_enc->ch;
int ret = 0;
spin_lock_irqsave(&solo_enc->motion_lock, flags);
if (solo_reg_read(solo_dev, SOLO_VI_MOT_STATUS) & ch_mask) {
solo_reg_write(solo_dev, SOLO_VI_MOT_CLEAR, ch_mask);
ret = 1;
}
spin_unlock_irqrestore(&solo_enc->motion_lock, flags);
return ret;
}
static void solo_motion_toggle(struct solo_enc_dev *solo_enc, int on)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
u32 mask = 1 << solo_enc->ch;
unsigned long flags;
spin_lock_irqsave(&solo_enc->motion_lock, flags);
if (on)
solo_dev->motion_mask |= mask;
else
solo_dev->motion_mask &= ~mask;
solo_reg_write(solo_dev, SOLO_VI_MOT_CLEAR, mask);
solo_reg_write(solo_dev, SOLO_VI_MOT_ADR,
SOLO_VI_MOTION_EN(solo_dev->motion_mask) |
(SOLO_MOTION_EXT_ADDR(solo_dev) >> 16));
spin_unlock_irqrestore(&solo_enc->motion_lock, flags);
}
void solo_update_mode(struct solo_enc_dev *solo_enc)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
int vop_len;
u8 *vop;
solo_enc->interlaced = (solo_enc->mode & 0x08) ? 1 : 0;
solo_enc->bw_weight = max(solo_dev->fps / solo_enc->interval, 1);
if (solo_enc->mode == SOLO_ENC_MODE_CIF) {
solo_enc->width = solo_dev->video_hsize >> 1;
solo_enc->height = solo_dev->video_vsize;
if (solo_dev->type == SOLO_DEV_6110) {
if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) {
vop = vop_6110_ntsc_cif;
vop_len = sizeof(vop_6110_ntsc_cif);
} else {
vop = vop_6110_pal_cif;
vop_len = sizeof(vop_6110_pal_cif);
}
} else {
if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) {
vop = vop_6010_ntsc_cif;
vop_len = sizeof(vop_6010_ntsc_cif);
} else {
vop = vop_6010_pal_cif;
vop_len = sizeof(vop_6010_pal_cif);
}
}
} else {
solo_enc->width = solo_dev->video_hsize;
solo_enc->height = solo_dev->video_vsize << 1;
solo_enc->bw_weight <<= 2;
if (solo_dev->type == SOLO_DEV_6110) {
if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) {
vop = vop_6110_ntsc_d1;
vop_len = sizeof(vop_6110_ntsc_d1);
} else {
vop = vop_6110_pal_d1;
vop_len = sizeof(vop_6110_pal_d1);
}
} else {
if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) {
vop = vop_6010_ntsc_d1;
vop_len = sizeof(vop_6010_ntsc_d1);
} else {
vop = vop_6010_pal_d1;
vop_len = sizeof(vop_6010_pal_d1);
}
}
}
memcpy(solo_enc->vop, vop, vop_len);
/* Some fixups for 6010/M4V */
if (solo_dev->type == SOLO_DEV_6010) {
u16 fps = solo_dev->fps * 1000;
u16 interval = solo_enc->interval * 1000;
vop = solo_enc->vop;
/* Frame rate and interval */
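		/* fps and interval are the frame rate and frame interval
		 * scaled by 1000; the two 16-bit values are spliced into
		 * bytes 22-25 of the stored VOL header around the fixed
		 * bits 0x0c and 0x04.
		 */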
vop[22] = fps >> 4;
vop[23] = ((fps << 4) & 0xf0) | 0x0c
| ((interval >> 13) & 0x3);
vop[24] = (interval >> 5) & 0xff;
vop[25] = ((interval << 3) & 0xf8) | 0x04;
}
solo_enc->vop_len = vop_len;
/* Now handle the jpeg header */
vop = solo_enc->jpeg_header;
vop[SOF0_START + 5] = 0xff & (solo_enc->height >> 8);
vop[SOF0_START + 6] = 0xff & solo_enc->height;
vop[SOF0_START + 7] = 0xff & (solo_enc->width >> 8);
vop[SOF0_START + 8] = 0xff & solo_enc->width;
memcpy(vop + DQT_START,
jpeg_dqt[solo_g_jpeg_qp(solo_dev, solo_enc->ch)], DQT_LEN);
}
static int solo_enc_on(struct solo_enc_dev *solo_enc)
{
u8 ch = solo_enc->ch;
struct solo_dev *solo_dev = solo_enc->solo_dev;
u8 interval;
solo_update_mode(solo_enc);
/* Make sure to do a bandwidth check */
if (solo_enc->bw_weight > solo_dev->enc_bw_remain)
return -EBUSY;
solo_enc->sequence = 0;
solo_dev->enc_bw_remain -= solo_enc->bw_weight;
if (solo_enc->type == SOLO_ENC_TYPE_EXT)
solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(ch), 1);
/* Disable all encoding for this channel */
solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(ch), 0);
/* Common for both std and ext encoding */
solo_reg_write(solo_dev, SOLO_VE_CH_INTL(ch),
solo_enc->interlaced ? 1 : 0);
if (solo_enc->interlaced)
interval = solo_enc->interval - 1;
else
interval = solo_enc->interval;
/* Standard encoding only */
solo_reg_write(solo_dev, SOLO_VE_CH_GOP(ch), solo_enc->gop);
solo_reg_write(solo_dev, SOLO_VE_CH_QP(ch), solo_enc->qp);
solo_reg_write(solo_dev, SOLO_CAP_CH_INTV(ch), interval);
/* Extended encoding only */
solo_reg_write(solo_dev, SOLO_VE_CH_GOP_E(ch), solo_enc->gop);
solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(ch), solo_enc->qp);
solo_reg_write(solo_dev, SOLO_CAP_CH_INTV_E(ch), interval);
/* Enables the standard encoder */
solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(ch), solo_enc->mode);
return 0;
}
static void solo_enc_off(struct solo_enc_dev *solo_enc)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
solo_dev->enc_bw_remain += solo_enc->bw_weight;
solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(solo_enc->ch), 0);
solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(solo_enc->ch), 0);
}
static int enc_get_mpeg_dma(struct solo_dev *solo_dev, dma_addr_t dma,
unsigned int off, unsigned int size)
{
int ret;
if (off > SOLO_MP4E_EXT_SIZE(solo_dev))
return -EINVAL;
/* Single shot */
if (off + size <= SOLO_MP4E_EXT_SIZE(solo_dev)) {
return solo_p2m_dma_t(solo_dev, 0, dma,
SOLO_MP4E_EXT_ADDR(solo_dev) + off, size,
0, 0);
}
/* Buffer wrap */
ret = solo_p2m_dma_t(solo_dev, 0, dma,
SOLO_MP4E_EXT_ADDR(solo_dev) + off,
SOLO_MP4E_EXT_SIZE(solo_dev) - off, 0, 0);
if (!ret) {
ret = solo_p2m_dma_t(solo_dev, 0,
dma + SOLO_MP4E_EXT_SIZE(solo_dev) - off,
SOLO_MP4E_EXT_ADDR(solo_dev),
size + off - SOLO_MP4E_EXT_SIZE(solo_dev), 0, 0);
}
return ret;
}
/* Build a descriptor queue out of an SG list and send it to the P2M for
* processing. */
static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
struct sg_table *vbuf, int off, int size,
unsigned int base, unsigned int base_size)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
struct scatterlist *sg;
int i;
int ret;
if (WARN_ON_ONCE(size > FRAME_BUF_SIZE))
return -EINVAL;
solo_enc->desc_count = 1;
for_each_sg(vbuf->sgl, sg, vbuf->nents, i) {
struct solo_p2m_desc *desc;
dma_addr_t dma;
int len;
int left = base_size - off;
desc = &solo_enc->desc_items[solo_enc->desc_count++];
dma = sg_dma_address(sg);
len = sg_dma_len(sg);
/* We assume this is smaller than the scatter size */
BUG_ON(skip >= len);
if (skip) {
len -= skip;
dma += skip;
size -= skip;
skip = 0;
}
len = min(len, size);
if (len <= left) {
/* Single descriptor */
solo_p2m_fill_desc(desc, 0, dma, base + off,
len, 0, 0);
} else {
/* Buffer wrap */
/* XXX: Do these as separate DMA requests, to avoid
timeout errors triggered by awkwardly sized
descriptors. See
<https://github.com/bluecherrydvr/solo6x10/issues/8>
*/
ret = solo_p2m_dma_t(solo_dev, 0, dma, base + off,
left, 0, 0);
if (ret)
return ret;
ret = solo_p2m_dma_t(solo_dev, 0, dma + left, base,
len - left, 0, 0);
if (ret)
return ret;
solo_enc->desc_count--;
}
size -= len;
if (size <= 0)
break;
off += len;
if (off >= base_size)
off -= base_size;
/* Because we may use two descriptors per loop */
if (solo_enc->desc_count >= (solo_enc->desc_nelts - 1)) {
ret = solo_p2m_dma_desc(solo_dev, solo_enc->desc_items,
solo_enc->desc_dma,
solo_enc->desc_count - 1);
if (ret)
return ret;
solo_enc->desc_count = 1;
}
}
if (solo_enc->desc_count <= 1)
return 0;
return solo_p2m_dma_desc(solo_dev, solo_enc->desc_items,
solo_enc->desc_dma, solo_enc->desc_count - 1);
}
/* Extract values from VOP header - VE_STATUSxx */
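/*
 * Layout, as decoded by the helpers below (16 little-endian words):
 *   word 0: bit 30 interlaced, bits 28-24 channel, bits 23-22 frame
 *           type, bits 19-0 MPEG frame size
 *   word 1: bits 15-8 hsize, bits 7-0 vsize
 *   word 2: MPEG frame offset      word 3: JPEG frame offset
 *   word 4: bits 19-0 JPEG frame size
 *   word 5: seconds                word 6: microseconds
 */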
static inline __always_unused int vop_interlaced(const vop_header *vh)
{
return (__le32_to_cpu((*vh)[0]) >> 30) & 1;
}
static inline __always_unused u8 vop_channel(const vop_header *vh)
{
return (__le32_to_cpu((*vh)[0]) >> 24) & 0x1F;
}
static inline u8 vop_type(const vop_header *vh)
{
return (__le32_to_cpu((*vh)[0]) >> 22) & 3;
}
static inline u32 vop_mpeg_size(const vop_header *vh)
{
return __le32_to_cpu((*vh)[0]) & 0xFFFFF;
}
static inline u8 __always_unused vop_hsize(const vop_header *vh)
{
return (__le32_to_cpu((*vh)[1]) >> 8) & 0xFF;
}
static inline u8 __always_unused vop_vsize(const vop_header *vh)
{
return __le32_to_cpu((*vh)[1]) & 0xFF;
}
static inline u32 vop_mpeg_offset(const vop_header *vh)
{
return __le32_to_cpu((*vh)[2]);
}
static inline u32 vop_jpeg_offset(const vop_header *vh)
{
return __le32_to_cpu((*vh)[3]);
}
static inline u32 vop_jpeg_size(const vop_header *vh)
{
return __le32_to_cpu((*vh)[4]) & 0xFFFFF;
}
static inline u32 __always_unused vop_sec(const vop_header *vh)
{
return __le32_to_cpu((*vh)[5]);
}
static inline __always_unused u32 vop_usec(const vop_header *vh)
{
return __le32_to_cpu((*vh)[6]);
}
static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
struct vb2_buffer *vb, const vop_header *vh)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct solo_dev *solo_dev = solo_enc->solo_dev;
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
int frame_size;
vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
if (vb2_plane_size(vb, 0) < vop_jpeg_size(vh) + solo_enc->jpeg_len)
return -EIO;
frame_size = ALIGN(vop_jpeg_size(vh) + solo_enc->jpeg_len, DMA_ALIGN);
vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len);
return solo_send_desc(solo_enc, solo_enc->jpeg_len, sgt,
vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev),
frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
SOLO_JPEG_EXT_SIZE(solo_dev));
}
static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
struct vb2_buffer *vb, const vop_header *vh)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct solo_dev *solo_dev = solo_enc->solo_dev;
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
int frame_off, frame_size;
int skip = 0;
if (vb2_plane_size(vb, 0) < vop_mpeg_size(vh))
return -EIO;
/* If this is a key frame, add extra header */
vbuf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
V4L2_BUF_FLAG_BFRAME);
if (!vop_type(vh)) {
skip = solo_enc->vop_len;
vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh) +
solo_enc->vop_len);
} else {
vbuf->flags |= V4L2_BUF_FLAG_PFRAME;
vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh));
}
/* Now get the actual mpeg payload */
frame_off = (vop_mpeg_offset(vh) - SOLO_MP4E_EXT_ADDR(solo_dev) +
sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev);
frame_size = ALIGN(vop_mpeg_size(vh) + skip, DMA_ALIGN);
return solo_send_desc(solo_enc, skip, sgt, frame_off, frame_size,
SOLO_MP4E_EXT_ADDR(solo_dev),
SOLO_MP4E_EXT_SIZE(solo_dev));
}
static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
struct vb2_buffer *vb, struct solo_enc_buf *enc_buf)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
const vop_header *vh = enc_buf->vh;
int ret;
switch (solo_enc->fmt) {
case V4L2_PIX_FMT_MPEG4:
case V4L2_PIX_FMT_H264:
ret = solo_fill_mpeg(solo_enc, vb, vh);
break;
default: /* V4L2_PIX_FMT_MJPEG */
ret = solo_fill_jpeg(solo_enc, vb, vh);
break;
}
if (!ret) {
vbuf->sequence = solo_enc->sequence++;
vb->timestamp = ktime_get_ns();
/* Check for motion flags */
if (solo_is_motion_on(solo_enc) && enc_buf->motion) {
struct v4l2_event ev = {
.type = V4L2_EVENT_MOTION_DET,
.u.motion_det = {
.flags
= V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ,
.frame_sequence = vbuf->sequence,
.region_mask = enc_buf->motion ? 1 : 0,
},
};
v4l2_event_queue(solo_enc->vfd, &ev);
}
}
vb2_buffer_done(vb, ret ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
return ret;
}
static void solo_enc_handle_one(struct solo_enc_dev *solo_enc,
struct solo_enc_buf *enc_buf)
{
struct solo_vb2_buf *vb;
unsigned long flags;
mutex_lock(&solo_enc->lock);
if (solo_enc->type != enc_buf->type)
goto unlock;
spin_lock_irqsave(&solo_enc->av_lock, flags);
if (list_empty(&solo_enc->vidq_active)) {
spin_unlock_irqrestore(&solo_enc->av_lock, flags);
goto unlock;
}
vb = list_first_entry(&solo_enc->vidq_active, struct solo_vb2_buf,
list);
list_del(&vb->list);
spin_unlock_irqrestore(&solo_enc->av_lock, flags);
solo_enc_fillbuf(solo_enc, &vb->vb.vb2_buf, enc_buf);
unlock:
mutex_unlock(&solo_enc->lock);
}
void solo_enc_v4l2_isr(struct solo_dev *solo_dev)
{
wake_up_interruptible_all(&solo_dev->ring_thread_wait);
}
static void solo_handle_ring(struct solo_dev *solo_dev)
{
for (;;) {
struct solo_enc_dev *solo_enc;
struct solo_enc_buf enc_buf;
u32 mpeg_current, off;
u8 ch;
u8 cur_q;
/* Check if the hardware has any new ones in the queue */
cur_q = solo_reg_read(solo_dev, SOLO_VE_STATE(11)) & 0xff;
if (cur_q == solo_dev->enc_idx)
break;
mpeg_current = solo_reg_read(solo_dev,
SOLO_VE_MPEG4_QUE(solo_dev->enc_idx));
solo_dev->enc_idx = (solo_dev->enc_idx + 1) % MP4_QS;
ch = (mpeg_current >> 24) & 0x1f;
off = mpeg_current & 0x00ffffff;
if (ch >= SOLO_MAX_CHANNELS) {
ch -= SOLO_MAX_CHANNELS;
enc_buf.type = SOLO_ENC_TYPE_EXT;
} else
enc_buf.type = SOLO_ENC_TYPE_STD;
solo_enc = solo_dev->v4l2_enc[ch];
if (solo_enc == NULL) {
dev_err(&solo_dev->pdev->dev,
"Got spurious packet for channel %d\n", ch);
continue;
}
/* FAIL... */
if (enc_get_mpeg_dma(solo_dev, solo_dev->vh_dma, off,
sizeof(vop_header)))
continue;
enc_buf.vh = solo_dev->vh_buf;
/* Sanity check */
if (vop_mpeg_offset(enc_buf.vh) !=
SOLO_MP4E_EXT_ADDR(solo_dev) + off)
continue;
if (solo_motion_detected(solo_enc))
enc_buf.motion = 1;
else
enc_buf.motion = 0;
solo_enc_handle_one(solo_enc, &enc_buf);
}
}
static int solo_ring_thread(void *data)
{
struct solo_dev *solo_dev = data;
DECLARE_WAITQUEUE(wait, current);
set_freezable();
add_wait_queue(&solo_dev->ring_thread_wait, &wait);
for (;;) {
long timeout = schedule_timeout_interruptible(HZ);
if (timeout == -ERESTARTSYS || kthread_should_stop())
break;
solo_handle_ring(solo_dev);
try_to_freeze();
}
remove_wait_queue(&solo_dev->ring_thread_wait, &wait);
return 0;
}
static int solo_enc_queue_setup(struct vb2_queue *q,
unsigned int *num_buffers,
unsigned int *num_planes, unsigned int sizes[],
struct device *alloc_devs[])
{
sizes[0] = FRAME_BUF_SIZE;
*num_planes = 1;
if (*num_buffers < MIN_VID_BUFFERS)
*num_buffers = MIN_VID_BUFFERS;
return 0;
}
static void solo_enc_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *vq = vb->vb2_queue;
struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vq);
struct solo_vb2_buf *solo_vb =
container_of(vbuf, struct solo_vb2_buf, vb);
spin_lock(&solo_enc->av_lock);
list_add_tail(&solo_vb->list, &solo_enc->vidq_active);
spin_unlock(&solo_enc->av_lock);
}
static int solo_ring_start(struct solo_dev *solo_dev)
{
solo_dev->ring_thread = kthread_run(solo_ring_thread, solo_dev,
SOLO6X10_NAME "_ring");
if (IS_ERR(solo_dev->ring_thread)) {
int err = PTR_ERR(solo_dev->ring_thread);
solo_dev->ring_thread = NULL;
return err;
}
solo_irq_on(solo_dev, SOLO_IRQ_ENCODER);
return 0;
}
static void solo_ring_stop(struct solo_dev *solo_dev)
{
if (solo_dev->ring_thread) {
kthread_stop(solo_dev->ring_thread);
solo_dev->ring_thread = NULL;
}
solo_irq_off(solo_dev, SOLO_IRQ_ENCODER);
}
static int solo_enc_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q);
return solo_enc_on(solo_enc);
}
static void solo_enc_stop_streaming(struct vb2_queue *q)
{
struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q);
unsigned long flags;
spin_lock_irqsave(&solo_enc->av_lock, flags);
solo_enc_off(solo_enc);
while (!list_empty(&solo_enc->vidq_active)) {
struct solo_vb2_buf *buf = list_entry(
solo_enc->vidq_active.next,
struct solo_vb2_buf, list);
list_del(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&solo_enc->av_lock, flags);
}
static void solo_enc_buf_finish(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vb->vb2_queue);
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
switch (solo_enc->fmt) {
case V4L2_PIX_FMT_MPEG4:
case V4L2_PIX_FMT_H264:
if (vbuf->flags & V4L2_BUF_FLAG_KEYFRAME)
sg_copy_from_buffer(sgt->sgl, sgt->nents,
solo_enc->vop, solo_enc->vop_len);
break;
default: /* V4L2_PIX_FMT_MJPEG */
sg_copy_from_buffer(sgt->sgl, sgt->nents,
solo_enc->jpeg_header, solo_enc->jpeg_len);
break;
}
}
static const struct vb2_ops solo_enc_video_qops = {
.queue_setup = solo_enc_queue_setup,
.buf_queue = solo_enc_buf_queue,
.buf_finish = solo_enc_buf_finish,
.start_streaming = solo_enc_start_streaming,
.stop_streaming = solo_enc_stop_streaming,
};
static int solo_enc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
strscpy(cap->driver, SOLO6X10_NAME, sizeof(cap->driver));
snprintf(cap->card, sizeof(cap->card), "Softlogic 6x10 Enc %d",
solo_enc->ch);
return 0;
}
static int solo_enc_enum_input(struct file *file, void *priv,
struct v4l2_input *input)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
struct solo_dev *solo_dev = solo_enc->solo_dev;
if (input->index)
return -EINVAL;
snprintf(input->name, sizeof(input->name), "Encoder %d",
solo_enc->ch + 1);
input->type = V4L2_INPUT_TYPE_CAMERA;
input->std = solo_enc->vfd->tvnorms;
if (!tw28_get_video_status(solo_dev, solo_enc->ch))
input->status = V4L2_IN_ST_NO_SIGNAL;
return 0;
}
static int solo_enc_set_input(struct file *file, void *priv,
unsigned int index)
{
if (index)
return -EINVAL;
return 0;
}
static int solo_enc_get_input(struct file *file, void *priv,
unsigned int *index)
{
*index = 0;
return 0;
}
static int solo_enc_enum_fmt_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
int dev_type = solo_enc->solo_dev->type;
switch (f->index) {
case 0:
switch (dev_type) {
case SOLO_DEV_6010:
f->pixelformat = V4L2_PIX_FMT_MPEG4;
break;
case SOLO_DEV_6110:
f->pixelformat = V4L2_PIX_FMT_H264;
break;
}
break;
case 1:
f->pixelformat = V4L2_PIX_FMT_MJPEG;
break;
default:
return -EINVAL;
}
return 0;
}
static inline int solo_valid_pixfmt(u32 pixfmt, int dev_type)
{
return (pixfmt == V4L2_PIX_FMT_H264 && dev_type == SOLO_DEV_6110)
|| (pixfmt == V4L2_PIX_FMT_MPEG4 && dev_type == SOLO_DEV_6010)
|| pixfmt == V4L2_PIX_FMT_MJPEG ? 0 : -EINVAL;
}
static int solo_enc_try_fmt_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
struct solo_dev *solo_dev = solo_enc->solo_dev;
struct v4l2_pix_format *pix = &f->fmt.pix;
if (solo_valid_pixfmt(pix->pixelformat, solo_dev->type))
return -EINVAL;
if (pix->width < solo_dev->video_hsize ||
pix->height < solo_dev->video_vsize << 1) {
/* Default to CIF 1/2 size */
pix->width = solo_dev->video_hsize >> 1;
pix->height = solo_dev->video_vsize;
} else {
/* Full frame */
pix->width = solo_dev->video_hsize;
pix->height = solo_dev->video_vsize << 1;
}
switch (pix->field) {
case V4L2_FIELD_NONE:
case V4L2_FIELD_INTERLACED:
break;
case V4L2_FIELD_ANY:
default:
pix->field = V4L2_FIELD_INTERLACED;
break;
}
/* Just set these */
pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
pix->sizeimage = FRAME_BUF_SIZE;
pix->bytesperline = 0;
return 0;
}
static int solo_enc_set_fmt_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
struct solo_dev *solo_dev = solo_enc->solo_dev;
struct v4l2_pix_format *pix = &f->fmt.pix;
int ret;
if (vb2_is_busy(&solo_enc->vidq))
return -EBUSY;
ret = solo_enc_try_fmt_cap(file, priv, f);
if (ret)
return ret;
if (pix->width == solo_dev->video_hsize)
solo_enc->mode = SOLO_ENC_MODE_D1;
else
solo_enc->mode = SOLO_ENC_MODE_CIF;
/* This does not change the encoder at all */
solo_enc->fmt = pix->pixelformat;
/*
* More information is needed about these 'extended' types. As far
* as I can tell these are basically additional video streams with
* different MPEG encoding attributes that can run in parallel with
* the main stream. If so, then this should be implemented as a
* second video node. Abusing priv like this is certainly not the
* right approach.
if (pix->priv)
solo_enc->type = SOLO_ENC_TYPE_EXT;
*/
solo_update_mode(solo_enc);
return 0;
}
static int solo_enc_get_fmt_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
struct v4l2_pix_format *pix = &f->fmt.pix;
pix->width = solo_enc->width;
pix->height = solo_enc->height;
pix->pixelformat = solo_enc->fmt;
pix->field = solo_enc->interlaced ? V4L2_FIELD_INTERLACED :
V4L2_FIELD_NONE;
pix->sizeimage = FRAME_BUF_SIZE;
pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
static int solo_enc_g_std(struct file *file, void *priv, v4l2_std_id *i)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
struct solo_dev *solo_dev = solo_enc->solo_dev;
if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC)
*i = V4L2_STD_NTSC_M;
else
*i = V4L2_STD_PAL;
return 0;
}
static int solo_enc_s_std(struct file *file, void *priv, v4l2_std_id std)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
return solo_set_video_type(solo_enc->solo_dev, std & V4L2_STD_625_50);
}
static int solo_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
struct solo_dev *solo_dev = solo_enc->solo_dev;
if (solo_valid_pixfmt(fsize->pixel_format, solo_dev->type))
return -EINVAL;
switch (fsize->index) {
case 0:
fsize->discrete.width = solo_dev->video_hsize >> 1;
fsize->discrete.height = solo_dev->video_vsize;
break;
case 1:
fsize->discrete.width = solo_dev->video_hsize;
fsize->discrete.height = solo_dev->video_vsize << 1;
break;
default:
return -EINVAL;
}
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
return 0;
}
static int solo_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *fintv)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
struct solo_dev *solo_dev = solo_enc->solo_dev;
if (solo_valid_pixfmt(fintv->pixel_format, solo_dev->type))
return -EINVAL;
if (fintv->index)
return -EINVAL;
if ((fintv->width != solo_dev->video_hsize >> 1 ||
fintv->height != solo_dev->video_vsize) &&
(fintv->width != solo_dev->video_hsize ||
fintv->height != solo_dev->video_vsize << 1))
return -EINVAL;
fintv->type = V4L2_FRMIVAL_TYPE_STEPWISE;
fintv->stepwise.min.numerator = 1;
fintv->stepwise.min.denominator = solo_dev->fps;
fintv->stepwise.max.numerator = 15;
fintv->stepwise.max.denominator = solo_dev->fps;
fintv->stepwise.step.numerator = 1;
fintv->stepwise.step.denominator = solo_dev->fps;
return 0;
}
static int solo_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
struct v4l2_captureparm *cp = &sp->parm.capture;
cp->capability = V4L2_CAP_TIMEPERFRAME;
cp->timeperframe.numerator = solo_enc->interval;
cp->timeperframe.denominator = solo_enc->solo_dev->fps;
cp->capturemode = 0;
/* XXX: Shouldn't we be able to get/set this from vb2? */
cp->readbuffers = 2;
return 0;
}
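/*
 * Convert a timeperframe fraction into a frame-skip interval, clamped
 * to 15; the quotient is bumped by one when the remainder reaches
 * fps/2.  E.g. fps = 30 with timeperframe = 1/15 gives n * fps = 30,
 * 30 / 15 = 2, so every 2nd frame is captured (15 fps).
 */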
static inline int calc_interval(u8 fps, u32 n, u32 d)
{
if (!n || !d)
return 1;
if (d == fps)
return n;
n *= fps;
return min(15U, n / d + (n % d >= (fps >> 1)));
}
static int solo_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
struct v4l2_fract *t = &sp->parm.capture.timeperframe;
u8 fps = solo_enc->solo_dev->fps;
if (vb2_is_streaming(&solo_enc->vidq))
return -EBUSY;
solo_enc->interval = calc_interval(fps, t->numerator, t->denominator);
solo_update_mode(solo_enc);
return solo_g_parm(file, priv, sp);
}
static int solo_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct solo_enc_dev *solo_enc =
container_of(ctrl->handler, struct solo_enc_dev, hdl);
struct solo_dev *solo_dev = solo_enc->solo_dev;
int err;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
case V4L2_CID_CONTRAST:
case V4L2_CID_SATURATION:
case V4L2_CID_HUE:
case V4L2_CID_SHARPNESS:
return tw28_set_ctrl_val(solo_dev, ctrl->id, solo_enc->ch,
ctrl->val);
case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
solo_enc->gop = ctrl->val;
solo_reg_write(solo_dev, SOLO_VE_CH_GOP(solo_enc->ch), solo_enc->gop);
solo_reg_write(solo_dev, SOLO_VE_CH_GOP_E(solo_enc->ch), solo_enc->gop);
return 0;
case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
solo_enc->qp = ctrl->val;
solo_reg_write(solo_dev, SOLO_VE_CH_QP(solo_enc->ch), solo_enc->qp);
solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(solo_enc->ch), solo_enc->qp);
return 0;
case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD:
solo_enc->motion_thresh = ctrl->val << 8;
if (!solo_enc->motion_global || !solo_enc->motion_enabled)
return 0;
return solo_set_motion_threshold(solo_dev, solo_enc->ch,
solo_enc->motion_thresh);
case V4L2_CID_DETECT_MD_MODE:
solo_enc->motion_global = ctrl->val == V4L2_DETECT_MD_MODE_GLOBAL;
solo_enc->motion_enabled = ctrl->val > V4L2_DETECT_MD_MODE_DISABLED;
if (ctrl->val) {
if (solo_enc->motion_global)
err = solo_set_motion_threshold(solo_dev, solo_enc->ch,
solo_enc->motion_thresh);
else
err = solo_set_motion_block(solo_dev, solo_enc->ch,
solo_enc->md_thresholds->p_cur.p_u16);
if (err)
return err;
}
solo_motion_toggle(solo_enc, ctrl->val);
return 0;
case V4L2_CID_DETECT_MD_THRESHOLD_GRID:
if (solo_enc->motion_enabled && !solo_enc->motion_global)
return solo_set_motion_block(solo_dev, solo_enc->ch,
solo_enc->md_thresholds->p_new.p_u16);
break;
case V4L2_CID_OSD_TEXT:
strscpy(solo_enc->osd_text, ctrl->p_new.p_char,
sizeof(solo_enc->osd_text));
return solo_osd_print(solo_enc);
default:
return -EINVAL;
}
return 0;
}
static int solo_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
switch (sub->type) {
case V4L2_EVENT_MOTION_DET:
/* Allow for up to 30 events (1 second for NTSC) to be
* stored. */
return v4l2_event_subscribe(fh, sub, 30, NULL);
default:
return v4l2_ctrl_subscribe_event(fh, sub);
}
}
static const struct v4l2_file_operations solo_enc_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
.read = vb2_fop_read,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops solo_enc_ioctl_ops = {
.vidioc_querycap = solo_enc_querycap,
.vidioc_s_std = solo_enc_s_std,
.vidioc_g_std = solo_enc_g_std,
/* Input callbacks */
.vidioc_enum_input = solo_enc_enum_input,
.vidioc_s_input = solo_enc_set_input,
.vidioc_g_input = solo_enc_get_input,
/* Video capture format callbacks */
.vidioc_enum_fmt_vid_cap = solo_enc_enum_fmt_cap,
.vidioc_try_fmt_vid_cap = solo_enc_try_fmt_cap,
.vidioc_s_fmt_vid_cap = solo_enc_set_fmt_cap,
.vidioc_g_fmt_vid_cap = solo_enc_get_fmt_cap,
/* Streaming I/O */
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
/* Frame size and interval */
.vidioc_enum_framesizes = solo_enum_framesizes,
.vidioc_enum_frameintervals = solo_enum_frameintervals,
/* Video capture parameters */
.vidioc_s_parm = solo_s_parm,
.vidioc_g_parm = solo_g_parm,
/* Logging and events */
.vidioc_log_status = v4l2_ctrl_log_status,
.vidioc_subscribe_event = solo_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static const struct video_device solo_enc_template = {
.name = SOLO6X10_NAME,
.fops = &solo_enc_fops,
.ioctl_ops = &solo_enc_ioctl_ops,
.minor = -1,
.release = video_device_release,
.tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL,
.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING,
};
static const struct v4l2_ctrl_ops solo_ctrl_ops = {
.s_ctrl = solo_s_ctrl,
};
static const struct v4l2_ctrl_config solo_osd_text_ctrl = {
.ops = &solo_ctrl_ops,
.id = V4L2_CID_OSD_TEXT,
.name = "OSD Text",
.type = V4L2_CTRL_TYPE_STRING,
.max = OSD_TEXT_MAX,
.step = 1,
};
/* Motion Detection Threshold matrix */
static const struct v4l2_ctrl_config solo_md_thresholds = {
.ops = &solo_ctrl_ops,
.id = V4L2_CID_DETECT_MD_THRESHOLD_GRID,
.dims = { SOLO_MOTION_SZ, SOLO_MOTION_SZ },
.def = SOLO_DEF_MOT_THRESH,
.max = 65535,
.step = 1,
};
static struct solo_enc_dev *solo_enc_alloc(struct solo_dev *solo_dev,
u8 ch, unsigned nr)
{
struct solo_enc_dev *solo_enc;
struct v4l2_ctrl_handler *hdl;
int ret;
solo_enc = kzalloc(sizeof(*solo_enc), GFP_KERNEL);
if (!solo_enc)
return ERR_PTR(-ENOMEM);
hdl = &solo_enc->hdl;
v4l2_ctrl_handler_init(hdl, 10);
v4l2_ctrl_new_std(hdl, &solo_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
v4l2_ctrl_new_std(hdl, &solo_ctrl_ops,
V4L2_CID_CONTRAST, 0, 255, 1, 128);
v4l2_ctrl_new_std(hdl, &solo_ctrl_ops,
V4L2_CID_SATURATION, 0, 255, 1, 128);
v4l2_ctrl_new_std(hdl, &solo_ctrl_ops,
V4L2_CID_HUE, 0, 255, 1, 128);
if (tw28_has_sharpness(solo_dev, ch))
v4l2_ctrl_new_std(hdl, &solo_ctrl_ops,
V4L2_CID_SHARPNESS, 0, 15, 1, 0);
v4l2_ctrl_new_std(hdl, &solo_ctrl_ops,
V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 255, 1, solo_dev->fps);
v4l2_ctrl_new_std(hdl, &solo_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 0, 31, 1, SOLO_DEFAULT_QP);
v4l2_ctrl_new_std_menu(hdl, &solo_ctrl_ops,
V4L2_CID_DETECT_MD_MODE,
V4L2_DETECT_MD_MODE_THRESHOLD_GRID, 0,
V4L2_DETECT_MD_MODE_DISABLED);
v4l2_ctrl_new_std(hdl, &solo_ctrl_ops,
V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD, 0, 0xff, 1,
SOLO_DEF_MOT_THRESH >> 8);
v4l2_ctrl_new_custom(hdl, &solo_osd_text_ctrl, NULL);
solo_enc->md_thresholds =
v4l2_ctrl_new_custom(hdl, &solo_md_thresholds, NULL);
if (hdl->error) {
ret = hdl->error;
goto hdl_free;
}
solo_enc->solo_dev = solo_dev;
solo_enc->ch = ch;
mutex_init(&solo_enc->lock);
spin_lock_init(&solo_enc->av_lock);
INIT_LIST_HEAD(&solo_enc->vidq_active);
solo_enc->fmt = (solo_dev->type == SOLO_DEV_6010) ?
V4L2_PIX_FMT_MPEG4 : V4L2_PIX_FMT_H264;
solo_enc->type = SOLO_ENC_TYPE_STD;
solo_enc->qp = SOLO_DEFAULT_QP;
solo_enc->gop = solo_dev->fps;
solo_enc->interval = 1;
solo_enc->mode = SOLO_ENC_MODE_CIF;
solo_enc->motion_global = true;
solo_enc->motion_thresh = SOLO_DEF_MOT_THRESH;
solo_enc->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
solo_enc->vidq.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
solo_enc->vidq.ops = &solo_enc_video_qops;
solo_enc->vidq.mem_ops = &vb2_dma_sg_memops;
solo_enc->vidq.drv_priv = solo_enc;
solo_enc->vidq.gfp_flags = __GFP_DMA32 | __GFP_KSWAPD_RECLAIM;
solo_enc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
solo_enc->vidq.buf_struct_size = sizeof(struct solo_vb2_buf);
solo_enc->vidq.lock = &solo_enc->lock;
solo_enc->vidq.dev = &solo_dev->pdev->dev;
ret = vb2_queue_init(&solo_enc->vidq);
if (ret)
goto hdl_free;
solo_update_mode(solo_enc);
spin_lock_init(&solo_enc->motion_lock);
/* Initialize this per encoder */
solo_enc->jpeg_len = sizeof(jpeg_header);
memcpy(solo_enc->jpeg_header, jpeg_header, solo_enc->jpeg_len);
solo_enc->desc_nelts = 32;
solo_enc->desc_items = dma_alloc_coherent(&solo_dev->pdev->dev,
sizeof(struct solo_p2m_desc) *
solo_enc->desc_nelts,
&solo_enc->desc_dma,
GFP_KERNEL);
ret = -ENOMEM;
if (solo_enc->desc_items == NULL)
goto hdl_free;
solo_enc->vfd = video_device_alloc();
if (!solo_enc->vfd)
goto pci_free;
*solo_enc->vfd = solo_enc_template;
solo_enc->vfd->v4l2_dev = &solo_dev->v4l2_dev;
solo_enc->vfd->ctrl_handler = hdl;
solo_enc->vfd->queue = &solo_enc->vidq;
solo_enc->vfd->lock = &solo_enc->lock;
video_set_drvdata(solo_enc->vfd, solo_enc);
ret = video_register_device(solo_enc->vfd, VFL_TYPE_VIDEO, nr);
if (ret < 0)
goto vdev_release;
snprintf(solo_enc->vfd->name, sizeof(solo_enc->vfd->name),
"%s-enc (%i/%i)", SOLO6X10_NAME, solo_dev->vfd->num,
solo_enc->vfd->num);
return solo_enc;
vdev_release:
video_device_release(solo_enc->vfd);
pci_free:
dma_free_coherent(&solo_enc->solo_dev->pdev->dev,
sizeof(struct solo_p2m_desc) * solo_enc->desc_nelts,
solo_enc->desc_items, solo_enc->desc_dma);
hdl_free:
v4l2_ctrl_handler_free(hdl);
kfree(solo_enc);
return ERR_PTR(ret);
}
static void solo_enc_free(struct solo_enc_dev *solo_enc)
{
if (solo_enc == NULL)
return;
dma_free_coherent(&solo_enc->solo_dev->pdev->dev,
sizeof(struct solo_p2m_desc) * solo_enc->desc_nelts,
solo_enc->desc_items, solo_enc->desc_dma);
video_unregister_device(solo_enc->vfd);
v4l2_ctrl_handler_free(&solo_enc->hdl);
kfree(solo_enc);
}
int solo_enc_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
{
int i;
init_waitqueue_head(&solo_dev->ring_thread_wait);
solo_dev->vh_size = sizeof(vop_header);
solo_dev->vh_buf = dma_alloc_coherent(&solo_dev->pdev->dev,
solo_dev->vh_size,
&solo_dev->vh_dma, GFP_KERNEL);
if (solo_dev->vh_buf == NULL)
return -ENOMEM;
for (i = 0; i < solo_dev->nr_chans; i++) {
solo_dev->v4l2_enc[i] = solo_enc_alloc(solo_dev, i, nr);
if (IS_ERR(solo_dev->v4l2_enc[i]))
break;
}
if (i != solo_dev->nr_chans) {
int ret = PTR_ERR(solo_dev->v4l2_enc[i]);
while (i--)
solo_enc_free(solo_dev->v4l2_enc[i]);
dma_free_coherent(&solo_dev->pdev->dev, solo_dev->vh_size,
solo_dev->vh_buf, solo_dev->vh_dma);
solo_dev->vh_buf = NULL;
return ret;
}
if (solo_dev->type == SOLO_DEV_6010)
solo_dev->enc_bw_remain = solo_dev->fps * 4 * 4;
else
solo_dev->enc_bw_remain = solo_dev->fps * 4 * 5;
dev_info(&solo_dev->pdev->dev, "Encoders as /dev/video%d-%d\n",
solo_dev->v4l2_enc[0]->vfd->num,
solo_dev->v4l2_enc[solo_dev->nr_chans - 1]->vfd->num);
return solo_ring_start(solo_dev);
}
void solo_enc_v4l2_exit(struct solo_dev *solo_dev)
{
int i;
solo_ring_stop(solo_dev);
for (i = 0; i < solo_dev->nr_chans; i++)
solo_enc_free(solo_dev->v4l2_enc[i]);
if (solo_dev->vh_buf)
dma_free_coherent(&solo_dev->pdev->dev, solo_dev->vh_size,
solo_dev->vh_buf, solo_dev->vh_dma);
}
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KERN_LEVELS_H__
#define __KERN_LEVELS_H__
#define KERN_SOH "\001" /* ASCII Start Of Header */
#define KERN_SOH_ASCII '\001'
#define KERN_EMERG KERN_SOH "0" /* system is unusable */
#define KERN_ALERT KERN_SOH "1" /* action must be taken immediately */
#define KERN_CRIT KERN_SOH "2" /* critical conditions */
#define KERN_ERR KERN_SOH "3" /* error conditions */
#define KERN_WARNING KERN_SOH "4" /* warning conditions */
#define KERN_NOTICE KERN_SOH "5" /* normal but significant condition */
#define KERN_INFO KERN_SOH "6" /* informational */
#define KERN_DEBUG KERN_SOH "7" /* debug-level messages */
#define KERN_DEFAULT KERN_SOH "d" /* the default kernel loglevel */
/*
* Annotation for a "continued" line of log printout (only done after a
* line that had no enclosing \n). Only to be used by core/arch code
* during early bootup (a continued line is not SMP-safe otherwise).
*/
#define KERN_CONT ""
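/*
 * Usage sketch: a level macro is simply prepended to the format string,
 * e.g. printk(KERN_INFO "device ready\n"); the pr_info() family of
 * helpers expands to the same thing.
 */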
#endif
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020 TOSHIBA CORPORATION
* Copyright (c) 2020 Toshiba Electronic Devices & Storage Corporation
* Copyright (c) 2020 Nobuhiro Iwamatsu <[email protected]>
*/
#include <linux/init.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include "pinctrl-common.h"
#include "../core.h"
#include "../pinconf.h"
#include "../pinctrl-utils.h"
#define DSEL_MASK GENMASK(3, 0)
/* private data */
struct visconti_pinctrl {
void __iomem *base;
struct device *dev;
struct pinctrl_dev *pctl;
struct pinctrl_desc pctl_desc;
const struct visconti_pinctrl_devdata *devdata;
spinlock_t lock; /* protect pinctrl register */
};
/* pinconf */
static int visconti_pin_config_set(struct pinctrl_dev *pctldev,
unsigned int _pin,
unsigned long *configs,
unsigned int num_configs)
{
struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
const struct visconti_desc_pin *pin = &priv->devdata->pins[_pin];
enum pin_config_param param;
unsigned int arg;
int i, ret = 0;
unsigned int val, set_val, pude_val;
unsigned long flags;
dev_dbg(priv->dev, "%s: pin = %d (%s)\n", __func__, _pin, pin->pin.name);
spin_lock_irqsave(&priv->lock, flags);
for (i = 0; i < num_configs; i++) {
set_val = 0;
pude_val = 0;
param = pinconf_to_config_param(configs[i]);
switch (param) {
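		/*
		 * The bias cases deliberately fall through: PULL_UP only
		 * sets set_val and shares PULL_DOWN's pudsel write
		 * (1 = pull-up, 0 = pull-down), and both share
		 * BIAS_DISABLE's pude write, with pude_val = 1 to enable
		 * the pull or 0 (direct entry) to disable it.
		 */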
case PIN_CONFIG_BIAS_PULL_UP:
set_val = 1;
fallthrough;
case PIN_CONFIG_BIAS_PULL_DOWN:
/* update pudsel setting */
val = readl(priv->base + pin->pudsel_offset);
val &= ~BIT(pin->pud_shift);
val |= set_val << pin->pud_shift;
writel(val, priv->base + pin->pudsel_offset);
pude_val = 1;
fallthrough;
case PIN_CONFIG_BIAS_DISABLE:
/* update pude setting */
val = readl(priv->base + pin->pude_offset);
val &= ~BIT(pin->pud_shift);
val |= pude_val << pin->pud_shift;
writel(val, priv->base + pin->pude_offset);
dev_dbg(priv->dev, "BIAS(%d): off = 0x%x val = 0x%x\n",
param, pin->pude_offset, val);
break;
case PIN_CONFIG_DRIVE_STRENGTH:
arg = pinconf_to_config_argument(configs[i]);
dev_dbg(priv->dev, "DRV_STR arg = %d\n", arg);
switch (arg) {
case 2:
case 4:
case 8:
case 16:
case 24:
case 32:
/*
* I/O drive capacity setting:
* 2mA: 0
* 4mA: 1
* 8mA: 3
* 16mA: 7
* 24mA: 11
* 32mA: 15
*/
set_val = DIV_ROUND_CLOSEST(arg, 2) - 1;
break;
default:
ret = -EINVAL;
goto err;
}
/* update drive setting */
val = readl(priv->base + pin->dsel_offset);
val &= ~(DSEL_MASK << pin->dsel_shift);
val |= set_val << pin->dsel_shift;
writel(val, priv->base + pin->dsel_offset);
break;
default:
ret = -EOPNOTSUPP;
goto err;
}
}
err:
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
static int visconti_pin_config_group_set(struct pinctrl_dev *pctldev,
unsigned int selector,
unsigned long *configs,
unsigned int num_configs)
{
struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
const unsigned int *pins;
unsigned int num_pins;
int i, ret;
pins = priv->devdata->groups[selector].pins;
num_pins = priv->devdata->groups[selector].nr_pins;
dev_dbg(priv->dev, "%s: select = %d, n_pin = %d, n_config = %d\n",
__func__, selector, num_pins, num_configs);
for (i = 0; i < num_pins; i++) {
ret = visconti_pin_config_set(pctldev, pins[i],
configs, num_configs);
if (ret)
return ret;
}
return 0;
}
static const struct pinconf_ops visconti_pinconf_ops = {
.is_generic = true,
.pin_config_set = visconti_pin_config_set,
.pin_config_group_set = visconti_pin_config_group_set,
.pin_config_config_dbg_show = pinconf_generic_dump_config,
};
/* pinctrl */
static int visconti_get_groups_count(struct pinctrl_dev *pctldev)
{
struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
return priv->devdata->nr_groups;
}
static const char *visconti_get_group_name(struct pinctrl_dev *pctldev,
unsigned int selector)
{
struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
return priv->devdata->groups[selector].name;
}
static int visconti_get_group_pins(struct pinctrl_dev *pctldev,
unsigned int selector,
const unsigned int **pins,
unsigned int *num_pins)
{
struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
*pins = priv->devdata->groups[selector].pins;
*num_pins = priv->devdata->groups[selector].nr_pins;
return 0;
}
static const struct pinctrl_ops visconti_pinctrl_ops = {
.get_groups_count = visconti_get_groups_count,
.get_group_name = visconti_get_group_name,
.get_group_pins = visconti_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
.dt_free_map = pinctrl_utils_free_map,
};
/* pinmux */
static int visconti_get_functions_count(struct pinctrl_dev *pctldev)
{
struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
return priv->devdata->nr_functions;
}
static const char *visconti_get_function_name(struct pinctrl_dev *pctldev,
unsigned int selector)
{
struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
return priv->devdata->functions[selector].name;
}
static int visconti_get_function_groups(struct pinctrl_dev *pctldev,
unsigned int selector,
const char * const **groups,
unsigned * const num_groups)
{
struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
*groups = priv->devdata->functions[selector].groups;
*num_groups = priv->devdata->functions[selector].nr_groups;
return 0;
}
static int visconti_set_mux(struct pinctrl_dev *pctldev,
unsigned int function, unsigned int group)
{
struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
const struct visconti_pin_function *func = &priv->devdata->functions[function];
const struct visconti_pin_group *grp = &priv->devdata->groups[group];
const struct visconti_mux *mux = &grp->mux;
unsigned int val;
unsigned long flags;
dev_dbg(priv->dev, "%s: function = %d(%s) group = %d(%s)\n", __func__,
function, func->name, group, grp->name);
spin_lock_irqsave(&priv->lock, flags);
/* update mux */
val = readl(priv->base + mux->offset);
val &= ~mux->mask;
val |= mux->val;
writel(val, priv->base + mux->offset);
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(priv->dev, "[%x]: 0x%x\n", mux->offset, val);
return 0;
}
static int visconti_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int pin)
{
struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
const struct visconti_mux *gpio_mux = &priv->devdata->gpio_mux[pin];
unsigned long flags;
unsigned int val;
dev_dbg(priv->dev, "%s: pin = %d\n", __func__, pin);
/* update mux */
spin_lock_irqsave(&priv->lock, flags);
val = readl(priv->base + gpio_mux->offset);
val &= ~gpio_mux->mask;
val |= gpio_mux->val;
writel(val, priv->base + gpio_mux->offset);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static const struct pinmux_ops visconti_pinmux_ops = {
.get_functions_count = visconti_get_functions_count,
.get_function_name = visconti_get_function_name,
.get_function_groups = visconti_get_function_groups,
.set_mux = visconti_set_mux,
.gpio_request_enable = visconti_gpio_request_enable,
.strict = true,
};
int visconti_pinctrl_probe(struct platform_device *pdev,
const struct visconti_pinctrl_devdata *devdata)
{
struct device *dev = &pdev->dev;
struct visconti_pinctrl *priv;
struct pinctrl_pin_desc *pins;
int i, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->devdata = devdata;
spin_lock_init(&priv->lock);
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
dev_err(dev, "unable to map I/O space\n");
return PTR_ERR(priv->base);
}
pins = devm_kcalloc(dev, devdata->nr_pins,
sizeof(*pins), GFP_KERNEL);
if (!pins)
return -ENOMEM;
for (i = 0; i < devdata->nr_pins; i++)
pins[i] = devdata->pins[i].pin;
priv->pctl_desc.name = dev_name(dev);
priv->pctl_desc.owner = THIS_MODULE;
priv->pctl_desc.pins = pins;
priv->pctl_desc.npins = devdata->nr_pins;
priv->pctl_desc.confops = &visconti_pinconf_ops;
priv->pctl_desc.pctlops = &visconti_pinctrl_ops;
priv->pctl_desc.pmxops = &visconti_pinmux_ops;
ret = devm_pinctrl_register_and_init(dev, &priv->pctl_desc,
priv, &priv->pctl);
if (ret) {
dev_err(dev, "couldn't register pinctrl: %d\n", ret);
return ret;
}
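	/* Some SoC variants protect the pin controller registers; the
	 * optional unlock() hook lifts that protection where required.
	 */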
if (devdata->unlock)
devdata->unlock(priv->base);
return pinctrl_enable(priv->pctl);
}
|
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright (c) 2016 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
*/
#ifndef DT_BINDINGS_RESET_AMLOGIC_MESON_G12A_AOCLK
#define DT_BINDINGS_RESET_AMLOGIC_MESON_G12A_AOCLK
#define RESET_AO_IR_IN 0
#define RESET_AO_UART 1
#define RESET_AO_I2C_M 2
#define RESET_AO_I2C_S 3
#define RESET_AO_SAR_ADC 4
#define RESET_AO_UART2 5
#define RESET_AO_IR_OUT 6
#endif
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for the wakeup data structure at the head of the
* wakeup code.
*/
#ifndef ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H
#define ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
/* This must match data at wakeup.S */
struct wakeup_header {
u16 video_mode; /* Video mode number */
u32 pmode_entry; /* Protected mode resume point, 32-bit only */
u16 pmode_cs;
u32 pmode_cr0; /* Protected mode cr0 */
u32 pmode_cr3; /* Protected mode cr3 */
u32 pmode_cr4; /* Protected mode cr4 */
u32 pmode_efer_low; /* Protected mode EFER */
u32 pmode_efer_high;
u64 pmode_gdt;
u32 pmode_misc_en_low; /* Protected mode MISC_ENABLE */
u32 pmode_misc_en_high;
u32 pmode_behavior; /* Wakeup routine behavior flags */
u32 realmode_flags;
u32 real_magic;
u32 signature; /* To check we have correct structure */
} __attribute__((__packed__));
extern struct wakeup_header wakeup_header;
#endif /* !__ASSEMBLY__ */
#define WAKEUP_HEADER_OFFSET 8
#define WAKEUP_HEADER_SIGNATURE 0x51ee1111
/* Wakeup behavior bits */
#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0
#define WAKEUP_BEHAVIOR_RESTORE_CR4 1
#define WAKEUP_BEHAVIOR_RESTORE_EFER 2
#endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2023 ARM Ltd.
*/
#ifndef __ASM_GCS_H
#define __ASM_GCS_H
#include <asm/types.h>
#include <asm/uaccess.h>
struct kernel_clone_args;
struct ksignal;
static inline void gcsb_dsync(void)
{
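	/* GCSB DSYNC, emitted as a raw encoding so that no GCS-aware
	 * assembler is required.
	 */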
asm volatile(".inst 0xd503227f" : : : "memory");
}
static inline void gcsstr(u64 *addr, u64 val)
{
register u64 *_addr __asm__ ("x0") = addr;
register long _val __asm__ ("x1") = val;
/* GCSSTTR x1, x0 */
asm volatile(
".inst 0xd91f1c01\n"
:
: "rZ" (_val), "r" (_addr)
: "memory");
}
static inline void gcsss1(u64 Xt)
{
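	/* GCSSS1 Xt: first half of a GCS stack switch */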
asm volatile (
"sys #3, C7, C7, #2, %0\n"
:
: "rZ" (Xt)
: "memory");
}
static inline u64 gcsss2(void)
{
u64 Xt;
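	/* GCSSS2 Xt: second half of a GCS stack switch */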
asm volatile(
"SYSL %0, #3, C7, C7, #3\n"
: "=r" (Xt)
:
: "memory");
return Xt;
}
#define PR_SHADOW_STACK_SUPPORTED_STATUS_MASK \
(PR_SHADOW_STACK_ENABLE | PR_SHADOW_STACK_WRITE | PR_SHADOW_STACK_PUSH)
#ifdef CONFIG_ARM64_GCS
static inline bool task_gcs_el0_enabled(struct task_struct *task)
{
	return task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE;
}
void gcs_set_el0_mode(struct task_struct *task);
void gcs_free(struct task_struct *task);
void gcs_preserve_current_state(void);
unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
const struct kernel_clone_args *args);
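/*
 * Compare only the mode bits that have been locked for the task; a
 * request that would change any locked bit is refused with -EBUSY.
 */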
static inline int gcs_check_locked(struct task_struct *task,
unsigned long new_val)
{
unsigned long cur_val = task->thread.gcs_el0_mode;
cur_val &= task->thread.gcs_el0_locked;
new_val &= task->thread.gcs_el0_locked;
if (cur_val != new_val)
return -EBUSY;
return 0;
}
#else
static inline bool task_gcs_el0_enabled(struct task_struct *task)
{
return false;
}
static inline void gcs_set_el0_mode(struct task_struct *task) { }
static inline void gcs_free(struct task_struct *task) { }
static inline void gcs_preserve_current_state(void) { }
static inline unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
const struct kernel_clone_args *args)
{
return -ENOTSUPP;
}
static inline int gcs_check_locked(struct task_struct *task,
unsigned long new_val)
{
return 0;
}
#endif /* CONFIG_ARM64_GCS */
#endif /* __ASM_GCS_H */
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Low-level device IO routines for ST-Ericsson CW1200 drivers
*
* Copyright (c) 2010, ST-Ericsson
* Author: Dmitry Tarnyagin <[email protected]>
*
* Based on:
* ST-Ericsson UMAC CW1200 driver, which is
* Copyright (c) 2010, ST-Ericsson
* Author: Ajitpal Singh <[email protected]>
*/
#include <linux/types.h>
#include "cw1200.h"
#include "hwio.h"
#include "hwbus.h"
/* SDIO addr is 4*spi_addr */
#define SPI_REG_ADDR_TO_SDIO(spi_reg_addr) ((spi_reg_addr) << 2)
#define SDIO_ADDR17BIT(buf_id, mpf, rfu, reg_id_ofs) \
((((buf_id) & 0x1F) << 7) \
| (((mpf) & 1) << 6) \
| (((rfu) & 1) << 5) \
| (((reg_id_ofs) & 0x1F) << 0))
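/*
 * SDIO_ADDR17BIT() packs a 5-bit buffer ID into bits 11:7, the MPF flag
 * into bit 6, an RFU bit into bit 5 and the register offset into bits
 * 4:0. For example, SDIO_ADDR17BIT(2, 0, 0, 0x18) == 0x118.
 */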
#define MAX_RETRY 3
static int __cw1200_reg_read(struct cw1200_common *priv, u16 addr,
void *buf, size_t buf_len, int buf_id)
{
u16 addr_sdio;
u32 sdio_reg_addr_17bit;
/* Check if buffer is aligned to 4 byte boundary */
if (WARN_ON(((unsigned long)buf & 3) && (buf_len > 4))) {
pr_err("buffer is not aligned.\n");
return -EINVAL;
}
/* Convert to SDIO Register Address */
addr_sdio = SPI_REG_ADDR_TO_SDIO(addr);
sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio);
return priv->hwbus_ops->hwbus_memcpy_fromio(priv->hwbus_priv,
sdio_reg_addr_17bit,
buf, buf_len);
}
static int __cw1200_reg_write(struct cw1200_common *priv, u16 addr,
const void *buf, size_t buf_len, int buf_id)
{
u16 addr_sdio;
u32 sdio_reg_addr_17bit;
/* Convert to SDIO Register Address */
addr_sdio = SPI_REG_ADDR_TO_SDIO(addr);
sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio);
return priv->hwbus_ops->hwbus_memcpy_toio(priv->hwbus_priv,
sdio_reg_addr_17bit,
buf, buf_len);
}
static inline int __cw1200_reg_read_32(struct cw1200_common *priv,
u16 addr, u32 *val)
{
__le32 tmp;
int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0);
*val = le32_to_cpu(tmp);
return i;
}
static inline int __cw1200_reg_write_32(struct cw1200_common *priv,
u16 addr, u32 val)
{
__le32 tmp = cpu_to_le32(val);
return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0);
}
static inline int __cw1200_reg_read_16(struct cw1200_common *priv,
u16 addr, u16 *val)
{
__le16 tmp;
int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0);
*val = le16_to_cpu(tmp);
return i;
}
static inline int __cw1200_reg_write_16(struct cw1200_common *priv,
u16 addr, u16 val)
{
__le16 tmp = cpu_to_le16(val);
return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0);
}
int cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf,
size_t buf_len)
{
int ret;
priv->hwbus_ops->lock(priv->hwbus_priv);
ret = __cw1200_reg_read(priv, addr, buf, buf_len, 0);
priv->hwbus_ops->unlock(priv->hwbus_priv);
return ret;
}
int cw1200_reg_write(struct cw1200_common *priv, u16 addr, const void *buf,
size_t buf_len)
{
int ret;
priv->hwbus_ops->lock(priv->hwbus_priv);
ret = __cw1200_reg_write(priv, addr, buf, buf_len, 0);
priv->hwbus_ops->unlock(priv->hwbus_priv);
return ret;
}
int cw1200_data_read(struct cw1200_common *priv, void *buf, size_t buf_len)
{
int ret, retry = 1;
int buf_id_rx = priv->buf_id_rx;
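	/* The rx buffer ID cycles through a 2-bit counter (0..3) */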
priv->hwbus_ops->lock(priv->hwbus_priv);
while (retry <= MAX_RETRY) {
ret = __cw1200_reg_read(priv,
ST90TDS_IN_OUT_QUEUE_REG_ID, buf,
buf_len, buf_id_rx + 1);
if (!ret) {
buf_id_rx = (buf_id_rx + 1) & 3;
priv->buf_id_rx = buf_id_rx;
break;
} else {
retry++;
mdelay(1);
pr_err("error :[%d]\n", ret);
}
}
priv->hwbus_ops->unlock(priv->hwbus_priv);
return ret;
}
int cw1200_data_write(struct cw1200_common *priv, const void *buf,
size_t buf_len)
{
int ret, retry = 1;
int buf_id_tx = priv->buf_id_tx;
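	/* The tx buffer ID cycles through a 5-bit counter, matching the
	 * buf_id field of SDIO_ADDR17BIT().
	 */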
priv->hwbus_ops->lock(priv->hwbus_priv);
while (retry <= MAX_RETRY) {
ret = __cw1200_reg_write(priv,
ST90TDS_IN_OUT_QUEUE_REG_ID, buf,
buf_len, buf_id_tx);
if (!ret) {
buf_id_tx = (buf_id_tx + 1) & 31;
priv->buf_id_tx = buf_id_tx;
break;
} else {
retry++;
mdelay(1);
pr_err("error :[%d]\n", ret);
}
}
priv->hwbus_ops->unlock(priv->hwbus_priv);
return ret;
}
int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf,
size_t buf_len, u32 prefetch, u16 port_addr)
{
u32 val32 = 0;
int i, ret;
if ((buf_len / 2) >= 0x1000) {
pr_err("Can't read more than 0xfff words.\n");
return -EINVAL;
}
priv->hwbus_ops->lock(priv->hwbus_priv);
/* Write address */
ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr);
if (ret < 0) {
pr_err("Can't write address register.\n");
goto out;
}
/* Read CONFIG Register Value - We will read 32 bits */
ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
if (ret < 0) {
pr_err("Can't read config register.\n");
goto out;
}
/* Set PREFETCH bit */
ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID,
val32 | prefetch);
if (ret < 0) {
pr_err("Can't write prefetch bit.\n");
goto out;
}
/* Check for PRE-FETCH bit to be cleared */
for (i = 0; i < 20; i++) {
ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
if (ret < 0) {
pr_err("Can't check prefetch bit.\n");
goto out;
}
if (!(val32 & prefetch))
break;
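		/* progressive backoff: 0, 1, ... 19 ms between polls */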
mdelay(i);
}
	if (val32 & prefetch) {
		pr_err("Prefetch bit is not cleared.\n");
		ret = -ETIMEDOUT;
		goto out;
	}
/* Read data port */
ret = __cw1200_reg_read(priv, port_addr, buf, buf_len, 0);
if (ret < 0) {
pr_err("Can't read data port.\n");
goto out;
}
out:
priv->hwbus_ops->unlock(priv->hwbus_priv);
return ret;
}
int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf,
size_t buf_len)
{
int ret;
if ((buf_len / 2) >= 0x1000) {
pr_err("Can't write more than 0xfff words.\n");
return -EINVAL;
}
priv->hwbus_ops->lock(priv->hwbus_priv);
/* Write address */
ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr);
if (ret < 0) {
pr_err("Can't write address register.\n");
goto out;
}
/* Write data port */
ret = __cw1200_reg_write(priv, ST90TDS_SRAM_DPORT_REG_ID,
buf, buf_len, 0);
if (ret < 0) {
pr_err("Can't write data port.\n");
goto out;
}
out:
priv->hwbus_ops->unlock(priv->hwbus_priv);
return ret;
}
int __cw1200_irq_enable(struct cw1200_common *priv, int enable)
{
u32 val32;
u16 val16;
int ret;
	if (priv->hw_type == HIF_8601_SILICON) {
ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
if (ret < 0) {
pr_err("Can't read config register.\n");
return ret;
}
if (enable)
val32 |= ST90TDS_CONF_IRQ_RDY_ENABLE;
else
val32 &= ~ST90TDS_CONF_IRQ_RDY_ENABLE;
ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val32);
if (ret < 0) {
pr_err("Can't write config register.\n");
return ret;
}
} else {
ret = __cw1200_reg_read_16(priv, ST90TDS_CONFIG_REG_ID, &val16);
if (ret < 0) {
pr_err("Can't read control register.\n");
return ret;
}
if (enable)
val16 |= ST90TDS_CONT_IRQ_RDY_ENABLE;
else
val16 &= ~ST90TDS_CONT_IRQ_RDY_ENABLE;
ret = __cw1200_reg_write_16(priv, ST90TDS_CONFIG_REG_ID, val16);
if (ret < 0) {
pr_err("Can't write control register.\n");
return ret;
}
}
return 0;
}
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
*
* Copyright (c) 2003 Intracom S.A.
* by Pantelis Antoniou <[email protected]>
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/pgtable.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <asm/mpc5xxx.h>
#include "fs_enet.h"
#include "fec.h"
/* Make MII read/write commands for the FEC.
*/
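/*
 * These follow the clause 22 MII frame layout: ST (01) in bits 31:30,
 * OP in bits 29:28 (10 = read, 01 = write), PHY address in bits 27:23
 * (ORed in by the callers below), register number in bits 22:18,
 * TA (10) in bits 17:16 and the data word in bits 15:0.
 */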
#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
#define mk_mii_end 0
#define FEC_MII_LOOPS 10000
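/* Upper bound on busy-polling for the FEC_ENET_MII completion event */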
static int fs_enet_fec_mii_read(struct mii_bus *bus, int phy_id, int location)
{
	struct fec_info *fec = bus->priv;
	struct fec __iomem *fecp = fec->fecp;
	int i, ret = -1;
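	/* the bus must already be in MII mode; this must never happen */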
BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
/* Add PHY address to register command. */
out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location));
for (i = 0; i < FEC_MII_LOOPS; i++)
if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
break;
if (i < FEC_MII_LOOPS) {
out_be32(&fecp->fec_ievent, FEC_ENET_MII);
ret = in_be32(&fecp->fec_mii_data) & 0xffff;
}
return ret;
}
static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
{
	struct fec_info *fec = bus->priv;
struct fec __iomem *fecp = fec->fecp;
int i;
/* this must never happen */
BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
/* Add PHY address to register command. */
out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val));
for (i = 0; i < FEC_MII_LOOPS; i++)
if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
break;
if (i < FEC_MII_LOOPS)
out_be32(&fecp->fec_ievent, FEC_ENET_MII);
return 0;
}
static int fs_enet_mdio_probe(struct platform_device *ofdev)
{
struct resource res;
struct mii_bus *new_bus;
struct fec_info *fec;
int (*get_bus_freq)(struct device *);
int ret = -ENOMEM, clock, speed;
get_bus_freq = device_get_match_data(&ofdev->dev);
new_bus = mdiobus_alloc();
if (!new_bus)
goto out;
fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL);
if (!fec)
goto out_mii;
new_bus->priv = fec;
new_bus->name = "FEC MII Bus";
new_bus->read = &fs_enet_fec_mii_read;
new_bus->write = &fs_enet_fec_mii_write;
ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (ret)
goto out_res;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%pap", &res.start);
fec->fecp = ioremap(res.start, resource_size(&res));
if (!fec->fecp) {
ret = -ENOMEM;
goto out_fec;
}
if (get_bus_freq) {
clock = get_bus_freq(&ofdev->dev);
if (!clock) {
/* Use maximum divider if clock is unknown */
dev_warn(&ofdev->dev, "could not determine IPS clock\n");
clock = 0x3F * 5000000;
}
	} else {
		clock = ppc_proc_freq;
	}
/*
* Scale for a MII clock <= 2.5 MHz
* Note that only 6 bits (25:30) are available for MII speed.
*/
speed = (clock + 4999999) / 5000000;
if (speed > 0x3F) {
speed = 0x3F;
dev_err(&ofdev->dev,
"MII clock (%d Hz) exceeds max (2.5 MHz)\n",
clock / speed);
}
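	/* The MII_SPEED field occupies bits 6:1 (mask 0x7E below), so the
	 * divider value is stored pre-shifted by one.
	 */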
fec->mii_speed = speed << 1;
setbits32(&fec->fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE);
setbits32(&fec->fecp->fec_ecntrl, FEC_ECNTRL_PINMUX |
FEC_ECNTRL_ETHER_EN);
out_be32(&fec->fecp->fec_ievent, FEC_ENET_MII);
clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed);
new_bus->phy_mask = ~0;
new_bus->parent = &ofdev->dev;
platform_set_drvdata(ofdev, new_bus);
ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
if (ret)
goto out_unmap_regs;
return 0;
out_unmap_regs:
iounmap(fec->fecp);
out_res:
out_fec:
kfree(fec);
out_mii:
mdiobus_free(new_bus);
out:
return ret;
}
static void fs_enet_mdio_remove(struct platform_device *ofdev)
{
struct mii_bus *bus = platform_get_drvdata(ofdev);
struct fec_info *fec = bus->priv;
mdiobus_unregister(bus);
iounmap(fec->fecp);
kfree(fec);
mdiobus_free(bus);
}
static const struct of_device_id fs_enet_mdio_fec_match[] = {
{
.compatible = "fsl,pq1-fec-mdio",
},
#if defined(CONFIG_PPC_MPC512x)
{
.compatible = "fsl,mpc5121-fec-mdio",
.data = mpc5xxx_get_bus_frequency,
},
#endif
{},
};
MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
static struct platform_driver fs_enet_fec_mdio_driver = {
.driver = {
.name = "fsl-fec-mdio",
.of_match_table = fs_enet_mdio_fec_match,
},
.probe = fs_enet_mdio_probe,
.remove = fs_enet_mdio_remove,
};
module_platform_driver(fs_enet_fec_mdio_driver);
MODULE_LICENSE("GPL");
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef PERF_CPUID_H
#define PERF_CPUID_H 1
static inline void
cpuid(unsigned int op, unsigned int op2, unsigned int *a, unsigned int *b,
unsigned int *c, unsigned int *d)
{
/*
* Preserve %ebx/%rbx register by either placing it in %rdi or saving it
* on the stack - x86-64 needs to avoid the stack red zone. In PIC
* compilations %ebx contains the address of the global offset
* table. %rbx is occasionally used to address stack variables in
* presence of dynamic allocas.
*/
asm(
#if defined(__x86_64__)
"mov %%rbx, %%rdi\n"
"cpuid\n"
"xchg %%rdi, %%rbx\n"
#else
"pushl %%ebx\n"
"cpuid\n"
"movl %%ebx, %%edi\n"
"popl %%ebx\n"
#endif
: "=a"(*a), "=D"(*b), "=c"(*c), "=d"(*d)
: "a"(op), "2"(op2));
}
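/*
 * Example: cpuid(0, 0, &a, &b, &c, &d) returns the highest supported
 * standard leaf in EAX and the vendor string across EBX/EDX/ECX (see
 * get_cpuid_0() below).
 */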
void get_cpuid_0(char *vendor, unsigned int *lvl);
#endif
|
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* Copyright(c) 2022 Intel Corporation
*/
#ifndef __SOUND_SOC_SOF_IPC4_PRIV_H
#define __SOUND_SOC_SOF_IPC4_PRIV_H
#include <linux/idr.h>
#include <sound/sof/ext_manifest4.h>
#include "sof-priv.h"
/* The DSP window indices are fixed */
#define SOF_IPC4_INBOX_WINDOW_IDX 0
#define SOF_IPC4_OUTBOX_WINDOW_IDX 1
#define SOF_IPC4_DEBUG_WINDOW_IDX 2
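/* Inbox carries host-to-DSP IPC messages, outbox carries DSP-to-host
 * replies and notifications; the debug window holds trace/debug slots.
 */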
enum sof_ipc4_mtrace_type {
SOF_IPC4_MTRACE_NOT_AVAILABLE = 0,
SOF_IPC4_MTRACE_INTEL_CAVS_1_5,
SOF_IPC4_MTRACE_INTEL_CAVS_1_8,
SOF_IPC4_MTRACE_INTEL_CAVS_2,
};
/**
* struct sof_ipc4_fw_module - IPC4 module info
 * @man4_module_entry: Module info
* @fw_mod_cfg: Pointer to the module config start of the module
* @m_ida: Module instance identifier
* @private: Module private data
*/
struct sof_ipc4_fw_module {
struct sof_man4_module man4_module_entry;
const struct sof_man4_module_config *fw_mod_cfg;
struct ida m_ida;
void *private;
};
/**
* struct sof_ipc4_fw_library - IPC4 library information
 * @sof_fw: SOF Firmware of the library
 * @name: Name of the library
* @id: Library ID. 0 is reserved for basefw, external libraries must have unique
* ID number between 1 and (sof_ipc4_fw_data.max_libs_count - 1)
* Note: sof_ipc4_fw_data.max_libs_count == 1 implies that external libraries
* are not supported
* @num_modules : Number of FW modules in the library
* @modules: Array of FW modules
*/
struct sof_ipc4_fw_library {
struct sof_firmware sof_fw;
const char *name;
u32 id;
int num_modules;
struct sof_ipc4_fw_module *modules;
};
/**
* struct sof_ipc4_fw_data - IPC4-specific data
* @manifest_fw_hdr_offset: FW header offset in the manifest
* @fw_lib_xa: XArray for firmware libraries, including basefw (ID = 0)
* Used to store the FW libraries and to manage the unique IDs of the
* libraries.
* @nhlt: NHLT table either from the BIOS or the topology manifest
* @mtrace_type: mtrace type supported on the booted platform
* @mtrace_log_bytes: log bytes as reported by the firmware via fw_config reply
* @num_playback_streams: max number of playback DMAs, needed for CHAIN_DMA offset
* @num_capture_streams: max number of capture DMAs
* @max_num_pipelines: max number of pipelines
 * @max_libs_count: Maximum number of libraries supported by the FW including
 *		    the base firmware
 * @fw_context_save: Flag indicating whether the firmware supports context
 *		     save and restore
*
* @load_library: Callback function for platform dependent library loading
* @pipeline_state_mutex: Mutex to protect pipeline triggers, ref counts, states and deletion
*/
struct sof_ipc4_fw_data {
u32 manifest_fw_hdr_offset;
struct xarray fw_lib_xa;
void *nhlt;
enum sof_ipc4_mtrace_type mtrace_type;
u32 mtrace_log_bytes;
int num_playback_streams;
int num_capture_streams;
int max_num_pipelines;
u32 max_libs_count;
bool fw_context_save;
int (*load_library)(struct snd_sof_dev *sdev,
struct sof_ipc4_fw_library *fw_lib, bool reload);
struct mutex pipeline_state_mutex; /* protect pipeline triggers, ref counts and states */
};
extern const struct sof_ipc_fw_loader_ops ipc4_loader_ops;
extern const struct sof_ipc_tplg_ops ipc4_tplg_ops;
extern const struct sof_ipc_tplg_control_ops tplg_ipc4_control_ops;
extern const struct sof_ipc_pcm_ops ipc4_pcm_ops;
extern const struct sof_ipc_fw_tracing_ops ipc4_mtrace_ops;
int sof_ipc4_set_pipeline_state(struct snd_sof_dev *sdev, u32 instance_id, u32 state);
int sof_ipc4_mtrace_update_pos(struct snd_sof_dev *sdev, int core);
int sof_ipc4_query_fw_configuration(struct snd_sof_dev *sdev);
int sof_ipc4_reload_fw_libraries(struct snd_sof_dev *sdev);
struct sof_ipc4_fw_module *sof_ipc4_find_module_by_uuid(struct snd_sof_dev *sdev,
const guid_t *uuid);
struct snd_sof_widget *sof_ipc4_find_swidget_by_ids(struct snd_sof_dev *sdev,
u32 module_id, int instance_id);
struct sof_ipc4_base_module_cfg;
void sof_ipc4_update_cpc_from_manifest(struct snd_sof_dev *sdev,
struct sof_ipc4_fw_module *fw_module,
struct sof_ipc4_base_module_cfg *basecfg);
size_t sof_ipc4_find_debug_slot_offset_by_type(struct snd_sof_dev *sdev,
u32 slot_type);
#endif
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017, 2018 Oracle. All rights reserved.
*
* Trace point definitions for the "rpcrdma" subsystem.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcrdma
#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCRDMA_H
#include <linux/scatterlist.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/tracepoint.h>
#include <rdma/ib_cm.h>
#include <trace/misc/rdma.h>
#include <trace/misc/sunrpc.h>
/**
** Event classes
**/
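/*
 * Each DECLARE_EVENT_CLASS() below fixes the record layout
 * (TP_STRUCT__entry), capture logic (TP_fast_assign) and format string
 * (TP_printk) once; the DEFINE_*_EVENT() wrappers then stamp out
 * individual trace events sharing that shape.
 */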
DECLARE_EVENT_CLASS(rpcrdma_simple_cid_class,
TP_PROTO(
const struct rpc_rdma_cid *cid
),
TP_ARGS(cid),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
),
TP_printk("cq.id=%d cid=%d",
__entry->cq_id, __entry->completion_id
)
);
#define DEFINE_SIMPLE_CID_EVENT(name) \
DEFINE_EVENT(rpcrdma_simple_cid_class, name, \
TP_PROTO( \
const struct rpc_rdma_cid *cid \
), \
TP_ARGS(cid) \
)
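/*
 * Completion classes capture the work completion status; vendor_err is
 * meaningful only when the completion failed, so it is zeroed on
 * success.
 */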
DECLARE_EVENT_CLASS(rpcrdma_completion_class,
TP_PROTO(
const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
),
TP_ARGS(wc, cid),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(unsigned long, status)
__field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->status = wc->status;
if (wc->status)
__entry->vendor_err = wc->vendor_err;
else
__entry->vendor_err = 0;
),
TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
__entry->cq_id, __entry->completion_id,
rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err
)
);
#define DEFINE_COMPLETION_EVENT(name) \
DEFINE_EVENT(rpcrdma_completion_class, name, \
TP_PROTO( \
const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
), \
TP_ARGS(wc, cid))
DECLARE_EVENT_CLASS(rpcrdma_send_flush_class,
TP_PROTO(
const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
),
TP_ARGS(wc, cid),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(unsigned long, status)
__field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->status = wc->status;
__entry->vendor_err = wc->vendor_err;
),
TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
__entry->cq_id, __entry->completion_id,
rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err
)
);
#define DEFINE_SEND_FLUSH_EVENT(name) \
DEFINE_EVENT(rpcrdma_send_flush_class, name, \
TP_PROTO( \
const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
), \
TP_ARGS(wc, cid))
DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
TP_PROTO(
const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
),
TP_ARGS(wc, cid),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(unsigned long, status)
__field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->status = wc->status;
if (wc->status)
__entry->vendor_err = wc->vendor_err;
else
__entry->vendor_err = 0;
),
TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
__entry->cq_id, __entry->completion_id,
rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err
)
);
#define DEFINE_MR_COMPLETION_EVENT(name) \
DEFINE_EVENT(rpcrdma_mr_completion_class, name, \
TP_PROTO( \
const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
), \
TP_ARGS(wc, cid))
DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
TP_PROTO(
const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
),
TP_ARGS(wc, cid),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(u32, received)
__field(unsigned long, status)
__field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->status = wc->status;
if (wc->status) {
__entry->received = 0;
__entry->vendor_err = wc->vendor_err;
} else {
__entry->received = wc->byte_len;
__entry->vendor_err = 0;
}
),
TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
__entry->cq_id, __entry->completion_id,
rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err,
__entry->received
)
);
#define DEFINE_RECEIVE_COMPLETION_EVENT(name) \
DEFINE_EVENT(rpcrdma_receive_completion_class, name, \
TP_PROTO( \
const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
), \
TP_ARGS(wc, cid))
DECLARE_EVENT_CLASS(rpcrdma_receive_success_class,
TP_PROTO(
const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
),
TP_ARGS(wc, cid),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(u32, received)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->received = wc->byte_len;
),
TP_printk("cq.id=%u cid=%d received=%u",
__entry->cq_id, __entry->completion_id,
__entry->received
)
);
#define DEFINE_RECEIVE_SUCCESS_EVENT(name) \
DEFINE_EVENT(rpcrdma_receive_success_class, name, \
TP_PROTO( \
const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
), \
TP_ARGS(wc, cid))
DECLARE_EVENT_CLASS(rpcrdma_receive_flush_class,
TP_PROTO(
const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
),
TP_ARGS(wc, cid),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(unsigned long, status)
__field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->status = wc->status;
__entry->vendor_err = wc->vendor_err;
),
TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
__entry->cq_id, __entry->completion_id,
rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err
)
);
#define DEFINE_RECEIVE_FLUSH_EVENT(name) \
DEFINE_EVENT(rpcrdma_receive_flush_class, name, \
TP_PROTO( \
const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
), \
TP_ARGS(wc, cid))
DECLARE_EVENT_CLASS(xprtrdma_reply_class,
TP_PROTO(
const struct rpcrdma_rep *rep
),
TP_ARGS(rep),
TP_STRUCT__entry(
__field(u32, xid)
__field(u32, version)
__field(u32, proc)
__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
__string(port, rpcrdma_portstr(rep->rr_rxprt))
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rep->rr_xid);
__entry->version = be32_to_cpu(rep->rr_vers);
__entry->proc = be32_to_cpu(rep->rr_proc);
__assign_str(addr);
__assign_str(port);
),
TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
__get_str(addr), __get_str(port),
__entry->xid, __entry->version, __entry->proc
)
);
#define DEFINE_REPLY_EVENT(name) \
DEFINE_EVENT(xprtrdma_reply_class, \
xprtrdma_reply_##name##_err, \
TP_PROTO( \
const struct rpcrdma_rep *rep \
), \
TP_ARGS(rep))
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt
),
TP_ARGS(r_xprt),
TP_STRUCT__entry(
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
__assign_str(addr);
__assign_str(port);
),
TP_printk("peer=[%s]:%s",
__get_str(addr), __get_str(port)
)
);
#define DEFINE_RXPRT_EVENT(name) \
DEFINE_EVENT(xprtrdma_rxprt, name, \
TP_PROTO( \
const struct rpcrdma_xprt *r_xprt \
), \
TP_ARGS(r_xprt))
DECLARE_EVENT_CLASS(xprtrdma_connect_class,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
int rc
),
TP_ARGS(r_xprt, rc),
TP_STRUCT__entry(
__field(int, rc)
__field(int, connect_status)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
__entry->rc = rc;
__entry->connect_status = r_xprt->rx_ep->re_connect_status;
__assign_str(addr);
__assign_str(port);
),
TP_printk("peer=[%s]:%s rc=%d connection status=%d",
__get_str(addr), __get_str(port),
__entry->rc, __entry->connect_status
)
);
#define DEFINE_CONN_EVENT(name) \
DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name, \
TP_PROTO( \
const struct rpcrdma_xprt *r_xprt, \
int rc \
), \
TP_ARGS(r_xprt, rc))
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
TP_PROTO(
const struct rpc_task *task,
unsigned int pos,
struct rpcrdma_mr *mr,
int nsegs
),
TP_ARGS(task, pos, mr, nsegs),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(unsigned int, pos)
__field(int, nents)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
__field(int, nsegs)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->pos = pos;
__entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
__entry->offset = mr->mr_offset;
__entry->nsegs = nsegs;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
" pos=%u %u@0x%016llx:0x%08x (%s)",
__entry->task_id, __entry->client_id,
__entry->pos, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
__entry->nents < __entry->nsegs ? "more" : "last"
)
);
#define DEFINE_RDCH_EVENT(name) \
DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
TP_PROTO( \
const struct rpc_task *task, \
unsigned int pos, \
struct rpcrdma_mr *mr, \
int nsegs \
), \
TP_ARGS(task, pos, mr, nsegs))
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
TP_PROTO(
const struct rpc_task *task,
struct rpcrdma_mr *mr,
int nsegs
),
TP_ARGS(task, mr, nsegs),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, nents)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
__field(int, nsegs)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
__entry->offset = mr->mr_offset;
__entry->nsegs = nsegs;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
" %u@0x%016llx:0x%08x (%s)",
__entry->task_id, __entry->client_id,
__entry->length, (unsigned long long)__entry->offset,
__entry->handle,
__entry->nents < __entry->nsegs ? "more" : "last"
)
);
#define DEFINE_WRCH_EVENT(name) \
DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
TP_PROTO( \
const struct rpc_task *task, \
struct rpcrdma_mr *mr, \
int nsegs \
), \
TP_ARGS(task, mr, nsegs))
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);
#define xprtrdma_show_direction(x) \
__print_symbolic(x, \
{ DMA_BIDIRECTIONAL, "BIDIR" }, \
{ DMA_TO_DEVICE, "TO_DEVICE" }, \
{ DMA_FROM_DEVICE, "FROM_DEVICE" }, \
{ DMA_NONE, "NONE" })
DECLARE_EVENT_CLASS(xprtrdma_mr_class,
TP_PROTO(
const struct rpcrdma_mr *mr
),
TP_ARGS(mr),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, mr_id)
__field(int, nents)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
__field(u32, dir)
),
TP_fast_assign(
const struct rpcrdma_req *req = mr->mr_req;
if (req) {
const struct rpc_task *task = req->rl_slot.rq_task;
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
} else {
__entry->task_id = 0;
__entry->client_id = -1;
}
__entry->mr_id = mr->mr_ibmr->res.id;
__entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
__entry->offset = mr->mr_offset;
__entry->dir = mr->mr_dir;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
" mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
__entry->task_id, __entry->client_id,
__entry->mr_id, __entry->nents, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
xprtrdma_show_direction(__entry->dir)
)
);
#define DEFINE_MR_EVENT(name) \
DEFINE_EVENT(xprtrdma_mr_class, \
xprtrdma_mr_##name, \
TP_PROTO( \
const struct rpcrdma_mr *mr \
), \
TP_ARGS(mr))
DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
TP_PROTO(
const struct rpcrdma_mr *mr
),
TP_ARGS(mr),
TP_STRUCT__entry(
__field(u32, mr_id)
__field(int, nents)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
__field(u32, dir)
),
TP_fast_assign(
__entry->mr_id = mr->mr_ibmr->res.id;
__entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
__entry->offset = mr->mr_offset;
__entry->dir = mr->mr_dir;
),
TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
__entry->mr_id, __entry->nents, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
xprtrdma_show_direction(__entry->dir)
)
);
#define DEFINE_ANON_MR_EVENT(name) \
DEFINE_EVENT(xprtrdma_anonymous_mr_class, \
xprtrdma_mr_##name, \
TP_PROTO( \
const struct rpcrdma_mr *mr \
), \
TP_ARGS(mr))
DECLARE_EVENT_CLASS(xprtrdma_callback_class,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
const struct rpc_rqst *rqst
),
TP_ARGS(r_xprt, rqst),
TP_STRUCT__entry(
__field(u32, xid)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rqst->rq_xid);
__assign_str(addr);
__assign_str(port);
),
TP_printk("peer=[%s]:%s xid=0x%08x",
__get_str(addr), __get_str(port), __entry->xid
)
);
#define DEFINE_CALLBACK_EVENT(name) \
DEFINE_EVENT(xprtrdma_callback_class, \
xprtrdma_cb_##name, \
TP_PROTO( \
const struct rpcrdma_xprt *r_xprt, \
const struct rpc_rqst *rqst \
), \
TP_ARGS(r_xprt, rqst))
/**
** Connection events
**/
TRACE_EVENT(xprtrdma_inline_thresh,
TP_PROTO(
const struct rpcrdma_ep *ep
),
TP_ARGS(ep),
TP_STRUCT__entry(
__field(unsigned int, inline_send)
__field(unsigned int, inline_recv)
__field(unsigned int, max_send)
__field(unsigned int, max_recv)
__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
const struct rdma_cm_id *id = ep->re_id;
__entry->inline_send = ep->re_inline_send;
__entry->inline_recv = ep->re_inline_recv;
__entry->max_send = ep->re_max_inline_send;
__entry->max_recv = ep->re_max_inline_recv;
memcpy(__entry->srcaddr, &id->route.addr.src_addr,
sizeof(struct sockaddr_in6));
memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
sizeof(struct sockaddr_in6));
),
TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
__entry->srcaddr, __entry->dstaddr,
__entry->inline_send, __entry->inline_recv,
__entry->max_send, __entry->max_recv
)
);
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);
TRACE_EVENT(xprtrdma_device_removal,
TP_PROTO(
const struct rdma_cm_id *id
),
TP_ARGS(id),
TP_STRUCT__entry(
__string(name, id->device->name)
__array(unsigned char, addr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
__assign_str(name);
memcpy(__entry->addr, &id->route.addr.dst_addr,
sizeof(struct sockaddr_in6));
),
TP_printk("device %s to be removed, disconnecting %pISpc\n",
__get_str(name), __entry->addr
)
);
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
TRACE_EVENT(xprtrdma_op_connect,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
unsigned long delay
),
TP_ARGS(r_xprt, delay),
TP_STRUCT__entry(
__field(unsigned long, delay)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
__entry->delay = delay;
__assign_str(addr);
__assign_str(port);
),
TP_printk("peer=[%s]:%s delay=%lu",
__get_str(addr), __get_str(port), __entry->delay
)
);
TRACE_EVENT(xprtrdma_op_set_cto,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
unsigned long connect,
unsigned long reconnect
),
TP_ARGS(r_xprt, connect, reconnect),
TP_STRUCT__entry(
__field(unsigned long, connect)
__field(unsigned long, reconnect)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
__entry->connect = connect;
__entry->reconnect = reconnect;
__assign_str(addr);
__assign_str(port);
),
TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
__get_str(addr), __get_str(port),
__entry->connect / HZ, __entry->reconnect / HZ
)
);
/**
** Call events
**/
TRACE_EVENT(xprtrdma_createmrs,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
unsigned int count
),
TP_ARGS(r_xprt, count),
TP_STRUCT__entry(
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
__field(unsigned int, count)
),
TP_fast_assign(
__entry->count = count;
__assign_str(addr);
__assign_str(port);
),
TP_printk("peer=[%s]:%s created %u MRs",
__get_str(addr), __get_str(port), __entry->count
)
);
TRACE_EVENT(xprtrdma_nomrs_err,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
const struct rpcrdma_req *req
),
TP_ARGS(r_xprt, req),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__assign_str(addr);
__assign_str(port);
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " peer=[%s]:%s",
__entry->task_id, __entry->client_id,
__get_str(addr), __get_str(port)
)
);
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);
DEFINE_WRCH_EVENT(wp);
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);
#define xprtrdma_show_chunktype(x) \
__print_symbolic(x, \
{ rpcrdma_noch, "inline" }, \
{ rpcrdma_noch_pullup, "pullup" }, \
{ rpcrdma_noch_mapped, "mapped" }, \
{ rpcrdma_readch, "read list" }, \
{ rpcrdma_areadch, "*read list" }, \
{ rpcrdma_writech, "write list" }, \
{ rpcrdma_replych, "reply chunk" })
TRACE_EVENT(xprtrdma_marshal,
TP_PROTO(
const struct rpcrdma_req *req,
unsigned int rtype,
unsigned int wtype
),
TP_ARGS(req, rtype, wtype),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(unsigned int, hdrlen)
__field(unsigned int, headlen)
__field(unsigned int, pagelen)
__field(unsigned int, taillen)
__field(unsigned int, rtype)
__field(unsigned int, wtype)
),
TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->hdrlen = req->rl_hdrbuf.len;
__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
__entry->pagelen = rqst->rq_snd_buf.page_len;
__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
__entry->rtype = rtype;
__entry->wtype = wtype;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
" xid=0x%08x hdr=%u xdr=%u/%u/%u %s/%s",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->hdrlen,
__entry->headlen, __entry->pagelen, __entry->taillen,
xprtrdma_show_chunktype(__entry->rtype),
xprtrdma_show_chunktype(__entry->wtype)
)
);
TRACE_EVENT(xprtrdma_marshal_failed,
TP_PROTO(const struct rpc_rqst *rqst,
int ret
),
TP_ARGS(rqst, ret),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(int, ret)
),
TP_fast_assign(
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->ret = ret;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->ret
)
);
TRACE_EVENT(xprtrdma_prepsend_failed,
TP_PROTO(const struct rpc_rqst *rqst,
int ret
),
TP_ARGS(rqst, ret),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(int, ret)
),
TP_fast_assign(
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->ret = ret;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->ret
)
);
TRACE_EVENT(xprtrdma_post_send,
TP_PROTO(
const struct rpcrdma_req *req
),
TP_ARGS(req),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, num_sge)
__field(int, signaled)
),
TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
const struct rpcrdma_sendctx *sc = req->rl_sendctx;
__entry->cq_id = sc->sc_cid.ci_queue_id;
__entry->completion_id = sc->sc_cid.ci_completion_id;
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client ?
rqst->rq_task->tk_client->cl_clid : -1;
__entry->num_sge = req->rl_wr.num_sge;
__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u cid=%d (%d SGE%s) %s",
__entry->task_id, __entry->client_id,
__entry->cq_id, __entry->completion_id,
__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
(__entry->signaled ? "signaled" : "")
)
);
TRACE_EVENT(xprtrdma_post_send_err,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
const struct rpcrdma_req *req,
int rc
),
TP_ARGS(r_xprt, req, rc),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, rc)
),
TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
const struct rpcrdma_ep *ep = r_xprt->rx_ep;
__entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0;
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client ?
rqst->rq_task->tk_client->cl_clid : -1;
__entry->rc = rc;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u rc=%d",
__entry->task_id, __entry->client_id,
__entry->cq_id, __entry->rc
)
);
DEFINE_SIMPLE_CID_EVENT(xprtrdma_post_recv);
TRACE_EVENT(xprtrdma_post_recvs,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
unsigned int count
),
TP_ARGS(r_xprt, count),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(unsigned int, count)
__field(int, posted)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
const struct rpcrdma_ep *ep = r_xprt->rx_ep;
__entry->cq_id = ep->re_attr.recv_cq->res.id;
__entry->count = count;
__entry->posted = ep->re_receive_count;
__assign_str(addr);
__assign_str(port);
),
TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active",
__get_str(addr), __get_str(port), __entry->cq_id,
__entry->count, __entry->posted
)
);
TRACE_EVENT(xprtrdma_post_recvs_err,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
int status
),
TP_ARGS(r_xprt, status),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, status)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
const struct rpcrdma_ep *ep = r_xprt->rx_ep;
__entry->cq_id = ep->re_attr.recv_cq->res.id;
__entry->status = status;
__assign_str(addr);
__assign_str(port);
),
TP_printk("peer=[%s]:%s cq.id=%d rc=%d",
__get_str(addr), __get_str(port), __entry->cq_id,
__entry->status
)
);
TRACE_EVENT(xprtrdma_post_linv_err,
TP_PROTO(
const struct rpcrdma_req *req,
int status
),
TP_ARGS(req, status),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, status)
),
TP_fast_assign(
const struct rpc_task *task = req->rl_slot.rq_task;
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->status = status;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
__entry->task_id, __entry->client_id, __entry->status
)
);
/**
** Completion events
**/
DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);
TRACE_EVENT(xprtrdma_frwr_alloc,
TP_PROTO(
const struct rpcrdma_mr *mr,
int rc
),
TP_ARGS(mr, rc),
TP_STRUCT__entry(
__field(u32, mr_id)
__field(int, rc)
),
TP_fast_assign(
__entry->mr_id = mr->mr_ibmr->res.id;
__entry->rc = rc;
),
TP_printk("mr.id=%u: rc=%d",
__entry->mr_id, __entry->rc
)
);
TRACE_EVENT(xprtrdma_frwr_dereg,
TP_PROTO(
const struct rpcrdma_mr *mr,
int rc
),
TP_ARGS(mr, rc),
TP_STRUCT__entry(
__field(u32, mr_id)
__field(int, nents)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
__field(u32, dir)
__field(int, rc)
),
TP_fast_assign(
__entry->mr_id = mr->mr_ibmr->res.id;
__entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
__entry->offset = mr->mr_offset;
__entry->dir = mr->mr_dir;
__entry->rc = rc;
),
TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
__entry->mr_id, __entry->nents, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
xprtrdma_show_direction(__entry->dir),
__entry->rc
)
);
TRACE_EVENT(xprtrdma_frwr_sgerr,
TP_PROTO(
const struct rpcrdma_mr *mr,
int sg_nents
),
TP_ARGS(mr, sg_nents),
TP_STRUCT__entry(
__field(u32, mr_id)
__field(u64, addr)
__field(u32, dir)
__field(int, nents)
),
TP_fast_assign(
__entry->mr_id = mr->mr_ibmr->res.id;
__entry->addr = mr->mr_sg->dma_address;
__entry->dir = mr->mr_dir;
__entry->nents = sg_nents;
),
TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
__entry->mr_id, __entry->addr,
xprtrdma_show_direction(__entry->dir),
__entry->nents
)
);
TRACE_EVENT(xprtrdma_frwr_maperr,
TP_PROTO(
const struct rpcrdma_mr *mr,
int num_mapped
),
TP_ARGS(mr, num_mapped),
TP_STRUCT__entry(
__field(u32, mr_id)
__field(u64, addr)
__field(u32, dir)
__field(int, num_mapped)
__field(int, nents)
),
TP_fast_assign(
__entry->mr_id = mr->mr_ibmr->res.id;
__entry->addr = mr->mr_sg->dma_address;
__entry->dir = mr->mr_dir;
__entry->num_mapped = num_mapped;
__entry->nents = mr->mr_nents;
),
TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
__entry->mr_id, __entry->addr,
xprtrdma_show_direction(__entry->dir),
__entry->num_mapped, __entry->nents
)
);
DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(map);
DEFINE_ANON_MR_EVENT(unmap);
TRACE_EVENT(xprtrdma_dma_maperr,
TP_PROTO(
u64 addr
),
TP_ARGS(addr),
TP_STRUCT__entry(
__field(u64, addr)
),
TP_fast_assign(
__entry->addr = addr;
),
TP_printk("dma addr=0x%llx\n", __entry->addr)
);
/**
** Reply events
**/
TRACE_EVENT(xprtrdma_reply,
TP_PROTO(
const struct rpc_task *task,
const struct rpcrdma_rep *rep,
unsigned int credits
),
TP_ARGS(task, rep, credits),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(unsigned int, credits)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rep->rr_xid);
__entry->credits = credits;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x credits=%u",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->credits
)
);
DEFINE_REPLY_EVENT(vers);
DEFINE_REPLY_EVENT(rqst);
DEFINE_REPLY_EVENT(short);
DEFINE_REPLY_EVENT(hdr);
TRACE_EVENT(xprtrdma_err_vers,
TP_PROTO(
const struct rpc_rqst *rqst,
__be32 *min,
__be32 *max
),
TP_ARGS(rqst, min, max),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(u32, min)
__field(u32, max)
),
TP_fast_assign(
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->min = be32_to_cpup(min);
__entry->max = be32_to_cpup(max);
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x versions=[%u, %u]",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->min, __entry->max
)
);
TRACE_EVENT(xprtrdma_err_chunk,
TP_PROTO(
const struct rpc_rqst *rqst
),
TP_ARGS(rqst),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
),
TP_fast_assign(
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x",
__entry->task_id, __entry->client_id, __entry->xid
)
);
TRACE_EVENT(xprtrdma_err_unrecognized,
TP_PROTO(
const struct rpc_rqst *rqst,
__be32 *procedure
),
TP_ARGS(rqst, procedure),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(u32, procedure)
),
	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->procedure = be32_to_cpup(procedure);
	),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x procedure=%u",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->procedure
)
);
TRACE_EVENT(xprtrdma_fixup,
TP_PROTO(
const struct rpc_rqst *rqst,
unsigned long fixup
),
TP_ARGS(rqst, fixup),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(unsigned long, fixup)
__field(size_t, headlen)
__field(unsigned int, pagelen)
__field(size_t, taillen)
),
TP_fast_assign(
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->fixup = fixup;
__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
__entry->pagelen = rqst->rq_rcv_buf.page_len;
__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " fixup=%lu xdr=%zu/%u/%zu",
__entry->task_id, __entry->client_id, __entry->fixup,
__entry->headlen, __entry->pagelen, __entry->taillen
)
);
TRACE_EVENT(xprtrdma_decode_seg,
TP_PROTO(
u32 handle,
u32 length,
u64 offset
),
TP_ARGS(handle, length, offset),
TP_STRUCT__entry(
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
),
TP_fast_assign(
__entry->handle = handle;
__entry->length = length;
__entry->offset = offset;
),
TP_printk("%u@0x%016llx:0x%08x",
__entry->length, (unsigned long long)__entry->offset,
__entry->handle
)
);
TRACE_EVENT(xprtrdma_mrs_zap,
TP_PROTO(
const struct rpc_task *task
),
TP_ARGS(task),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
__entry->task_id, __entry->client_id
)
);
/**
** Callback events
**/
TRACE_EVENT(xprtrdma_cb_setup,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
unsigned int reqs
),
TP_ARGS(r_xprt, reqs),
TP_STRUCT__entry(
__field(unsigned int, reqs)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
__entry->reqs = reqs;
__assign_str(addr);
__assign_str(port);
),
TP_printk("peer=[%s]:%s %u reqs",
__get_str(addr), __get_str(port), __entry->reqs
)
);
DEFINE_CALLBACK_EVENT(call);
DEFINE_CALLBACK_EVENT(reply);
/**
** Server-side RPC/RDMA events
**/
DECLARE_EVENT_CLASS(svcrdma_accept_class,
TP_PROTO(
const struct svcxprt_rdma *rdma,
long status
),
TP_ARGS(rdma, status),
TP_STRUCT__entry(
__field(long, status)
__string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
__entry->status = status;
__assign_str(addr);
),
TP_printk("addr=%s status=%ld",
__get_str(addr), __entry->status
)
);
#define DEFINE_ACCEPT_EVENT(name) \
DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
TP_PROTO( \
const struct svcxprt_rdma *rdma, \
long status \
), \
TP_ARGS(rdma, status))
DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);
#define show_rpcrdma_proc(x) \
__print_symbolic(x, \
{ RDMA_MSG, "RDMA_MSG" }, \
{ RDMA_NOMSG, "RDMA_NOMSG" }, \
{ RDMA_MSGP, "RDMA_MSGP" }, \
{ RDMA_DONE, "RDMA_DONE" }, \
{ RDMA_ERROR, "RDMA_ERROR" })
TRACE_EVENT(svcrdma_decode_rqst,
TP_PROTO(
const struct svc_rdma_recv_ctxt *ctxt,
__be32 *p,
unsigned int hdrlen
),
TP_ARGS(ctxt, p, hdrlen),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(u32, xid)
__field(u32, vers)
__field(u32, proc)
__field(u32, credits)
__field(unsigned int, hdrlen)
),
TP_fast_assign(
__entry->cq_id = ctxt->rc_cid.ci_queue_id;
__entry->completion_id = ctxt->rc_cid.ci_completion_id;
__entry->xid = be32_to_cpup(p++);
__entry->vers = be32_to_cpup(p++);
__entry->credits = be32_to_cpup(p++);
__entry->proc = be32_to_cpup(p);
__entry->hdrlen = hdrlen;
),
TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
__entry->cq_id, __entry->completion_id,
__entry->xid, __entry->vers, __entry->credits,
show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
TRACE_EVENT(svcrdma_decode_short_err,
TP_PROTO(
const struct svc_rdma_recv_ctxt *ctxt,
unsigned int hdrlen
),
TP_ARGS(ctxt, hdrlen),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(unsigned int, hdrlen)
),
TP_fast_assign(
__entry->cq_id = ctxt->rc_cid.ci_queue_id;
__entry->completion_id = ctxt->rc_cid.ci_completion_id;
__entry->hdrlen = hdrlen;
),
TP_printk("cq.id=%u cid=%d hdrlen=%u",
__entry->cq_id, __entry->completion_id,
__entry->hdrlen)
);
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
TP_PROTO(
const struct svc_rdma_recv_ctxt *ctxt,
__be32 *p
),
TP_ARGS(ctxt, p),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(u32, xid)
__field(u32, vers)
__field(u32, proc)
__field(u32, credits)
),
TP_fast_assign(
__entry->cq_id = ctxt->rc_cid.ci_queue_id;
__entry->completion_id = ctxt->rc_cid.ci_completion_id;
__entry->xid = be32_to_cpup(p++);
__entry->vers = be32_to_cpup(p++);
__entry->credits = be32_to_cpup(p++);
__entry->proc = be32_to_cpup(p);
),
TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
__entry->cq_id, __entry->completion_id,
__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);
#define DEFINE_BADREQ_EVENT(name) \
DEFINE_EVENT(svcrdma_badreq_event, \
svcrdma_decode_##name##_err, \
TP_PROTO( \
const struct svc_rdma_recv_ctxt *ctxt, \
__be32 *p \
), \
TP_ARGS(ctxt, p))
DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
TRACE_EVENT(svcrdma_encode_wseg,
TP_PROTO(
const struct svc_rdma_send_ctxt *ctxt,
u32 segno,
u32 handle,
u32 length,
u64 offset
),
TP_ARGS(ctxt, segno, handle, length, offset),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(u32, segno)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
),
TP_fast_assign(
__entry->cq_id = ctxt->sc_cid.ci_queue_id;
__entry->completion_id = ctxt->sc_cid.ci_completion_id;
__entry->segno = segno;
__entry->handle = handle;
__entry->length = length;
__entry->offset = offset;
),
TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
)
);
TRACE_EVENT(svcrdma_decode_rseg,
TP_PROTO(
const struct rpc_rdma_cid *cid,
const struct svc_rdma_chunk *chunk,
const struct svc_rdma_segment *segment
),
TP_ARGS(cid, chunk, segment),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(u32, segno)
__field(u32, position)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->segno = chunk->ch_segcount;
__entry->position = chunk->ch_position;
__entry->handle = segment->rs_handle;
__entry->length = segment->rs_length;
__entry->offset = segment->rs_offset;
),
TP_printk("cq.id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->position, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
)
);
TRACE_EVENT(svcrdma_decode_wseg,
TP_PROTO(
const struct rpc_rdma_cid *cid,
const struct svc_rdma_chunk *chunk,
u32 segno
),
TP_ARGS(cid, chunk, segno),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(u32, segno)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
),
TP_fast_assign(
const struct svc_rdma_segment *segment =
&chunk->ch_segments[segno];
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->segno = segno;
__entry->handle = segment->rs_handle;
__entry->length = segment->rs_length;
__entry->offset = segment->rs_offset;
),
TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
)
);
DECLARE_EVENT_CLASS(svcrdma_error_event,
TP_PROTO(
__be32 xid
),
TP_ARGS(xid),
TP_STRUCT__entry(
__field(u32, xid)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(xid);
),
TP_printk("xid=0x%08x",
__entry->xid
)
);
#define DEFINE_ERROR_EVENT(name) \
DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name, \
TP_PROTO( \
__be32 xid \
), \
TP_ARGS(xid))
DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
/**
** Server-side RDMA API events
**/
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
TP_PROTO(
const struct rpc_rdma_cid *cid,
u64 dma_addr,
u32 length
),
TP_ARGS(cid, dma_addr, length),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(u64, dma_addr)
__field(u32, length)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->dma_addr = dma_addr;
__entry->length = length;
),
TP_printk("cq.id=%u cid=%d dma_addr=%llu length=%u",
__entry->cq_id, __entry->completion_id,
__entry->dma_addr, __entry->length
)
);
#define DEFINE_SVC_DMA_EVENT(name) \
DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \
TP_PROTO( \
const struct rpc_rdma_cid *cid, \
u64 dma_addr, \
u32 length \
), \
TP_ARGS(cid, dma_addr, length) \
)
DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
TRACE_EVENT(svcrdma_dma_map_rw_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
u64 offset,
u32 handle,
unsigned int nents,
int status
),
TP_ARGS(rdma, offset, handle, nents, status),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(u32, handle)
__field(u64, offset)
__field(unsigned int, nents)
__field(int, status)
),
TP_fast_assign(
__entry->cq_id = rdma->sc_sq_cq->res.id;
__entry->handle = handle;
__entry->offset = offset;
__entry->nents = nents;
__entry->status = status;
),
TP_printk("cq.id=%u 0x%016llx:0x%08x nents=%u status=%d",
__entry->cq_id, (unsigned long long)__entry->offset,
__entry->handle, __entry->nents, __entry->status
)
);
TRACE_EVENT(svcrdma_rwctx_empty,
TP_PROTO(
const struct svcxprt_rdma *rdma,
unsigned int num_sges
),
TP_ARGS(rdma, num_sges),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(unsigned int, num_sges)
),
TP_fast_assign(
__entry->cq_id = rdma->sc_sq_cq->res.id;
__entry->num_sges = num_sges;
),
TP_printk("cq.id=%u num_sges=%d",
__entry->cq_id, __entry->num_sges
)
);
TRACE_EVENT(svcrdma_page_overrun_err,
TP_PROTO(
const struct rpc_rdma_cid *cid,
unsigned int pageno
),
TP_ARGS(cid, pageno),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(unsigned int, pageno)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->pageno = pageno;
),
TP_printk("cq.id=%u cid=%d pageno=%u",
__entry->cq_id, __entry->completion_id,
__entry->pageno
)
);
TRACE_EVENT(svcrdma_small_wrch_err,
TP_PROTO(
const struct rpc_rdma_cid *cid,
unsigned int remaining,
unsigned int seg_no,
unsigned int num_segs
),
TP_ARGS(cid, remaining, seg_no, num_segs),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(unsigned int, remaining)
__field(unsigned int, seg_no)
__field(unsigned int, num_segs)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->remaining = remaining;
__entry->seg_no = seg_no;
__entry->num_segs = num_segs;
),
TP_printk("cq.id=%u cid=%d remaining=%u seg_no=%u num_segs=%u",
__entry->cq_id, __entry->completion_id,
__entry->remaining, __entry->seg_no, __entry->num_segs
)
);
TRACE_EVENT(svcrdma_send_pullup,
TP_PROTO(
const struct svc_rdma_send_ctxt *ctxt,
unsigned int msglen
),
TP_ARGS(ctxt, msglen),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(unsigned int, hdrlen)
__field(unsigned int, msglen)
),
TP_fast_assign(
__entry->cq_id = ctxt->sc_cid.ci_queue_id;
__entry->completion_id = ctxt->sc_cid.ci_completion_id;
__entry->hdrlen = ctxt->sc_hdrbuf.len;
__entry->msglen = msglen;
),
TP_printk("cq.id=%u cid=%d hdr=%u msg=%u (total %u)",
__entry->cq_id, __entry->completion_id,
__entry->hdrlen, __entry->msglen,
__entry->hdrlen + __entry->msglen)
);
TRACE_EVENT(svcrdma_send_err,
TP_PROTO(
const struct svc_rqst *rqst,
int status
),
TP_ARGS(rqst, status),
TP_STRUCT__entry(
__field(int, status)
__field(u32, xid)
__string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->status = status;
__entry->xid = __be32_to_cpu(rqst->rq_xid);
__assign_str(addr);
),
TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
__entry->xid, __entry->status
)
);
TRACE_EVENT(svcrdma_post_send,
TP_PROTO(
const struct svc_rdma_send_ctxt *ctxt
),
TP_ARGS(ctxt),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(unsigned int, num_sge)
__field(u32, inv_rkey)
),
TP_fast_assign(
const struct ib_send_wr *wr = &ctxt->sc_send_wr;
__entry->cq_id = ctxt->sc_cid.ci_queue_id;
__entry->completion_id = ctxt->sc_cid.ci_completion_id;
__entry->num_sge = wr->num_sge;
__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
wr->ex.invalidate_rkey : 0;
),
TP_printk("cq.id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->num_sge, __entry->inv_rkey
)
);
DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_send);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);
DEFINE_SIMPLE_CID_EVENT(svcrdma_post_recv);
DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_err);
TRACE_EVENT(svcrdma_rq_post_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
int status
),
TP_ARGS(rdma, status),
TP_STRUCT__entry(
__field(int, status)
__string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
__entry->status = status;
__assign_str(addr);
),
TP_printk("addr=%s status=%d",
__get_str(addr), __entry->status
)
);
DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
TP_PROTO(
const struct rpc_rdma_cid *cid,
int sqecount
),
TP_ARGS(cid, sqecount),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(int, sqecount)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->sqecount = sqecount;
),
TP_printk("cq.id=%u cid=%d sqecount=%d",
__entry->cq_id, __entry->completion_id,
__entry->sqecount
)
);
#define DEFINE_POST_CHUNK_EVENT(name) \
DEFINE_EVENT(svcrdma_post_chunk_class, \
svcrdma_post_##name##_chunk, \
TP_PROTO( \
const struct rpc_rdma_cid *cid, \
int sqecount \
), \
TP_ARGS(cid, sqecount))
DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);
DEFINE_EVENT(svcrdma_post_chunk_class, svcrdma_cc_release,
TP_PROTO(
const struct rpc_rdma_cid *cid,
int sqecount
),
TP_ARGS(cid, sqecount)
);
TRACE_EVENT(svcrdma_wc_read,
TP_PROTO(
const struct ib_wc *wc,
const struct rpc_rdma_cid *cid,
unsigned int totalbytes,
const ktime_t posttime
),
TP_ARGS(wc, cid, totalbytes, posttime),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(s64, read_latency)
__field(unsigned int, totalbytes)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->totalbytes = totalbytes;
__entry->read_latency = ktime_us_delta(ktime_get(), posttime);
),
TP_printk("cq.id=%u cid=%d totalbytes=%u latency-us=%lld",
__entry->cq_id, __entry->completion_id,
__entry->totalbytes, __entry->read_latency
)
);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);
DEFINE_SIMPLE_CID_EVENT(svcrdma_read_finished);
DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_write);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);
DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_reply);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_reply_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_reply_err);
TRACE_EVENT(svcrdma_qp_error,
TP_PROTO(
const struct ib_event *event,
const struct sockaddr *sap
),
TP_ARGS(event, sap),
TP_STRUCT__entry(
__field(unsigned int, event)
__string(device, event->device->name)
__array(__u8, addr, INET6_ADDRSTRLEN + 10)
),
TP_fast_assign(
__entry->event = event->event;
__assign_str(device);
snprintf(__entry->addr, sizeof(__entry->addr) - 1,
"%pISpc", sap);
),
TP_printk("addr=%s dev=%s event=%s (%u)",
__entry->addr, __get_str(device),
rdma_show_ib_event(__entry->event), __entry->event
)
);
TRACE_EVENT(svcrdma_device_removal,
TP_PROTO(
const struct rdma_cm_id *id
),
TP_ARGS(id),
TP_STRUCT__entry(
__string(name, id->device->name)
__array(unsigned char, addr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
__assign_str(name);
memcpy(__entry->addr, &id->route.addr.dst_addr,
sizeof(struct sockaddr_in6));
),
TP_printk("device %s to be removed, disconnecting %pISpc\n",
__get_str(name), __entry->addr
)
);
DECLARE_EVENT_CLASS(svcrdma_sendqueue_class,
TP_PROTO(
const struct svcxprt_rdma *rdma,
const struct rpc_rdma_cid *cid
),
TP_ARGS(rdma, cid),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(int, avail)
__field(int, depth)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->avail = atomic_read(&rdma->sc_sq_avail);
__entry->depth = rdma->sc_sq_depth;
),
TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d",
__entry->cq_id, __entry->completion_id,
__entry->avail, __entry->depth
)
);
#define DEFINE_SQ_EVENT(name) \
DEFINE_EVENT(svcrdma_sendqueue_class, name, \
TP_PROTO( \
const struct svcxprt_rdma *rdma, \
const struct rpc_rdma_cid *cid \
), \
TP_ARGS(rdma, cid) \
)
DEFINE_SQ_EVENT(svcrdma_sq_full);
DEFINE_SQ_EVENT(svcrdma_sq_retry);
TRACE_EVENT(svcrdma_sq_post_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
const struct rpc_rdma_cid *cid,
int status
),
TP_ARGS(rdma, cid, status),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
__field(int, avail)
__field(int, depth)
__field(int, status)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
__entry->avail = atomic_read(&rdma->sc_sq_avail);
__entry->depth = rdma->sc_sq_depth;
__entry->status = status;
),
TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d status=%d",
__entry->cq_id, __entry->completion_id,
__entry->avail, __entry->depth, __entry->status
)
);
DECLARE_EVENT_CLASS(rpcrdma_client_device_class,
TP_PROTO(
const struct ib_device *device
),
TP_ARGS(device),
TP_STRUCT__entry(
__string(name, device->name)
),
TP_fast_assign(
__assign_str(name);
),
TP_printk("device=%s",
__get_str(name)
)
);
#define DEFINE_CLIENT_DEVICE_EVENT(name) \
DEFINE_EVENT(rpcrdma_client_device_class, name, \
TP_PROTO( \
const struct ib_device *device \
), \
TP_ARGS(device) \
)
DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_completion);
DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_add_one);
DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_remove_one);
DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_wait_on);
DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_remove_one_done);
DECLARE_EVENT_CLASS(rpcrdma_client_register_class,
TP_PROTO(
const struct ib_device *device,
const struct rpcrdma_notification *rn
),
TP_ARGS(device, rn),
TP_STRUCT__entry(
__string(name, device->name)
__field(void *, callback)
__field(u32, index)
),
TP_fast_assign(
__assign_str(name);
__entry->callback = rn->rn_done;
__entry->index = rn->rn_index;
),
TP_printk("device=%s index=%u done callback=%pS\n",
__get_str(name), __entry->index, __entry->callback
)
);
#define DEFINE_CLIENT_REGISTER_EVENT(name) \
DEFINE_EVENT(rpcrdma_client_register_class, name, \
TP_PROTO( \
const struct ib_device *device, \
const struct rpcrdma_notification *rn \
), \
TP_ARGS(device, rn))
DEFINE_CLIENT_REGISTER_EVENT(rpcrdma_client_register);
DEFINE_CLIENT_REGISTER_EVENT(rpcrdma_client_unregister);
#endif /* _TRACE_RPCRDMA_H */
#include <trace/define_trace.h>
|
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __INTEL_RESET_H__
#define __INTEL_RESET_H__
struct drm_i915_private;
void intel_display_reset_prepare(struct drm_i915_private *i915);
void intel_display_reset_finish(struct drm_i915_private *i915);
#endif /* __INTEL_RESET_H__ */
|
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
* Copyright (C) 2017 Rafał Miłecki <[email protected]>
* Copyright (C) 2018 Rene Kjellerup <[email protected]>
*/
/dts-v1/;
#include "bcm4708.dtsi"
#include "bcm5301x-nand-cs0-bch8.dtsi"
/ {
compatible = "linksys,ea6500-v2", "brcm,bcm4708";
model = "Linksys EA6500 V2";
chosen {
bootargs = "console=ttyS0,115200";
};
memory@0 {
device_type = "memory";
reg = <0x00000000 0x08000000>,
<0x88000000 0x08000000>;
};
gpio-keys {
compatible = "gpio-keys";
button-wps {
label = "WPS";
linux,code = <KEY_WPS_BUTTON>;
gpios = <&chipcommon 7 GPIO_ACTIVE_LOW>;
};
button-restart {
label = "Reset";
linux,code = <KEY_RESTART>;
gpios = <&chipcommon 11 GPIO_ACTIVE_LOW>;
};
};
};
&usb3_phy {
status = "okay";
};
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2016 Martin Blumenstingl <[email protected]>.
* Based on meson-gx-p23x-q20x.dtsi:
* - Copyright (c) 2016 Endless Computers, Inc.
* Author: Carlo Caione <[email protected]>
* - Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <[email protected]>
*/
/* Common DTSI for devices which are based on the P212 reference board. */
#include "meson-gxl-s905x.dtsi"
/ {
aliases {
serial0 = &uart_AO;
ethernet0 = ðmac;
};
chosen {
stdout-path = "serial0:115200n8";
};
memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x0 0x80000000>;
};
hdmi_5v: regulator-hdmi-5v {
compatible = "regulator-fixed";
regulator-name = "HDMI_5V";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
gpio = <&gpio GPIOH_3 GPIO_ACTIVE_HIGH>;
enable-active-high;
regulator-always-on;
};
vddio_boot: regulator-vddio-boot {
compatible = "regulator-fixed";
regulator-name = "VDDIO_BOOT";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
vddao_3v3: regulator-vddao-3v3 {
compatible = "regulator-fixed";
regulator-name = "VDDAO_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
vddio_ao18: regulator-vddio-ao18 {
compatible = "regulator-fixed";
regulator-name = "VDDIO_AO18";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
vcc_3v3: regulator-vcc-3v3 {
compatible = "regulator-fixed";
regulator-name = "VCC_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
emmc_pwrseq: emmc-pwrseq {
compatible = "mmc-pwrseq-emmc";
reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
};
wifi32k: wifi32k {
compatible = "pwm-clock";
#clock-cells = <0>;
clock-frequency = <32768>;
pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768 kHz (30518 ns period) */
};
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
clocks = <&wifi32k>;
clock-names = "ext_clock";
};
};
ðmac {
status = "okay";
};
&ir {
status = "okay";
pinctrl-0 = <&remote_input_ao_pins>;
pinctrl-names = "default";
};
&pwm_ef {
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
clocks = <&clkc CLKID_FCLK_DIV4>;
clock-names = "clkin0";
};
&saradc {
status = "okay";
vref-supply = <&vddio_ao18>;
};
/* Wireless SDIO Module */
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>;
pinctrl-1 = <&sdio_clk_gate_pins>;
pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
bus-width = <4>;
cap-sd-highspeed;
max-frequency = <50000000>;
non-removable;
disable-wp;
/* WiFi firmware requires power to be kept while in suspend */
keep-power-in-suspend;
mmc-pwrseq = <&sdio_pwrseq>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;
brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
};
};
/* SD card */
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
pinctrl-1 = <&sdcard_clk_gate_pins>;
pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
max-frequency = <50000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;
};
/* eMMC */
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>, <&emmc_ds_pins>;
pinctrl-1 = <&emmc_clk_gate_pins>;
pinctrl-names = "default", "clk-gate";
bus-width = <8>;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
disable-wp;
mmc-ddr-1_8v;
mmc-hs200-1_8v;
mmc-pwrseq = <&emmc_pwrseq>;
vmmc-supply = <&vcc_3v3>;
vqmmc-supply = <&vddio_boot>;
};
/* This is connected to the Bluetooth module: */
&uart_A {
status = "okay";
pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
pinctrl-names = "default";
uart-has-rtscts;
bluetooth {
compatible = "brcm,bcm43438-bt";
shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
max-speed = <2000000>;
clocks = <&wifi32k>;
clock-names = "lpo";
};
};
&uart_AO {
status = "okay";
pinctrl-0 = <&uart_ao_a_pins>;
pinctrl-names = "default";
};
&usb {
status = "okay";
dr_mode = "host";
};
&usb2_phy0 {
/*
* HDMI_5V is also used as supply for the USB VBUS.
*/
phy-supply = <&hdmi_5v>;
};
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI Offload Driver
* Copyright (c) 2016 Cavium Inc.
*/
#ifndef _QEDI_H_
#define _QEDI_H_
#define __PREVENT_QED_HSI__
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_host.h>
#include <linux/uio_driver.h>
#include "qedi_hsi.h"
#include <linux/qed/qed_if.h>
#include "qedi_dbg.h"
#include <linux/qed/qed_iscsi_if.h>
#include <linux/qed/qed_ll2_if.h>
#include "qedi_version.h"
#include "qedi_nvm_iscsi_cfg.h"
#define QEDI_MODULE_NAME "qedi"
struct qedi_endpoint;
#ifndef GET_FIELD2
#define GET_FIELD2(value, name) \
(((value) & (name ## _MASK)) >> (name ## _OFFSET))
#endif
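/* GET_FIELD2() is a plain mask-and-shift extractor. A minimal sketch with
 * hypothetical field macros (FOO_MASK / FOO_OFFSET are illustrative only):
 *
 *	#define FOO_MASK	0x00f0
 *	#define FOO_OFFSET	4
 *	u8 foo = GET_FIELD2(0x5678, FOO);  // (0x5678 & 0x00f0) >> 4 == 0x7
 */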
/*
* PCI function probe defines
*/
#define QEDI_MODE_NORMAL 0
#define QEDI_MODE_RECOVERY 1
#define QEDI_MODE_SHUTDOWN 2
#define ISCSI_WQE_SET_PTU_INVALIDATE 1
#define QEDI_MAX_ISCSI_TASK 4096
#define QEDI_MAX_TASK_NUM 0x0FFF
#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024
#define QEDI_ISCSI_MAX_BDS_PER_CMD 255 /* Firmware max BDs is 255 */
#define MAX_OUTSTANDING_TASKS_PER_CON 1024
#define QEDI_MAX_BD_LEN 0xffff
#define QEDI_BD_SPLIT_SZ 0x1000
#define QEDI_PAGE_SIZE 4096
#define QEDI_FAST_SGE_COUNT 4
/* MAX Length for cached SGL */
#define MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
#define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \
num_online_cpus())
#define QEDI_LOCAL_PORT_MIN 60000
#define QEDI_LOCAL_PORT_MAX 61024
#define QEDI_LOCAL_PORT_RANGE (QEDI_LOCAL_PORT_MAX - QEDI_LOCAL_PORT_MIN)
#define QEDI_LOCAL_PORT_INVALID 0xffff
#define TX_RX_RING 16
#define RX_RING (TX_RX_RING - 1)
#define QEDI_PAGE_ALIGN(addr) ALIGN(addr, QEDI_PAGE_SIZE)
#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1))
#define QEDI_HW_DMA_BOUNDARY 0xfff
#define QEDI_PATH_HANDLE 0xFE0000000UL
enum qedi_nvm_tgts {
QEDI_NVM_TGT_PRI,
QEDI_NVM_TGT_SEC,
};
struct qedi_nvm_iscsi_image {
struct nvm_iscsi_cfg iscsi_cfg;
u32 crc;
};
struct qedi_uio_ctrl {
/* meta data */
u32 uio_hsi_version;
/* user writes */
u32 host_tx_prod;
u32 host_rx_cons;
u32 host_rx_bd_cons;
u32 host_tx_pkt_len;
u32 host_rx_cons_cnt;
/* driver writes */
u32 hw_tx_cons;
u32 hw_rx_prod;
u32 hw_rx_bd_prod;
u32 hw_rx_prod_cnt;
/* other */
u8 mac_addr[6];
u8 reserve[2];
};
struct qedi_rx_bd {
u32 rx_pkt_index;
u32 rx_pkt_len;
u16 vlan_id;
};
#define QEDI_RX_DESC_CNT (QEDI_PAGE_SIZE / sizeof(struct qedi_rx_bd))
#define QEDI_MAX_RX_DESC_CNT (QEDI_RX_DESC_CNT - 1)
#define QEDI_NUM_RX_BD (QEDI_RX_DESC_CNT * 1)
#define QEDI_MAX_RX_BD (QEDI_NUM_RX_BD - 1)
#define QEDI_NEXT_RX_IDX(x) ((((x) & (QEDI_MAX_RX_DESC_CNT)) == \
(QEDI_MAX_RX_DESC_CNT - 1)) ? \
(x) + 2 : (x) + 1)
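/* A hedged reading of QEDI_NEXT_RX_IDX(): the ring holds QEDI_RX_DESC_CNT
 * descriptors per page, and the macro appears intended to skip each page's
 * final BD by advancing the index by two instead of one when the next slot
 * would be that last descriptor; every other position simply increments.
 */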
struct qedi_uio_dev {
struct uio_info qedi_uinfo;
u32 uio_dev;
struct list_head list;
u32 ll2_ring_size;
void *ll2_ring;
u32 ll2_buf_size;
void *ll2_buf;
void *rx_pkt;
void *tx_pkt;
struct qedi_ctx *qedi;
struct pci_dev *pdev;
void *uctrl;
};
/* List to maintain the skb pointers */
struct skb_work_list {
struct list_head list;
struct sk_buff *skb;
u16 vlan_id;
};
/* Queue sizes in number of elements */
#define QEDI_SQ_SIZE MAX_OUTSTANDING_TASKS_PER_CON
#define QEDI_CQ_SIZE 2048
#define QEDI_CMDQ_SIZE QEDI_MAX_ISCSI_TASK
#define QEDI_PROTO_CQ_PROD_IDX 0
struct qedi_glbl_q_params {
u64 hw_p_cq; /* Completion queue PBL */
u64 hw_p_rq; /* Request queue PBL */
u64 hw_p_cmdq; /* Command queue PBL */
};
struct global_queue {
union iscsi_cqe *cq;
dma_addr_t cq_dma;
u32 cq_mem_size;
u32 cq_cons_idx; /* Completion queue consumer index */
void *cq_pbl;
dma_addr_t cq_pbl_dma;
u32 cq_pbl_size;
};
struct qedi_fastpath {
struct qed_sb_info *sb_info;
u16 sb_id;
#define QEDI_NAME_SIZE 16
char name[QEDI_NAME_SIZE];
struct qedi_ctx *qedi;
};
/* Used to pass fastpath information needed to process CQEs */
struct qedi_io_work {
struct list_head list;
struct iscsi_cqe_solicited cqe;
u16 que_idx;
};
/**
* struct iscsi_cid_queue - Per adapter iscsi cid queue
*
* @cid_que_base: queue base memory
* @cid_que: queue memory pointer
* @cid_q_prod_idx: producer index
* @cid_q_cons_idx: consumer index
* @cid_q_max_idx: max index, used to detect the wrap-around condition
* @cid_free_cnt: queue size
* @conn_cid_tbl: iscsi cid to conn structure mapping table
*
* Per adapter iSCSI CID Queue
*/
struct iscsi_cid_queue {
void *cid_que_base;
u32 *cid_que;
u32 cid_q_prod_idx;
u32 cid_q_cons_idx;
u32 cid_q_max_idx;
u32 cid_free_cnt;
struct qedi_conn **conn_cid_tbl;
};
struct qedi_portid_tbl {
spinlock_t lock; /* Port id lock */
u16 start;
u16 max;
u16 next;
unsigned long *table;
};
struct qedi_itt_map {
__le32 itt;
struct qedi_cmd *p_cmd;
};
/* I/O tracing entry */
#define QEDI_IO_TRACE_SIZE 2048
struct qedi_io_log {
#define QEDI_IO_TRACE_REQ 0
#define QEDI_IO_TRACE_RSP 1
u8 direction;
u16 task_id;
u32 cid;
u32 port_id; /* Remote port fabric ID */
int lun;
u8 op; /* SCSI CDB */
u8 lba[4];
unsigned int bufflen; /* SCSI buffer length */
unsigned int sg_count; /* Number of SG elements */
u8 fast_sgs; /* number of fast sgls */
u8 slow_sgs; /* number of slow sgls */
u8 cached_sgs; /* number of cached sgls */
int result; /* Result passed back to mid-layer */
unsigned long jiffies; /* Time stamp when I/O logged */
int refcount; /* Reference count for task id */
unsigned int blk_req_cpu; /* CPU that the task is queued on by
* blk layer
*/
unsigned int req_cpu; /* CPU that the task is queued on */
unsigned int intr_cpu; /* Interrupt CPU that the task is received on */
unsigned int blk_rsp_cpu;/* CPU that task is actually processed and
* returned to blk layer
*/
bool cached_sge;
bool slow_sge;
bool fast_sge;
};
/* Number of entries in BDQ */
#define QEDI_BDQ_NUM 256
#define QEDI_BDQ_BUF_SIZE 256
/* DMA coherent buffers for BDQ */
struct qedi_bdq_buf {
void *buf_addr;
dma_addr_t buf_dma;
};
/* Main port level struct */
struct qedi_ctx {
struct qedi_dbg_ctx dbg_ctx;
struct Scsi_Host *shost;
struct pci_dev *pdev;
struct qed_dev *cdev;
struct qed_dev_iscsi_info dev_info;
struct qed_int_info int_info;
struct qedi_glbl_q_params *p_cpuq;
struct global_queue **global_queues;
/* uio declaration */
struct qedi_uio_dev *udev;
struct list_head ll2_skb_list;
spinlock_t ll2_lock; /* Light L2 lock */
spinlock_t hba_lock; /* per port lock */
struct task_struct *ll2_recv_thread;
unsigned long qedi_err_flags;
#define QEDI_ERR_ATTN_CLR_EN 0
#define QEDI_ERR_IS_RECOVERABLE 2
#define QEDI_ERR_OVERRIDE_EN 31
unsigned long flags;
#define UIO_DEV_OPENED 1
#define QEDI_IOTHREAD_WAKE 2
#define QEDI_IN_RECOVERY 5
#define QEDI_IN_OFFLINE 6
#define QEDI_IN_SHUTDOWN 7
#define QEDI_BLOCK_IO 8
u8 mac[ETH_ALEN];
u32 src_ip[4];
u8 ip_type;
/* Physical address of above array */
dma_addr_t hw_p_cpuq;
struct qedi_bdq_buf bdq[QEDI_BDQ_NUM];
void *bdq_pbl;
dma_addr_t bdq_pbl_dma;
size_t bdq_pbl_mem_size;
void *bdq_pbl_list;
dma_addr_t bdq_pbl_list_dma;
u8 bdq_pbl_list_num_entries;
struct qedi_nvm_iscsi_image *iscsi_image;
dma_addr_t nvm_buf_dma;
void __iomem *bdq_primary_prod;
void __iomem *bdq_secondary_prod;
u16 bdq_prod_idx;
u16 rq_num_entries;
u32 max_sqes;
u8 num_queues;
u32 max_active_conns;
s32 msix_count;
struct iscsi_cid_queue cid_que;
struct qedi_endpoint **ep_tbl;
struct qedi_portid_tbl lcl_port_tbl;
/* Rx fast path intr context */
struct qed_sb_info *sb_array;
struct qedi_fastpath *fp_array;
struct qed_iscsi_tid tasks;
#define QEDI_LINK_DOWN 0
#define QEDI_LINK_UP 1
atomic_t link_state;
#define QEDI_RESERVE_TASK_ID 0
#define MAX_ISCSI_TASK_ENTRIES 4096
#define QEDI_INVALID_TASK_ID (MAX_ISCSI_TASK_ENTRIES + 1)
unsigned long task_idx_map[MAX_ISCSI_TASK_ENTRIES / BITS_PER_LONG];
struct qedi_itt_map *itt_map;
u16 tid_reuse_count[QEDI_MAX_ISCSI_TASK];
struct qed_pf_params pf_params;
struct workqueue_struct *tmf_thread;
struct workqueue_struct *offload_thread;
u16 ll2_mtu;
struct workqueue_struct *dpc_wq;
struct delayed_work recovery_work;
struct delayed_work board_disable_work;
spinlock_t task_idx_lock; /* To protect gbl context */
s32 last_tidx_alloc;
s32 last_tidx_clear;
struct qedi_io_log io_trace_buf[QEDI_IO_TRACE_SIZE];
spinlock_t io_trace_lock; /* protect trace log buf */
u16 io_trace_idx;
unsigned int intr_cpu;
u32 cached_sgls;
bool use_cached_sge;
u32 slow_sgls;
bool use_slow_sge;
u32 fast_sgls;
bool use_fast_sge;
atomic_t num_offloads;
#define SYSFS_FLAG_FW_SEL_BOOT 2
#define IPV6_LEN 41
#define IPV4_LEN 17
struct iscsi_boot_kset *boot_kset;
/* Used for iscsi statistics */
struct mutex stats_lock;
};
struct qedi_work {
struct list_head list;
struct qedi_ctx *qedi;
union iscsi_cqe cqe;
u16 que_idx;
bool is_solicited;
};
struct qedi_percpu_s {
struct task_struct *iothread;
struct list_head work_list;
spinlock_t p_work_lock; /* Per cpu worker lock */
};
static inline void *qedi_get_task_mem(struct qed_iscsi_tid *info, u32 tid)
{
return (info->blocks[tid / info->num_tids_per_block] +
(tid % info->num_tids_per_block) * info->size);
}
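/* Worked example for qedi_get_task_mem(): with hypothetical numbers
 * num_tids_per_block == 128 and size == 256, tid 300 resolves to
 * blocks[300 / 128] + (300 % 128) * 256, i.e. blocks[2] + 44 * 256.
 */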
#define QEDI_U64_HI(val) ((u32)(((u64)(val)) >> 32))
#define QEDI_U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
#endif /* _QEDI_H_ */
|
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __LINUX_BRIDGE_EBT_PKTTYPE_H
#define __LINUX_BRIDGE_EBT_PKTTYPE_H
#include <linux/types.h>
struct ebt_pkttype_info {
__u8 pkt_type;
__u8 invert;
};
#define EBT_PKTTYPE_MATCH "pkttype"
#endif
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Marvel systems use the IO7 I/O chip, which provides PCI/PCIX/AGP access
*
* This file is based on:
*
* Marvel / EV7 System Programmer's Manual
* Revision 1.00
* 14 May 2001
*/
#ifndef __ALPHA_MARVEL__H__
#define __ALPHA_MARVEL__H__
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/compiler.h>
#define MARVEL_MAX_PIDS 32 /* as long as we rely on 43-bit superpage */
#define MARVEL_IRQ_VEC_PE_SHIFT (10)
#define MARVEL_IRQ_VEC_IRQ_MASK ((1 << MARVEL_IRQ_VEC_PE_SHIFT) - 1)
#define MARVEL_NR_IRQS \
(16 + (MARVEL_MAX_PIDS * (1 << MARVEL_IRQ_VEC_PE_SHIFT)))
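/* Worked out: with MARVEL_MAX_PIDS == 32 and a 10-bit per-PE vector space,
 * MARVEL_NR_IRQS == 16 + 32 * 1024 == 32784.
 */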
/*
* EV7 RBOX Registers
*/
typedef struct {
volatile unsigned long csr __attribute__((aligned(16)));
} ev7_csr;
typedef struct {
ev7_csr RBOX_CFG; /* 0x0000 */
ev7_csr RBOX_NSVC;
ev7_csr RBOX_EWVC;
ev7_csr RBOX_WHAMI;
ev7_csr RBOX_TCTL; /* 0x0040 */
ev7_csr RBOX_INT;
ev7_csr RBOX_IMASK;
ev7_csr RBOX_IREQ;
ev7_csr RBOX_INTQ; /* 0x0080 */
ev7_csr RBOX_INTA;
ev7_csr RBOX_IT;
ev7_csr RBOX_SCRATCH1;
ev7_csr RBOX_SCRATCH2; /* 0x00c0 */
ev7_csr RBOX_L_ERR;
} ev7_csrs;
/*
* EV7 CSR addressing macros
*/
#define EV7_MASK40(addr) ((addr) & ((1UL << 41) - 1))
#define EV7_KERN_ADDR(addr) ((void *)(IDENT_ADDR | EV7_MASK40(addr)))
#define EV7_PE_MASK 0x1ffUL /* 9 bits ( 256 + mem/io ) */
#define EV7_IPE(pe) ((~((long)(pe)) & EV7_PE_MASK) << 35)
#define EV7_CSR_PHYS(pe, off) (EV7_IPE(pe) | (0x7FFCUL << 20) | (off))
#define EV7_CSRS_PHYS(pe) (EV7_CSR_PHYS(pe, 0UL))
#define EV7_CSR_KERN(pe, off) (EV7_KERN_ADDR(EV7_CSR_PHYS(pe, off)))
#define EV7_CSRS_KERN(pe) (EV7_KERN_ADDR(EV7_CSRS_PHYS(pe)))
#define EV7_CSR_OFFSET(name) ((unsigned long)&((ev7_csrs *)NULL)->name.csr)
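/* Worked example of the CSR address composition (taking the macros above
 * as-is): for pe == 0, EV7_IPE(0) == 0x1ffUL << 35 == 0xFF800000000, so
 * EV7_CSR_PHYS(0, 0) == 0xFF800000000 | (0x7FFCUL << 20) == 0xFFFFFC00000;
 * EV7_CSR_KERN() then maps that physical address through IDENT_ADDR.
 */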
/*
* IO7 registers
*/
typedef struct {
volatile unsigned long csr __attribute__((aligned(64)));
} io7_csr;
typedef struct {
/* I/O Port Control Registers */
io7_csr POx_CTRL; /* 0x0000 */
io7_csr POx_CACHE_CTL;
io7_csr POx_TIMER;
io7_csr POx_IO_ADR_EXT;
io7_csr POx_MEM_ADR_EXT; /* 0x0100 */
io7_csr POx_XCAL_CTRL;
io7_csr rsvd1[2]; /* ?? spec doesn't show 0x180 */
io7_csr POx_DM_SOURCE; /* 0x0200 */
io7_csr POx_DM_DEST;
io7_csr POx_DM_SIZE;
io7_csr POx_DM_CTRL;
io7_csr rsvd2[4]; /* 0x0300 */
/* AGP Control Registers -- port 3 only */
io7_csr AGP_CAP_ID; /* 0x0400 */
io7_csr AGP_STAT;
io7_csr AGP_CMD;
io7_csr rsvd3;
/* I/O Port Monitor Registers */
io7_csr POx_MONCTL; /* 0x0500 */
io7_csr POx_CTRA;
io7_csr POx_CTRB;
io7_csr POx_CTR56;
io7_csr POx_SCRATCH; /* 0x0600 */
io7_csr POx_XTRA_A;
io7_csr POx_XTRA_TS;
io7_csr POx_XTRA_Z;
io7_csr rsvd4; /* 0x0700 */
io7_csr POx_THRESHA;
io7_csr POx_THRESHB;
io7_csr rsvd5[33];
/* System Address Space Window Control Registers */
io7_csr POx_WBASE[4]; /* 0x1000 */
io7_csr POx_WMASK[4];
io7_csr POx_TBASE[4];
io7_csr POx_SG_TBIA;
io7_csr POx_MSI_WBASE;
io7_csr rsvd6[50];
/* I/O Port Error Registers */
io7_csr POx_ERR_SUM;
io7_csr POx_FIRST_ERR;
io7_csr POx_MSK_HEI;
io7_csr POx_TLB_ERR;
io7_csr POx_SPL_COMPLT;
io7_csr POx_TRANS_SUM;
io7_csr POx_FRC_PCI_ERR;
io7_csr POx_MULT_ERR;
io7_csr rsvd7[8];
/* I/O Port End of Interrupt Registers */
io7_csr EOI_DAT;
io7_csr rsvd8[7];
io7_csr POx_IACK_SPECIAL;
io7_csr rsvd9[103];
} io7_ioport_csrs;
typedef struct {
io7_csr IO_ASIC_REV; /* 0x30.0000 */
io7_csr IO_SYS_REV;
io7_csr SER_CHAIN3;
io7_csr PO7_RST1;
io7_csr PO7_RST2; /* 0x30.0100 */
io7_csr POx_RST[4];
io7_csr IO7_DWNH;
io7_csr IO7_MAF;
io7_csr IO7_MAF_TO;
io7_csr IO7_ACC_CLUMP; /* 0x30.0300 */
io7_csr IO7_PMASK;
io7_csr IO7_IOMASK;
io7_csr IO7_UPH;
io7_csr IO7_UPH_TO; /* 0x30.0400 */
io7_csr RBX_IREQ_OFF;
io7_csr RBX_INTA_OFF;
io7_csr INT_RTY;
io7_csr PO7_MONCTL; /* 0x30.0500 */
io7_csr PO7_CTRA;
io7_csr PO7_CTRB;
io7_csr PO7_CTR56;
io7_csr PO7_SCRATCH; /* 0x30.0600 */
io7_csr PO7_XTRA_A;
io7_csr PO7_XTRA_TS;
io7_csr PO7_XTRA_Z;
io7_csr PO7_PMASK; /* 0x30.0700 */
io7_csr PO7_THRESHA;
io7_csr PO7_THRESHB;
io7_csr rsvd1[97];
io7_csr PO7_ERROR_SUM; /* 0x30.2000 */
io7_csr PO7_BHOLE_MASK;
io7_csr PO7_HEI_MSK;
io7_csr PO7_CRD_MSK;
io7_csr PO7_UNCRR_SYM; /* 0x30.2100 */
io7_csr PO7_CRRCT_SYM;
io7_csr PO7_ERR_PKT[2];
io7_csr PO7_UGBGE_SYM; /* 0x30.2200 */
io7_csr rsbv2[887];
io7_csr PO7_LSI_CTL[128]; /* 0x31.0000 */
io7_csr rsvd3[123];
io7_csr HLT_CTL; /* 0x31.3ec0 */
io7_csr HPI_CTL; /* 0x31.3f00 */
io7_csr CRD_CTL;
io7_csr STV_CTL;
io7_csr HEI_CTL;
io7_csr PO7_MSI_CTL[16]; /* 0x31.4000 */
io7_csr rsvd4[240];
/*
* Interrupt Diagnostic / Test
*/
struct {
io7_csr INT_PND;
io7_csr INT_CLR;
io7_csr INT_EOI;
io7_csr rsvd[29];
} INT_DIAG[4];
io7_csr rsvd5[125]; /* 0x31.a000 */
io7_csr MISC_PND; /* 0x31.b800 */
io7_csr rsvd6[31];
io7_csr MSI_PND[16]; /* 0x31.c000 */
io7_csr rsvd7[16];
io7_csr MSI_CLR[16]; /* 0x31.c800 */
} io7_port7_csrs;
/*
* IO7 DMA Window Base register (POx_WBASEx)
*/
#define wbase_m_ena 0x1
#define wbase_m_sg 0x2
#define wbase_m_dac 0x4
#define wbase_m_addr 0xFFF00000
union IO7_POx_WBASE {
struct {
unsigned ena : 1; /* <0> */
unsigned sg : 1; /* <1> */
unsigned dac : 1; /* <2> -- window 3 only */
unsigned rsvd1 : 17;
unsigned addr : 12; /* <31:20> */
unsigned rsvd2 : 32;
} bits;
unsigned as_long[2];
unsigned as_quad;
};
/*
* IO7 IID (Interrupt IDentifier) format
*
* For level-sensitive interrupts, int_num is encoded as:
*
* bus/port slot/device INTx
* <7:5> <4:2> <1:0>
*/
union IO7_IID {
struct {
unsigned int_num : 9; /* <8:0> */
unsigned tpu_mask : 4; /* <12:9> rsvd */
unsigned msi : 1; /* 13 */
unsigned ipe : 10; /* <23:14> */
unsigned long rsvd : 40;
} bits;
unsigned int as_long[2];
unsigned long as_quad;
};
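/* Decoding sketch for a level-sensitive IID, per the encoding above:
 * int_num == (port << 5) | (slot << 2) | intx, so e.g. port 1, slot 2,
 * INTA (0) gives int_num == 0x28.
 */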
/*
* IO7 addressing macros
*/
#define IO7_KERN_ADDR(addr) (EV7_KERN_ADDR(addr))
#define IO7_PORT_MASK 0x07UL /* 3 bits of port */
#define IO7_IPE(pe) (EV7_IPE(pe))
#define IO7_IPORT(port) ((~((long)(port)) & IO7_PORT_MASK) << 32)
#define IO7_HOSE(pe, port) (IO7_IPE(pe) | IO7_IPORT(port))
#define IO7_MEM_PHYS(pe, port) (IO7_HOSE(pe, port) | 0x00000000UL)
#define IO7_CONF_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFE000000UL)
#define IO7_IO_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFF000000UL)
#define IO7_CSR_PHYS(pe, port, off) \
(IO7_HOSE(pe, port) | 0xFF800000UL | (off))
#define IO7_CSRS_PHYS(pe, port) (IO7_CSR_PHYS(pe, port, 0UL))
#define IO7_PORT7_CSRS_PHYS(pe) (IO7_CSR_PHYS(pe, 7, 0x300000UL))
#define IO7_MEM_KERN(pe, port) (IO7_KERN_ADDR(IO7_MEM_PHYS(pe, port)))
#define IO7_CONF_KERN(pe, port) (IO7_KERN_ADDR(IO7_CONF_PHYS(pe, port)))
#define IO7_IO_KERN(pe, port) (IO7_KERN_ADDR(IO7_IO_PHYS(pe, port)))
#define IO7_CSR_KERN(pe, port, off) (IO7_KERN_ADDR(IO7_CSR_PHYS(pe,port,off)))
#define IO7_CSRS_KERN(pe, port) (IO7_KERN_ADDR(IO7_CSRS_PHYS(pe, port)))
#define IO7_PORT7_CSRS_KERN(pe) (IO7_KERN_ADDR(IO7_PORT7_CSRS_PHYS(pe)))
#define IO7_PLL_RNGA(pll) (((pll) >> 3) & 0x7)
#define IO7_PLL_RNGB(pll) (((pll) >> 6) & 0x7)
#define IO7_MEM_SPACE (2UL * 1024 * 1024 * 1024) /* 2GB MEM */
#define IO7_IO_SPACE (8UL * 1024 * 1024) /* 8MB I/O */
/*
* Offset between ram physical addresses and pci64 DAC addresses
*/
#define IO7_DAC_OFFSET (1UL << 49)
/*
* This is needed to satisfy the IO() macro used in initializing the machvec
*/
#define MARVEL_IACK_SC \
((unsigned long) \
(&(((io7_ioport_csrs *)IO7_CSRS_KERN(0, 0))->POx_IACK_SPECIAL)))
#ifdef __KERNEL__
/*
* IO7 structs
*/
#define IO7_NUM_PORTS 4
#define IO7_AGP_PORT 3
struct io7_port {
struct io7 *io7;
struct pci_controller *hose;
int enabled;
unsigned int port;
io7_ioport_csrs *csrs;
unsigned long saved_wbase[4];
unsigned long saved_wmask[4];
unsigned long saved_tbase[4];
};
struct io7 {
struct io7 *next;
unsigned int pe;
io7_port7_csrs *csrs;
struct io7_port ports[IO7_NUM_PORTS];
raw_spinlock_t irq_lock;
};
#ifndef __EXTERN_INLINE
# define __EXTERN_INLINE extern inline
# define __IO_EXTERN_INLINE
#endif
/*
* I/O functions. All access through linear space.
*/
/*
* Memory functions. All accesses through linear space.
*/
#define vucp volatile unsigned char __force *
#define vusp volatile unsigned short __force *
extern u8 marvel_ioread8(const void __iomem *);
extern void marvel_iowrite8(u8 b, void __iomem *);
__EXTERN_INLINE u16 marvel_ioread16(const void __iomem *addr)
{
return __kernel_ldwu(*(vusp)addr);
}
__EXTERN_INLINE void marvel_iowrite16(u16 b, void __iomem *addr)
{
__kernel_stw(b, *(vusp)addr);
}
extern void __iomem *marvel_ioremap(unsigned long addr, unsigned long size);
extern void marvel_iounmap(volatile void __iomem *addr);
extern void __iomem *marvel_ioportmap (unsigned long addr);
__EXTERN_INLINE int marvel_is_ioaddr(unsigned long addr)
{
return (addr >> 40) & 1;
}
extern int marvel_is_mmio(const volatile void __iomem *);
#undef vucp
#undef vusp
#undef __IO_PREFIX
#define __IO_PREFIX marvel
#define marvel_trivial_rw_bw 1
#define marvel_trivial_rw_lq 1
#define marvel_trivial_io_bw 0
#define marvel_trivial_io_lq 1
#define marvel_trivial_iounmap 0
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
# undef __EXTERN_INLINE
# undef __IO_EXTERN_INLINE
#endif
#endif /* __KERNEL__ */
#endif /* __ALPHA_MARVEL__H__ */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* mktables.c
*
* Make RAID-6 tables. This is a host user space program to be run at
* compile time.
*/
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <stdlib.h>
#include <time.h>
static uint8_t gfmul(uint8_t a, uint8_t b)
{
uint8_t v = 0;
while (b) {
if (b & 1)
v ^= a;
a = (a << 1) ^ (a & 0x80 ? 0x1d : 0);
b >>= 1;
}
return v;
}
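/* gfmul() multiplies in GF(2^8) modulo the RAID-6 polynomial
 * x^8 + x^4 + x^3 + x^2 + 1 (0x11d); the 0x1d reduction term encodes its
 * low byte. Example: gfmul(0x80, 0x02) == 0x1d, since x^7 * x == x^8,
 * which reduces to 0x1d.
 */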
static uint8_t gfpow(uint8_t a, int b)
{
uint8_t v = 1;
b %= 255;
if (b < 0)
b += 255;
while (b) {
if (b & 1)
v = gfmul(v, a);
a = gfmul(a, a);
b >>= 1;
}
return v;
}
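/* The exponent is reduced mod 255 because the multiplicative group of
 * GF(2^8) has order 255 (a^255 == 1 for nonzero a); in particular
 * gfpow(a, 254) yields a^-1, which the raid6_gfinv table below relies on.
 */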
int main(int argc, char *argv[])
{
int i, j, k;
uint8_t v;
uint8_t exptbl[256], invtbl[256];
printf("#ifdef __KERNEL__\n");
printf("#include <linux/export.h>\n");
printf("#endif\n");
printf("#include <linux/raid/pq.h>\n");
/* Compute multiplication table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gfmul[256][256] =\n"
"{\n");
for (i = 0; i < 256; i++) {
printf("\t{\n");
for (j = 0; j < 256; j += 8) {
printf("\t\t");
for (k = 0; k < 8; k++)
printf("0x%02x,%c", gfmul(i, j + k),
(k == 7) ? '\n' : ' ');
}
printf("\t},\n");
}
printf("};\n");
printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gfmul);\n");
printf("#endif\n");
/* Compute vector multiplication table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_vgfmul[256][32] =\n"
"{\n");
for (i = 0; i < 256; i++) {
printf("\t{\n");
for (j = 0; j < 16; j += 8) {
printf("\t\t");
for (k = 0; k < 8; k++)
printf("0x%02x,%c", gfmul(i, j + k),
(k == 7) ? '\n' : ' ');
}
for (j = 0; j < 16; j += 8) {
printf("\t\t");
for (k = 0; k < 8; k++)
printf("0x%02x,%c", gfmul(i, (j + k) << 4),
(k == 7) ? '\n' : ' ');
}
printf("\t},\n");
}
printf("};\n");
printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_vgfmul);\n");
printf("#endif\n");
/* Compute power-of-2 table (exponent) */
v = 1;
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gfexp[256] =\n" "{\n");
for (i = 0; i < 256; i += 8) {
printf("\t");
for (j = 0; j < 8; j++) {
exptbl[i + j] = v;
printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
v = gfmul(v, 2);
if (v == 1)
v = 0; /* For entry 255, not a real entry */
}
}
printf("};\n");
printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gfexp);\n");
printf("#endif\n");
/* Compute log-of-2 table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gflog[256] =\n" "{\n");
for (i = 0; i < 256; i += 8) {
printf("\t");
for (j = 0; j < 8; j++) {
v = 255;
for (k = 0; k < 256; k++)
if (exptbl[k] == (i + j)) {
v = k;
break;
}
printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
}
}
printf("};\n");
printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gflog);\n");
printf("#endif\n");
/* Compute inverse table x^-1 == x^254 */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gfinv[256] =\n" "{\n");
for (i = 0; i < 256; i += 8) {
printf("\t");
for (j = 0; j < 8; j++) {
invtbl[i + j] = v = gfpow(i + j, 254);
printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
}
}
printf("};\n");
printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gfinv);\n");
printf("#endif\n");
/* Compute inv(2^x + 1) (exponent-xor-inverse) table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gfexi[256] =\n" "{\n");
for (i = 0; i < 256; i += 8) {
printf("\t");
for (j = 0; j < 8; j++)
printf("0x%02x,%c", invtbl[exptbl[i + j] ^ 1],
(j == 7) ? '\n' : ' ');
}
printf("};\n");
printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gfexi);\n");
printf("#endif\n");
return 0;
}
|
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef _DMUB_DCN30_H_
#define _DMUB_DCN30_H_
#include "dmub_dcn20.h"
/* Registers. */
extern const struct dmub_srv_common_regs dmub_srv_dcn30_regs;
/* Hardware functions. */
void dmub_dcn30_backdoor_load(struct dmub_srv *dmub,
const struct dmub_window *cw0,
const struct dmub_window *cw1);
void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw2,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
const struct dmub_window *cw6,
const struct dmub_window *region6);
#endif /* _DMUB_DCN30_H_ */
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IF_TUNNEL_H_
#define _IF_TUNNEL_H_
#include <linux/ip.h>
#include <linux/in6.h>
#include <uapi/linux/if_tunnel.h>
#include <linux/u64_stats_sync.h>
/*
* Locking: hash tables are protected by RCU and RTNL
*/
#define for_each_ip_tunnel_rcu(pos, start) \
for (pos = rcu_dereference(start); pos; pos = rcu_dereference(pos->next))
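/* A minimal usage sketch (assuming a tunnel type with a ->next pointer,
 * as the macro requires; the ->parms.link test is illustrative only):
 *
 *	rcu_read_lock();
 *	for_each_ip_tunnel_rcu(t, head)
 *		if (t->parms.link == link)
 *			break;
 *	rcu_read_unlock();
 */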
#endif /* _IF_TUNNEL_H_ */
|
#include "core_reloc_types.h"
void f(struct core_reloc_nesting___err_array_container x) {}
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* pasid.h - PASID idr, table and entry header
*
* Copyright (C) 2018 Intel Corporation
*
* Author: Lu Baolu <[email protected]>
*/
#ifndef __INTEL_PASID_H
#define __INTEL_PASID_H
#define PASID_MAX 0x100000
#define PASID_PTE_MASK 0x3F
#define PASID_PTE_PRESENT 1
#define PASID_PTE_FPD 2
#define PDE_PFN_MASK PAGE_MASK
#define PASID_PDE_SHIFT 6
#define MAX_NR_PASID_BITS 20
#define PASID_TBL_ENTRIES BIT(PASID_PDE_SHIFT)
#define is_pasid_enabled(entry) (((entry)->lo >> 3) & 0x1)
#define get_pasid_dir_size(entry) (1 << ((((entry)->lo >> 9) & 0x7) + 7))
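/* Worked example: get_pasid_dir_size() reads the 3-bit size field at
 * bits 11:9 of the low qword and returns 1 << (field + 7), so a field
 * value of 0 yields a 128-entry directory and 7 yields 16384 entries.
 */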
#define PASID_FLAG_NESTED BIT(1)
#define PASID_FLAG_PAGE_SNOOP BIT(2)
/*
* The PASID_FLAG_FL5LP flag indicates that 5-level paging is used for
* first-level translation; otherwise, 4-level paging is used.
*/
#define PASID_FLAG_FL5LP BIT(1)
struct pasid_dir_entry {
u64 val;
};
struct pasid_entry {
u64 val[8];
};
#define PASID_ENTRY_PGTT_FL_ONLY (1)
#define PASID_ENTRY_PGTT_SL_ONLY (2)
#define PASID_ENTRY_PGTT_NESTED (3)
#define PASID_ENTRY_PGTT_PT (4)
/* The representative of a PASID table */
struct pasid_table {
void *table; /* pasid table pointer */
int order; /* page order of pasid table */
u32 max_pasid; /* max pasid */
};
/* Get PRESENT bit of a PASID directory entry. */
static inline bool pasid_pde_is_present(struct pasid_dir_entry *pde)
{
return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
}
/* Get PASID table from a PASID directory entry. */
static inline struct pasid_entry *
get_pasid_table_from_pde(struct pasid_dir_entry *pde)
{
if (!pasid_pde_is_present(pde))
return NULL;
return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
}
/* Get PRESENT bit of a PASID table entry. */
static inline bool pasid_pte_is_present(struct pasid_entry *pte)
{
return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}
/* Get PGTT field of a PASID table entry */
static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
{
return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
}
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
WRITE_ONCE(pe->val[0], 0);
WRITE_ONCE(pe->val[1], 0);
WRITE_ONCE(pe->val[2], 0);
WRITE_ONCE(pe->val[3], 0);
WRITE_ONCE(pe->val[4], 0);
WRITE_ONCE(pe->val[5], 0);
WRITE_ONCE(pe->val[6], 0);
WRITE_ONCE(pe->val[7], 0);
}
static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
WRITE_ONCE(pe->val[1], 0);
WRITE_ONCE(pe->val[2], 0);
WRITE_ONCE(pe->val[3], 0);
WRITE_ONCE(pe->val[4], 0);
WRITE_ONCE(pe->val[5], 0);
WRITE_ONCE(pe->val[6], 0);
WRITE_ONCE(pe->val[7], 0);
}
static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
u64 old;
old = READ_ONCE(*ptr);
WRITE_ONCE(*ptr, (old & ~mask) | bits);
}
static inline u64 pasid_get_bits(u64 *ptr)
{
return READ_ONCE(*ptr);
}
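/* All the setters below are read-modify-write via pasid_set_bits().
 * E.g. pasid_set_domain_id(pe, did) performs, in effect:
 *
 *	u64 old = READ_ONCE(pe->val[1]);
 *	WRITE_ONCE(pe->val[1], (old & ~GENMASK_ULL(15, 0)) | did);
 */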
/*
* Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
* PASID entry.
*/
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}
/*
* Get domain ID value of a scalable mode PASID entry.
*/
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}
/*
* Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
* of a scalable mode PASID entry.
*/
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}
/*
* Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
* entry.
*/
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}
/*
* Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
* of a scalable mode PASID entry.
*/
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}
/*
* Enable fault processing by clearing the FPD(Fault Processing
* Disable) field (Bit 1) of a scalable mode PASID entry.
*/
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[0], 1 << 1, 0);
}
/*
* Enable second level A/D bits by setting the SLADE (Second Level
* Access Dirty Enable) field (Bit 9) of a scalable mode PASID
* entry.
*/
static inline void pasid_set_ssade(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9);
}
/*
* Disable second level A/D bits by clearing the SLADE (Second Level
* Access Dirty Enable) field (Bit 9) of a scalable mode PASID
* entry.
*/
static inline void pasid_clear_ssade(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[0], 1 << 9, 0);
}
/*
* Checks if second level A/D bits specifically the SLADE (Second Level
* Access Dirty Enable) field (Bit 9) of a scalable mode PASID
* entry is set.
*/
static inline bool pasid_get_ssade(struct pasid_entry *pe)
{
return pasid_get_bits(&pe->val[0]) & (1 << 9);
}
/*
* Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
* scalable mode PASID entry.
*/
static inline void pasid_set_sre(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[2], 1 << 0, 1);
}
/*
* Setup the WPE(Write Protect Enable) field (Bit 132) of a
* scalable mode PASID entry.
*/
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}
/*
* Setup the P(Present) field (Bit 0) of a scalable mode PASID
* entry.
*/
static inline void pasid_set_present(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[0], 1 << 0, 1);
}
/*
* Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
* entry.
*/
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}
/*
* Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
* PASID entry.
*/
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}
/*
* Setup the First Level Page table Pointer field (Bit 140~191)
* of a scalable mode PASID entry.
*/
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}
/*
* Setup the First Level Paging Mode field (Bit 130~131) of a
* scalable mode PASID entry.
*/
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}
/*
* Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
* of a scalable mode PASID entry.
*/
static inline void pasid_set_eafe(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
}
extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
struct pasid_table *intel_pasid_get_table(struct device *dev);
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
struct device *dev, pgd_t *pgd,
u32 pasid, u16 did, int flags);
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid);
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
struct device *dev, u32 pasid,
bool enabled);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
struct device *dev, u32 pasid);
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
u32 pasid, struct dmar_domain *domain);
int intel_pasid_replace_first_level(struct intel_iommu *iommu,
struct device *dev, pgd_t *pgd,
u32 pasid, u16 did, u16 old_did,
int flags);
int intel_pasid_replace_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u16 old_did,
u32 pasid);
int intel_pasid_replace_pass_through(struct intel_iommu *iommu,
struct device *dev, u16 old_did,
u32 pasid);
int intel_pasid_replace_nested(struct intel_iommu *iommu,
struct device *dev, u32 pasid,
u16 old_did, struct dmar_domain *domain);
void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
struct device *dev, u32 pasid,
bool fault_ignore);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
struct device *dev, u32 pasid);
int intel_pasid_setup_sm_context(struct device *dev);
void intel_pasid_teardown_sm_context(struct device *dev);
#endif /* __INTEL_PASID_H */
|
/*
* linux/drivers/video/console/fbcon_rotate.c -- Software Rotation
*
* Copyright (C) 2005 Antonino Daplas <adaplas@pol.net>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
#include <asm/types.h>
#include "fbcon.h"
#include "fbcon_rotate.h"
static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc)
{
struct fbcon_ops *ops = info->fbcon_par;
int len, err = 0;
int s_cellsize, d_cellsize, i;
const u8 *src;
u8 *dst;
if (vc->vc_font.data == ops->fontdata &&
ops->p->con_rotate == ops->cur_rotate)
goto finished;
src = ops->fontdata = vc->vc_font.data;
ops->cur_rotate = ops->p->con_rotate;
len = vc->vc_font.charcount;
s_cellsize = ((vc->vc_font.width + 7)/8) *
vc->vc_font.height;
d_cellsize = s_cellsize;
if (ops->rotate == FB_ROTATE_CW ||
ops->rotate == FB_ROTATE_CCW)
d_cellsize = ((vc->vc_font.height + 7)/8) *
vc->vc_font.width;
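/* Worked example: a 12x22 font occupies s_cellsize = ((12 + 7) / 8) * 22
 * == 44 bytes per glyph; rotated 90 degrees it needs d_cellsize =
 * ((22 + 7) / 8) * 12 == 36 bytes, since rows and columns swap.
 */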
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
if (ops->fd_size < d_cellsize * len) {
dst = kmalloc_array(len, d_cellsize, GFP_KERNEL);
if (dst == NULL) {
err = -ENOMEM;
goto finished;
}
ops->fd_size = d_cellsize * len;
kfree(ops->fontbuffer);
ops->fontbuffer = dst;
}
dst = ops->fontbuffer;
memset(dst, 0, ops->fd_size);
switch (ops->rotate) {
case FB_ROTATE_UD:
for (i = len; i--; ) {
rotate_ud(src, dst, vc->vc_font.width,
vc->vc_font.height);
src += s_cellsize;
dst += d_cellsize;
}
break;
case FB_ROTATE_CW:
for (i = len; i--; ) {
rotate_cw(src, dst, vc->vc_font.width,
vc->vc_font.height);
src += s_cellsize;
dst += d_cellsize;
}
break;
case FB_ROTATE_CCW:
for (i = len; i--; ) {
rotate_ccw(src, dst, vc->vc_font.width,
vc->vc_font.height);
src += s_cellsize;
dst += d_cellsize;
}
break;
}
finished:
return err;
}
void fbcon_set_rotate(struct fbcon_ops *ops)
{
ops->rotate_font = fbcon_rotate_font;
switch(ops->rotate) {
case FB_ROTATE_CW:
fbcon_rotate_cw(ops);
break;
case FB_ROTATE_UD:
fbcon_rotate_ud(ops);
break;
case FB_ROTATE_CCW:
fbcon_rotate_ccw(ops);
break;
}
}
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* max31722 - hwmon driver for Maxim Integrated MAX31722/MAX31723 SPI
* digital thermometer and thermostats.
*
* Copyright (c) 2016, Intel Corporation.
*/
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#define MAX31722_REG_CFG 0x00
#define MAX31722_REG_TEMP_LSB 0x01
#define MAX31722_MODE_CONTINUOUS 0x00
#define MAX31722_MODE_STANDBY 0x01
#define MAX31722_MODE_MASK 0xFE
#define MAX31722_RESOLUTION_12BIT 0x06
#define MAX31722_WRITE_MASK 0x80
struct max31722_data {
struct device *hwmon_dev;
struct spi_device *spi_device;
u8 mode;
};
static int max31722_set_mode(struct max31722_data *data, u8 mode)
{
int ret;
struct spi_device *spi = data->spi_device;
u8 buf[2] = {
MAX31722_REG_CFG | MAX31722_WRITE_MASK,
(data->mode & MAX31722_MODE_MASK) | mode
};
ret = spi_write(spi, &buf, sizeof(buf));
if (ret < 0) {
dev_err(&spi->dev, "failed to set sensor mode.\n");
return ret;
}
data->mode = (data->mode & MAX31722_MODE_MASK) | mode;
return 0;
}
static ssize_t max31722_temp_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t ret;
struct max31722_data *data = dev_get_drvdata(dev);
ret = spi_w8r16(data->spi_device, MAX31722_REG_TEMP_LSB);
if (ret < 0)
return ret;
/* The 12-bit sample is left-justified in the 16-bit word; each 12-bit
* LSB is 62.5 millidegrees, so scale the raw value by 125/32.
*/
return sprintf(buf, "%d\n", (s16)le16_to_cpu(ret) * 125 / 32);
}
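/* Worked example for the conversion above: a reading of 25.0 degC comes
 * back as raw 0x1900 (the 12-bit sample 0x190 left-justified), and
 * (s16)0x1900 * 125 / 32 == 6400 * 125 / 32 == 25000 millidegrees.
 */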
static SENSOR_DEVICE_ATTR_RO(temp1_input, max31722_temp, 0);
static struct attribute *max31722_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(max31722);
static int max31722_probe(struct spi_device *spi)
{
int ret;
struct max31722_data *data;
data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
spi_set_drvdata(spi, data);
data->spi_device = spi;
/*
* Set SD bit to 0 so we can have continuous measurements.
* Set resolution to 12 bits for maximum precision.
*/
data->mode = MAX31722_MODE_CONTINUOUS | MAX31722_RESOLUTION_12BIT;
ret = max31722_set_mode(data, MAX31722_MODE_CONTINUOUS);
if (ret < 0)
return ret;
data->hwmon_dev = hwmon_device_register_with_groups(&spi->dev,
spi->modalias,
data,
max31722_groups);
if (IS_ERR(data->hwmon_dev)) {
max31722_set_mode(data, MAX31722_MODE_STANDBY);
return PTR_ERR(data->hwmon_dev);
}
return 0;
}
static void max31722_remove(struct spi_device *spi)
{
struct max31722_data *data = spi_get_drvdata(spi);
int ret;
hwmon_device_unregister(data->hwmon_dev);
ret = max31722_set_mode(data, MAX31722_MODE_STANDBY);
if (ret)
/* There is nothing we can do about this ... */
dev_warn(&spi->dev, "Failed to put device in stand-by mode\n");
}
static int max31722_suspend(struct device *dev)
{
struct spi_device *spi_device = to_spi_device(dev);
struct max31722_data *data = spi_get_drvdata(spi_device);
return max31722_set_mode(data, MAX31722_MODE_STANDBY);
}
static int max31722_resume(struct device *dev)
{
struct spi_device *spi_device = to_spi_device(dev);
struct max31722_data *data = spi_get_drvdata(spi_device);
return max31722_set_mode(data, MAX31722_MODE_CONTINUOUS);
}
static DEFINE_SIMPLE_DEV_PM_OPS(max31722_pm_ops, max31722_suspend, max31722_resume);
static const struct spi_device_id max31722_spi_id[] = {
{"max31722", 0},
{"max31723", 0},
{}
};
MODULE_DEVICE_TABLE(spi, max31722_spi_id);
static struct spi_driver max31722_driver = {
.driver = {
.name = "max31722",
.pm = pm_sleep_ptr(&max31722_pm_ops),
},
.probe = max31722_probe,
.remove = max31722_remove,
.id_table = max31722_spi_id,
};
module_spi_driver(max31722_driver);
MODULE_AUTHOR("Tiberiu Breana <[email protected]>");
MODULE_DESCRIPTION("max31722 sensor driver");
MODULE_LICENSE("GPL v2");
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
md.h : kernel internal structure of the Linux MD driver
Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
*/
#ifndef _MD_MD_H
#define _MD_MD_H
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/badblocks.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <trace/events/block.h>
#include "md-cluster.h"
#define MaxSector (~(sector_t)0)
/*
* These flags should really be called "NO_RETRY" rather than
* "FAILFAST" because they don't make any promise about time lapse,
* only about the number of retries, which will be zero.
* REQ_FAILFAST_DRIVER is not included because
* Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
* seems to suggest that the errors it avoids retrying should usually
* be retried.
*/
#define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
/* Status of sync thread. */
enum sync_action {
/*
* Represented by MD_RECOVERY_SYNC, started when:
* 1) after assembly, syncing data from the first rdev to the other
* copies; this must be done before any other sync action and will
* only execute once;
* 2) resizing the array (note that this is not reshape), syncing data
* for the new range;
*/
ACTION_RESYNC,
/*
* Represented by MD_RECOVERY_RECOVER, started when:
* 1) for a new replacement, syncing data based on the replaced rdev or
* available copies from other rdevs;
* 2) for a new member disk while the array is degraded, syncing data
* from other rdevs;
* 3) reassembling after a power failure, or re-adding a hot-removed
* rdev, syncing data from the first rdev to the other copies based on
* the bitmap;
*/
ACTION_RECOVER,
/*
* Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED |
* MD_RECOVERY_CHECK; starts when the user echoes "check" to the sysfs
* api sync_action, used to check whether data copies from different
* rdevs are the same. The number of mismatched sectors is exported to
* user space via the sysfs api mismatch_cnt;
*/
ACTION_CHECK,
/*
* Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED; starts when
* the user echoes "repair" to the sysfs api sync_action. Usually paired
* with ACTION_CHECK, and used to force a sync once the user has found
* inconsistent data.
*/
ACTION_REPAIR,
/*
* Represented by MD_RECOVERY_RESHAPE; starts when a new member disk is
* added to the conf. Note that this is different from spares or
* replacement;
*/
ACTION_RESHAPE,
/*
* Represented by MD_RECOVERY_FROZEN; can be set via the sysfs api
* sync_action or internally (e.g. when setting the array read-only),
* and forbids the above actions.
*/
ACTION_FROZEN,
/*
* None of the above actions match.
*/
ACTION_IDLE,
NR_SYNC_ACTIONS,
};
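/*
 * A rough sketch of how the recovery bits map onto these actions (the
 * authoritative decoding is md_sync_action(); this is illustrative only):
 *
 *	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
 *		return ACTION_FROZEN;
 *	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
 *		return ACTION_RESHAPE;
 *	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 *		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
 *			return ACTION_CHECK;
 *		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 *			return ACTION_REPAIR;
 *		return ACTION_RESYNC;
 *	}
 *	if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
 *		return ACTION_RECOVER;
 *	return ACTION_IDLE;
 */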
/*
* The struct embedded in rdev is used to serialize IO.
*/
struct serial_in_rdev {
struct rb_root_cached serial_rb;
spinlock_t serial_lock;
wait_queue_head_t serial_io_wait;
};
/*
* MD's 'extended' device
*/
struct md_rdev {
struct list_head same_set; /* RAID devices within the same set */
sector_t sectors; /* Device size (in 512bytes sectors) */
struct mddev *mddev; /* RAID array if running */
int last_events; /* IO event timestamp */
/*
* If meta_bdev is non-NULL, it means that a separate device is
* being used to store the metadata (superblock/bitmap) which
* would otherwise be contained on the same device as the data (bdev).
*/
struct block_device *meta_bdev;
struct block_device *bdev; /* block device handle */
struct file *bdev_file; /* Handle from open for bdev */
struct page *sb_page, *bb_page;
int sb_loaded;
__u64 sb_events;
sector_t data_offset; /* start of data in array */
sector_t new_data_offset;/* only relevant while reshaping */
sector_t sb_start; /* offset of the super block (in 512byte sectors) */
int sb_size; /* bytes in the superblock */
int preferred_minor; /* autorun support */
struct kobject kobj;
/* A device can be in one of three states based on two flags:
* Not working: faulty==1 in_sync==0
* Fully working: faulty==0 in_sync==1
* Working, but not
* in sync with array
* faulty==0 in_sync==0
*
* It can never have faulty==1, in_sync==1
* This reduces the burden of testing multiple flags in many cases
*/
unsigned long flags; /* bit set of 'enum flag_bits' bits. */
wait_queue_head_t blocked_wait;
int desc_nr; /* descriptor index in the superblock */
int raid_disk; /* role of device in array */
int new_raid_disk; /* role that the device will have in
* the array after a level-change completes.
*/
int saved_raid_disk; /* role that device used to have in the
* array and could again if we did a partial
* resync from the bitmap
*/
union {
sector_t recovery_offset;/* If this device has been partially
* recovered, this is where we were
* up to.
*/
sector_t journal_tail; /* If this device is a journal device,
* this is the journal tail (journal
* recovery start point)
*/
};
atomic_t nr_pending; /* number of pending requests.
* only maintained for arrays that
* support hot removal
*/
atomic_t read_errors; /* number of consecutive read errors that
* we have tried to ignore.
*/
time64_t last_read_error; /* monotonic time since our
* last read error
*/
atomic_t corrected_errors; /* number of corrected read errors,
* for reporting to userspace and storing
* in superblock.
*/
struct serial_in_rdev *serial; /* used for raid1 io serialization */
struct kernfs_node *sysfs_state; /* handle for 'state'
* sysfs entry */
/* handle for 'unacknowledged_bad_blocks' sysfs dentry */
struct kernfs_node *sysfs_unack_badblocks;
/* handle for 'bad_blocks' sysfs dentry */
struct kernfs_node *sysfs_badblocks;
struct badblocks badblocks;
struct {
short offset; /* Offset from superblock to start of PPL.
* Not used by external metadata. */
unsigned int size; /* Size in sectors of the PPL space */
sector_t sector; /* First sector of the PPL space */
} ppl;
};
enum flag_bits {
Faulty, /* device is known to have a fault */
In_sync, /* device is in_sync with rest of array */
Bitmap_sync, /* ..actually, not quite In_sync. Need a
* bitmap-based recovery to get fully in sync.
* The bit is only meaningful before device
* has been passed to pers->hot_add_disk.
*/
WriteMostly, /* Avoid reading if at all possible */
AutoDetected, /* added by auto-detect */
Blocked, /* An error occurred but has not yet
* been acknowledged by the metadata
* handler, so don't allow writes
* until it is cleared */
WriteErrorSeen, /* A write error has been seen on this
* device
*/
FaultRecorded, /* Intermediate state for clearing
* Blocked. The Fault is/will-be
* recorded in the metadata, but that
* metadata hasn't been stored safely
* on disk yet.
*/
BlockedBadBlocks, /* A writer is blocked because they
* found an unacknowledged bad-block.
* This can safely be cleared at any
* time, and the writer will re-check.
* It may be set at any time, and at
* worst the writer will timeout and
* re-check. So setting it as
* accurately as possible is good, but
* not absolutely critical.
*/
WantReplacement, /* This device is a candidate to be
* hot-replaced, either because it has
* reported some faults, or because
* of explicit request.
*/
Replacement, /* This device is a replacement for
* a want_replacement device with same
* raid_disk number.
*/
Candidate, /* For clustered environments only:
* This device is seen locally but not
* by the whole cluster
*/
Journal, /* This device is used as journal for
* raid-5/6.
* Usually, this device should be faster
* than other devices in the array
*/
ClusterRemove,
ExternalBbl, /* External metadata provides bad
* block management for a disk
*/
FailFast, /* Minimal retries should be attempted on
* this device, so use REQ_FAILFAST_DEV.
* Also don't try to repair failed reads.
* It is expected that no bad block log
* is present.
*/
LastDev, /* Seems to be the last working dev as
* it didn't fail, so don't use FailFast
* any more for metadata
*/
CollisionCheck, /*
* check if there is collision between raid1
* serial bios.
*/
Nonrot, /* non-rotational device (SSD) */
};
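/*
 * These bits live in md_rdev->flags and are manipulated with the usual
 * atomic bitops, e.g. (illustrative):
 *
 *	if (!test_bit(Faulty, &rdev->flags) &&
 *	    !test_bit(In_sync, &rdev->flags))
 *		;	// working, but not yet in sync with the array
 */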
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
sector_t *first_bad, int *bad_sectors)
{
if (unlikely(rdev->badblocks.count)) {
int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
sectors,
first_bad, bad_sectors);
if (rv)
*first_bad -= rdev->data_offset;
return rv;
}
return 0;
}
static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s,
int sectors)
{
sector_t first_bad;
int bad_sectors;
return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors);
}
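/*
 * Typical use (illustrative): decide whether IO would touch a known bad
 * range before issuing it, e.g.
 *
 *	if (rdev_has_badblock(rdev, bio->bi_iter.bi_sector,
 *			      bio_sectors(bio)))
 *		retry = choose_another_mirror(...);	// hypothetical helper
 */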
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
struct md_cluster_info;
/**
* enum mddev_flags - md device flags.
* @MD_ARRAY_FIRST_USE: First use of array, needs initialization.
* @MD_CLOSING: If set, we are closing the array, do not open it then.
* @MD_JOURNAL_CLEAN: A raid with journal is already clean.
* @MD_HAS_JOURNAL: The raid array has journal feature set.
* @MD_CLUSTER_RESYNC_LOCKED: cluster raid only, which means the node already
* took the resync lock and needs to release it.
* @MD_FAILFAST_SUPPORTED: Using MD_FAILFAST on metadata writes is supported as
* calls to md_error() will never cause the array to
* become failed.
* @MD_HAS_PPL: The raid array has PPL feature set.
* @MD_HAS_MULTIPLE_PPLS: The raid array has multiple PPLs feature set.
* @MD_NOT_READY: do_md_run() is active, so 'array_state' must not report
* that the array is ready yet.
* @MD_BROKEN: This is used to stop writes and mark array as failed.
* @MD_DELETED: This device is being deleted
*
* change UNSUPPORTED_MDDEV_FLAGS for each array type if a new flag is added
*/
enum mddev_flags {
MD_ARRAY_FIRST_USE,
MD_CLOSING,
MD_JOURNAL_CLEAN,
MD_HAS_JOURNAL,
MD_CLUSTER_RESYNC_LOCKED,
MD_FAILFAST_SUPPORTED,
MD_HAS_PPL,
MD_HAS_MULTIPLE_PPLS,
MD_NOT_READY,
MD_BROKEN,
MD_DELETED,
};
enum mddev_sb_flags {
MD_SB_CHANGE_DEVS, /* Some device status has changed */
MD_SB_CHANGE_CLEAN, /* transition to or from 'clean' */
MD_SB_CHANGE_PENDING, /* switch from 'clean' to 'active' in progress */
MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
};
#define NR_SERIAL_INFOS 8
/* record current range of serialize IOs */
struct serial_info {
struct rb_node node;
sector_t start; /* start sector of rb node */
sector_t last; /* end sector of rb node */
sector_t _subtree_last; /* highest sector in subtree of rb node */
};
/*
* mddev->curr_resync stores the current sector of the resync but
* also has some overloaded values.
*/
enum {
/* No resync in progress */
MD_RESYNC_NONE = 0,
/* Yielded to allow another conflicting resync to commence */
MD_RESYNC_YIELDED = 1,
/* Delayed to check that there is no conflict with another sync */
MD_RESYNC_DELAYED = 2,
/* Any value greater than or equal to this is in an active resync */
MD_RESYNC_ACTIVE = 3,
};
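/*
 * Callers can therefore tell "actually syncing" apart from the special
 * values with a plain comparison (illustrative):
 *
 *	if (mddev->curr_resync >= MD_RESYNC_ACTIVE)
 *		sectors_done = mddev->curr_resync_completed;
 */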
struct mddev {
void *private;
struct md_personality *pers;
dev_t unit;
int md_minor;
struct list_head disks;
unsigned long flags;
unsigned long sb_flags;
int suspended;
struct mutex suspend_mutex;
struct percpu_ref active_io;
int ro;
int sysfs_active; /* set when sysfs deletes
* are happening, so run/
* takeover/stop are not safe
*/
struct gendisk *gendisk;
struct kobject kobj;
int hold_active;
#define UNTIL_IOCTL 1
#define UNTIL_STOP 2
/* Superblock information */
int major_version,
minor_version,
patch_version;
int persistent;
int external; /* metadata is
* managed externally */
char metadata_type[17]; /* externally set */
int chunk_sectors;
time64_t ctime, utime;
int level, layout;
char clevel[16];
int raid_disks;
int max_disks;
sector_t dev_sectors; /* used size of
* component devices */
sector_t array_sectors; /* exported array size */
int external_size; /* size managed
* externally */
__u64 events;
/* If the last 'event' was simply a clean->dirty transition, and
* we didn't write it to the spares, then it is safe and simple
* to just decrement the event count on a dirty->clean transition.
* So we record that possibility here.
*/
int can_decrease_events;
char uuid[16];
/* If the array is being reshaped, we need to record the
* new shape and an indication of where we are up to.
* This is written to the superblock.
* If reshape_position is MaxSector, then no reshape is happening (yet).
*/
sector_t reshape_position;
int delta_disks, new_level, new_layout;
int new_chunk_sectors;
int reshape_backwards;
struct md_thread __rcu *thread; /* management thread */
struct md_thread __rcu *sync_thread; /* doing resync or reconstruct */
/*
* Set when a sync operation is started. It holds this value even
* when the sync thread is "frozen" (interrupted) or "idle" (stopped
* or finished). It is overwritten when a new sync operation is begun.
*/
enum sync_action last_sync_action;
sector_t curr_resync; /* last block scheduled */
/* As resync requests can complete out of order, we cannot easily track
* how much resync has been completed. So we occasionally pause until
* everything completes, then set curr_resync_completed to curr_resync.
* As such it may be well behind the real resync mark, but it is a value
* we are certain of.
*/
sector_t curr_resync_completed;
unsigned long resync_mark; /* a recent timestamp */
sector_t resync_mark_cnt;/* blocks written at resync_mark */
sector_t curr_mark_cnt; /* blocks scheduled now */
sector_t resync_max_sectors; /* may be set by personality */
atomic64_t resync_mismatches; /* count of sectors where
* parity/replica mismatch found
*/
/* allow user-space to request suspension of IO to regions of the array */
sector_t suspend_lo;
sector_t suspend_hi;
/* if zero, use the system-wide default */
int sync_speed_min;
int sync_speed_max;
/* resync even though the same disks are shared among md-devices */
int parallel_resync;
int ok_start_degraded;
unsigned long recovery;
/* If a RAID personality determines that recovery (of a particular
* device) will fail due to a read error on the source device, it
* takes a copy of this number and does not attempt recovery again
* until this number changes.
*/
int recovery_disabled;
int in_sync; /* known to not need resync */
/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
* that we are never stopping an array while it is open.
* 'reconfig_mutex' protects all other reconfiguration.
* These locks are separate due to conflicting interactions
* with disk->open_mutex.
* Lock ordering is:
* reconfig_mutex -> disk->open_mutex
* disk->open_mutex -> open_mutex: e.g. __blkdev_get -> md_open
*/
struct mutex open_mutex;
struct mutex reconfig_mutex;
atomic_t active; /* general refcount */
atomic_t openers; /* number of active opens */
int changed; /* True if we might need to
* reread partition info */
int degraded; /* whether md should consider
* adding a spare
*/
atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait;
sector_t recovery_cp;
sector_t resync_min; /* user requested sync
* starts here */
sector_t resync_max; /* resync should pause
* when it gets here */
struct kernfs_node *sysfs_state; /* handle for 'array_state'
* file in sysfs.
*/
struct kernfs_node *sysfs_action; /* handle for 'sync_action' */
struct kernfs_node *sysfs_completed; /* handle for 'sync_completed' */
struct kernfs_node *sysfs_degraded; /* handle for 'degraded' */
struct kernfs_node *sysfs_level; /* handle for 'level' */
/* used for delayed sysfs removal */
struct work_struct del_work;
/* used for register new sync thread */
struct work_struct sync_work;
/* "lock" protects:
* flush_bio transition from NULL to !NULL
* rdev superblocks, events
* clearing MD_CHANGE_*
* in_sync - and related safemode and MD_CHANGE changes
* pers (also protected by reconfig_mutex and pending IO).
* clearing ->bitmap
* clearing ->bitmap_info.file
* changing ->resync_{min,max}
* setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
*/
spinlock_t lock;
wait_queue_head_t sb_wait; /* for waiting on superblock updates */
atomic_t pending_writes; /* number of active superblock writes */
unsigned int safemode; /* if set, update "clean" superblock
* when no writes pending.
*/
unsigned int safemode_delay;
struct timer_list safemode_timer;
struct percpu_ref writes_pending;
int sync_checkers; /* # of threads checking writes_pending */
void *bitmap; /* the bitmap for the device */
struct bitmap_operations *bitmap_ops;
struct {
struct file *file; /* the bitmap file */
loff_t offset; /* offset from superblock of
* start of bitmap. May be
* negative, but not '0'
* For external metadata, offset
* from start of device.
*/
unsigned long space; /* space available at this offset */
loff_t default_offset; /* this is the offset to use when
* hot-adding a bitmap. It should
* eventually be settable by sysfs.
*/
unsigned long default_space; /* space available at
* default offset */
struct mutex mutex;
unsigned long chunksize;
unsigned long daemon_sleep; /* how many jiffies between updates? */
unsigned long max_write_behind; /* write-behind mode */
int external;
int nodes; /* Maximum number of nodes in the cluster */
char cluster_name[64]; /* Name of the cluster */
} bitmap_info;
atomic_t max_corr_read_errors; /* max read retries */
struct list_head all_mddevs;
const struct attribute_group *to_remove;
struct bio_set bio_set;
struct bio_set sync_set; /* for sync operations like
* metadata and bitmap writes
*/
struct bio_set io_clone_set;
struct work_struct event_work; /* used by dm to report failure event */
mempool_t *serial_info_pool;
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
unsigned int good_device_nr; /* good device num within cluster raid */
unsigned int noio_flag; /* for memalloc scope API */
/*
* Temporarily store rdev that will be finally removed when
* reconfig_mutex is unlocked, protected by reconfig_mutex.
*/
struct list_head deleting;
/* The sequence number for sync thread */
atomic_t sync_seq;
bool has_superblocks:1;
bool fail_last_dev:1;
bool serialize_policy:1;
};
enum recovery_flags {
/* flags for sync thread running status */
/*
* set when one of the sync actions is set and a new sync thread needs to
* be registered, or when just adding/removing spares from the conf.
*/
MD_RECOVERY_NEEDED,
/* sync thread is running, or about to be started */
MD_RECOVERY_RUNNING,
/* sync thread needs to be aborted for some reason */
MD_RECOVERY_INTR,
/* sync thread is done and is waiting to be unregistered */
MD_RECOVERY_DONE,
/* running sync thread must abort immediately, and not restart */
MD_RECOVERY_FROZEN,
/* waiting for pers->start() to finish */
MD_RECOVERY_WAIT,
/* interrupted because of an IO error */
MD_RECOVERY_ERROR,
/* the following flags determine the sync action, see details in enum sync_action */
/* if just this flag is set, action is resync. */
MD_RECOVERY_SYNC,
/*
* paired with MD_RECOVERY_SYNC; if MD_RECOVERY_CHECK is not set, the
* action is repair, meaning the user requested the sync.
*/
MD_RECOVERY_REQUESTED,
/*
* paired with MD_RECOVERY_SYNC and MD_RECOVERY_REQUESTED, action is
* check.
*/
MD_RECOVERY_CHECK,
/* recovery, or need to try it */
MD_RECOVERY_RECOVER,
/* reshape */
MD_RECOVERY_RESHAPE,
/* remote node is running resync thread */
MD_RESYNCING_REMOTE,
};
enum md_ro_state {
MD_RDWR,
MD_RDONLY,
MD_AUTO_READ,
MD_MAX_STATE
};
static inline bool md_is_rdwr(struct mddev *mddev)
{
return (mddev->ro == MD_RDWR);
}
static inline bool reshape_interrupted(struct mddev *mddev)
{
/* reshape never started */
if (mddev->reshape_position == MaxSector)
return false;
/* interrupted */
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return true;
/* running reshape will be interrupted soon. */
if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
return true;
return false;
}
static inline int __must_check mddev_lock(struct mddev *mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
}
/* Sometimes we need to take the lock in a situation where
* failure due to interrupts is not acceptable.
*/
static inline void mddev_lock_nointr(struct mddev *mddev)
{
mutex_lock(&mddev->reconfig_mutex);
}
static inline int mddev_trylock(struct mddev *mddev)
{
return mutex_trylock(&mddev->reconfig_mutex);
}
extern void mddev_unlock(struct mddev *mddev);
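/*
 * The usual reconfiguration pattern (illustrative):
 *
 *	if (mddev_lock(mddev))
 *		return -EINTR;
 *	// ... modify the array ...
 *	mddev_unlock(mddev);
 *
 * mddev_lock() fails only when interrupted by a signal, so sysfs store
 * handlers commonly propagate its return value directly.
 */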
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
if (blk_queue_io_stat(bdev->bd_disk->queue))
atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
}
static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
{
md_sync_acct(bio->bi_bdev, nr_sectors);
}
struct md_personality
{
char *name;
int level;
struct list_head list;
struct module *owner;
bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
/*
* start up work that does NOT require an md_thread; tasks that
* require an md_thread should go into start()
*/
int (*run)(struct mddev *mddev);
/* start up work that requires md threads */
int (*start)(struct mddev *mddev);
void (*free)(struct mddev *mddev, void *priv);
void (*status)(struct seq_file *seq, struct mddev *mddev);
/* error_handler must set ->faulty and clear ->in_sync
* if appropriate, and should abort recovery if needed
*/
void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*spare_active) (struct mddev *mddev);
sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr,
sector_t max_sector, int *skipped);
int (*resize) (struct mddev *mddev, sector_t sectors);
sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
int (*check_reshape) (struct mddev *mddev);
int (*start_reshape) (struct mddev *mddev);
void (*finish_reshape) (struct mddev *mddev);
void (*update_reshape_pos) (struct mddev *mddev);
void (*prepare_suspend) (struct mddev *mddev);
/* quiesce suspends or resumes internal processing.
* 1 - stop new actions and wait for action io to complete
* 0 - return to normal behaviour
*/
void (*quiesce) (struct mddev *mddev, int quiesce);
/* takeover is used to transition an array from one
* personality to another. The new personality must be able
* to handle the data in the current layout.
* e.g. 2drive raid1 -> 2drive raid5
* ndrive raid5 -> degraded n+1drive raid6 with special layout
* If the takeover succeeds, a new 'private' structure is returned.
* This needs to be installed and then ->run used to activate the
* array.
*/
void *(*takeover) (struct mddev *mddev);
/* Changes the consistency policy of an active array. */
int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
};
struct md_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct mddev *, char *);
ssize_t (*store)(struct mddev *, const char *, size_t);
};
extern const struct attribute_group md_bitmap_group;
static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
if (sd)
return sysfs_get_dirent(sd, name);
return sd;
}
static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
if (sd)
sysfs_notify_dirent(sd);
}
static inline char *mdname(struct mddev *mddev)
{
return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
if (!test_bit(Replacement, &rdev->flags) &&
!test_bit(Journal, &rdev->flags) &&
mddev->kobj.sd) {
sprintf(nm, "rd%d", rdev->raid_disk);
return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
} else
return 0;
}
static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
if (!test_bit(Replacement, &rdev->flags) &&
!test_bit(Journal, &rdev->flags) &&
mddev->kobj.sd) {
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&mddev->kobj, nm);
}
}
/*
* Iterates through some rdev ringlist. It's safe to remove the
* current 'rdev'; don't touch 'tmp' though.
*/
#define rdev_for_each_list(rdev, tmp, head) \
list_for_each_entry_safe(rdev, tmp, head, same_set)
/*
* iterates through the 'same array disks' ringlist
*/
#define rdev_for_each(rdev, mddev) \
list_for_each_entry(rdev, &((mddev)->disks), same_set)
#define rdev_for_each_safe(rdev, tmp, mddev) \
list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
#define rdev_for_each_rcu(rdev, mddev) \
list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
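/*
 * Example (illustrative): counting the in-sync members of an array
 * while holding reconfig_mutex:
 *
 *	struct md_rdev *rdev;
 *	int cnt = 0;
 *
 *	rdev_for_each(rdev, mddev)
 *		if (test_bit(In_sync, &rdev->flags))
 *			cnt++;
 */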
struct md_thread {
void (*run) (struct md_thread *thread);
struct mddev *mddev;
wait_queue_head_t wqueue;
unsigned long flags;
struct task_struct *tsk;
unsigned long timeout;
void *private;
};
struct md_io_clone {
struct mddev *mddev;
struct bio *orig_bio;
unsigned long start_time;
struct bio bio_clone;
};
#define THREAD_WAKEUP 0
static inline void safe_put_page(struct page *p)
{
if (p) put_page(p);
}
extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
extern int register_md_cluster_operations(const struct md_cluster_operations *ops,
struct module *module);
extern int unregister_md_cluster_operations(void);
extern int md_setup_cluster(struct mddev *mddev, int nodes);
extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
void (*run)(struct md_thread *thread),
struct mddev *mddev,
const char *name);
extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp);
extern void md_wakeup_thread(struct md_thread __rcu *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern enum sync_action md_sync_action(struct mddev *mddev);
extern enum sync_action md_sync_action_by_name(const char *page);
extern const char *md_sync_action_name(enum sync_action action);
extern void md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio, sector_t start, sector_t size);
void md_account_bio(struct mddev *mddev, struct bio **bio);
void md_free_cloned_bio(struct bio *bio);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, blk_opf_t opf, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(void);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern int mddev_init(struct mddev *mddev);
extern void mddev_destroy(struct mddev *mddev);
void md_init_stacking_limits(struct queue_limits *lim);
struct mddev *md_alloc(dev_t dev, char *name);
void mddev_put(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);
extern bool md_handle_request(struct mddev *mddev, struct bio *bio);
extern int mddev_suspend(struct mddev *mddev, bool interruptible);
extern void mddev_resume(struct mddev *mddev);
extern void md_idle_sync_thread(struct mddev *mddev);
extern void md_frozen_sync_thread(struct mddev *mddev);
extern void md_unfrozen_sync_thread(struct mddev *mddev);
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev);
extern void mddev_destroy_serial_pool(struct mddev *mddev,
struct md_rdev *rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
static inline bool is_rdev_broken(struct md_rdev *rdev)
{
return !disk_live(rdev->bdev->bd_disk);
}
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
int faulty = test_bit(Faulty, &rdev->flags);
if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
}
extern const struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}
/* clear unsupported mddev_flags */
static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
unsigned long unsupported_flags)
{
mddev->flags &= ~unsupported_flags;
}
static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
mddev->gendisk->queue->limits.max_write_zeroes_sectors = 0;
}
static inline int mddev_suspend_and_lock(struct mddev *mddev)
{
int ret;
ret = mddev_suspend(mddev, true);
if (ret)
return ret;
ret = mddev_lock(mddev);
if (ret)
mddev_resume(mddev);
return ret;
}
static inline void mddev_suspend_and_lock_nointr(struct mddev *mddev)
{
mddev_suspend(mddev, false);
mutex_lock(&mddev->reconfig_mutex);
}
static inline void mddev_unlock_and_resume(struct mddev *mddev)
{
mddev_unlock(mddev);
mddev_resume(mddev);
}
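/*
 * The suspend/lock helpers pair up as follows (illustrative):
 *
 *	err = mddev_suspend_and_lock(mddev);
 *	if (err)
 *		return err;
 *	// ... reconfigure while IO is quiesced ...
 *	mddev_unlock_and_resume(mddev);
 */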
struct mdu_array_info_s;
struct mdu_disk_info_s;
extern int mdp_major;
extern struct workqueue_struct *md_bitmap_wq;
void md_autostart_arrays(int part);
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
int do_md_run(struct mddev *mddev);
#define MDDEV_STACK_INTEGRITY (1u << 0)
int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
unsigned int flags);
int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev);
void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes);
extern const struct block_device_operations md_fops;
/*
* MD devices can be used underneath by DM, in which case ->gendisk is NULL.
*/
static inline bool mddev_is_dm(struct mddev *mddev)
{
return !mddev->gendisk;
}
static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
sector_t sector)
{
if (!mddev_is_dm(mddev))
trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector);
}
static inline bool rdev_blocked(struct md_rdev *rdev)
{
/*
* Blocked will be set by error handler and cleared by daemon after
* updating superblock, meanwhile write IO should be blocked to prevent
* reading old data after power failure.
*/
if (test_bit(Blocked, &rdev->flags))
return true;
/*
* Faulty device should not be accessed anymore, there is no need to
* wait for bad block to be acknowledged.
*/
if (test_bit(Faulty, &rdev->flags))
return false;
/* rdev is blocked by badblocks. */
if (test_bit(BlockedBadBlocks, &rdev->flags))
return true;
return false;
}
#define mddev_add_trace_msg(mddev, fmt, args...) \
do { \
if (!mddev_is_dm(mddev)) \
blk_add_trace_msg((mddev)->gendisk->queue, fmt, ##args); \
} while (0)
#endif /* _MD_MD_H */
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/otg.h>
#include <linux/usb/usb_phy_generic.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/usb/of.h>
#include "phy-am335x-control.h"
#include "phy-generic.h"
struct am335x_phy {
struct usb_phy_generic usb_phy_gen;
struct phy_control *phy_ctrl;
int id;
enum usb_dr_mode dr_mode;
};
static int am335x_init(struct usb_phy *phy)
{
struct am335x_phy *am_phy = dev_get_drvdata(phy->dev);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, true);
return 0;
}
static void am335x_shutdown(struct usb_phy *phy)
{
struct am335x_phy *am_phy = dev_get_drvdata(phy->dev);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
}
static int am335x_phy_probe(struct platform_device *pdev)
{
struct am335x_phy *am_phy;
struct device *dev = &pdev->dev;
int ret;
am_phy = devm_kzalloc(dev, sizeof(*am_phy), GFP_KERNEL);
if (!am_phy)
return -ENOMEM;
am_phy->phy_ctrl = am335x_get_phy_control(dev);
if (!am_phy->phy_ctrl)
return -EPROBE_DEFER;
am_phy->id = of_alias_get_id(pdev->dev.of_node, "phy");
if (am_phy->id < 0) {
dev_err(&pdev->dev, "Missing PHY id: %d\n", am_phy->id);
return am_phy->id;
}
am_phy->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen);
if (ret)
return ret;
am_phy->usb_phy_gen.phy.init = am335x_init;
am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
platform_set_drvdata(pdev, am_phy);
device_init_wakeup(dev, true);
/*
* If we leave PHY wakeup enabled then AM33XX wakes up
* immediately from DS0. To avoid this we set dev->power.can_wakeup
* to false. The same is checked in suspend routine to decide
* on whether to enable PHY wakeup or not.
* PHY wakeup works fine in standby mode, thereby allowing us to
* handle remote wakeup, wakeup on disconnect and connect.
*/
device_set_wakeup_enable(dev, false);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
return usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
}
static void am335x_phy_remove(struct platform_device *pdev)
{
struct am335x_phy *am_phy = platform_get_drvdata(pdev);
usb_remove_phy(&am_phy->usb_phy_gen.phy);
}
#ifdef CONFIG_PM_SLEEP
static int am335x_phy_suspend(struct device *dev)
{
struct am335x_phy *am_phy = dev_get_drvdata(dev);
/*
* Enable phy wakeup only if dev->power.can_wakeup is true.
* Make sure to enable wakeup to support remote wakeup in
* standby mode (the same is not supported in OFF (DS0) mode).
* Enable it by doing
* echo enabled > /sys/bus/platform/devices/<usb-phy-id>/power/wakeup
*/
if (device_may_wakeup(dev))
phy_ctrl_wkup(am_phy->phy_ctrl, am_phy->id, true);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
return 0;
}
static int am335x_phy_resume(struct device *dev)
{
struct am335x_phy *am_phy = dev_get_drvdata(dev);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, true);
if (device_may_wakeup(dev))
phy_ctrl_wkup(am_phy->phy_ctrl, am_phy->id, false);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(am335x_pm_ops, am335x_phy_suspend, am335x_phy_resume);
static const struct of_device_id am335x_phy_ids[] = {
{ .compatible = "ti,am335x-usb-phy" },
{ }
};
MODULE_DEVICE_TABLE(of, am335x_phy_ids);
static struct platform_driver am335x_phy_driver = {
.probe = am335x_phy_probe,
.remove = am335x_phy_remove,
.driver = {
.name = "am335x-phy-driver",
.pm = &am335x_pm_ops,
.of_match_table = am335x_phy_ids,
},
};
module_platform_driver(am335x_phy_driver);
MODULE_DESCRIPTION("AM335x USB PHY Driver");
MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2008 Emcraft Systems
* Sergei Poselenov <[email protected]>
*
* Based on MPC8560 ADS and arch/ppc tqm85xx ports
*
* Maintained by Kumar Gala (see MAINTAINERS for contact information)
*
* Copyright 2008 Freescale Semiconductor Inc.
*
* Copyright (c) 2005-2006 DENX Software Engineering
* Stefan Roese <[email protected]>
*
* Based on original work by
* Kumar Gala <[email protected]>
* Copyright 2004 Freescale Semiconductor Inc.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include "mpc85xx.h"
#include "socrates_fpga_pic.h"
static void __init socrates_pic_init(void)
{
struct device_node *np;
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
np = of_find_compatible_node(NULL, NULL, "abb,socrates-fpga-pic");
if (!np) {
printk(KERN_ERR "Could not find socrates-fpga-pic node\n");
return;
}
socrates_fpga_pic_init(np);
of_node_put(np);
}
/*
* Setup the architecture
*/
static void __init socrates_setup_arch(void)
{
if (ppc_md.progress)
ppc_md.progress("socrates_setup_arch()", 0);
fsl_pci_assign_primary();
}
machine_arch_initcall(socrates, mpc85xx_common_publish_devices);
define_machine(socrates) {
.name = "Socrates",
.compatible = "abb,socrates",
.setup_arch = socrates_setup_arch,
.init_IRQ = socrates_pic_init,
.get_irq = mpic_get_irq,
.progress = udbg_progress,
};
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright Collabora Ltd., 2021
*
* futex cmp requeue test by André Almeida <[email protected]>
*/
#include <pthread.h>
#include <sys/shm.h>
#include <sys/mman.h>
#include <fcntl.h>
#include "logging.h"
#include "futextest.h"
#define TEST_NAME "futex-wait"
#define timeout_ns 30000000
#define WAKE_WAIT_US 10000
#define SHM_PATH "futex_shm_file"
void *futex;
void usage(char *prog)
{
printf("Usage: %s\n", prog);
printf(" -c Use color\n");
printf(" -h Display this help message\n");
printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
VQUIET, VCRITICAL, VINFO);
}
static void *waiterfn(void *arg)
{
struct timespec to;
unsigned int flags = 0;
if (arg)
flags = *((unsigned int *) arg);
to.tv_sec = 0;
to.tv_nsec = timeout_ns;
if (futex_wait(futex, 0, &to, flags))
printf("waiter failed errno %d\n", errno);
return NULL;
}
int main(int argc, char *argv[])
{
int res, ret = RET_PASS, fd, c, shm_id;
u_int32_t f_private = 0, *shared_data;
unsigned int flags = FUTEX_PRIVATE_FLAG;
pthread_t waiter;
void *shm;
futex = &f_private;
while ((c = getopt(argc, argv, "cht:v:")) != -1) {
switch (c) {
case 'c':
log_color(1);
break;
case 'h':
usage(basename(argv[0]));
exit(0);
case 'v':
log_verbosity(atoi(optarg));
break;
default:
usage(basename(argv[0]));
exit(1);
}
}
ksft_print_header();
ksft_set_plan(3);
ksft_print_msg("%s: Test futex_wait\n", basename(argv[0]));
/* Testing a private futex */
info("Calling private futex_wait on futex: %p\n", futex);
if (pthread_create(&waiter, NULL, waiterfn, (void *) &flags))
error("pthread_create failed\n", errno);
usleep(WAKE_WAIT_US);
info("Calling private futex_wake on futex: %p\n", futex);
res = futex_wake(futex, 1, FUTEX_PRIVATE_FLAG);
if (res != 1) {
ksft_test_result_fail("futex_wake private returned: %d %s\n",
errno, strerror(errno));
ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_wake private succeeds\n");
}
/* Testing anonymous-page shared memory */
shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
if (shm_id < 0) {
perror("shmget");
exit(1);
}
shared_data = shmat(shm_id, NULL, 0);
*shared_data = 0;
futex = shared_data;
info("Calling shared (page anon) futex_wait on futex: %p\n", futex);
if (pthread_create(&waiter, NULL, waiterfn, NULL))
error("pthread_create failed\n", errno);
usleep(WAKE_WAIT_US);
info("Calling shared (page anon) futex_wake on futex: %p\n", futex);
res = futex_wake(futex, 1, 0);
if (res != 1) {
ksft_test_result_fail("futex_wake shared (page anon) returned: %d %s\n",
errno, strerror(errno));
ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_wake shared (page anon) succeeds\n");
}
/* Testing file-backed shared memory */
fd = open(SHM_PATH, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
if (fd < 0) {
perror("open");
exit(1);
}
if (ftruncate(fd, sizeof(f_private))) {
perror("ftruncate");
exit(1);
}
shm = mmap(NULL, sizeof(f_private), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (shm == MAP_FAILED) {
perror("mmap");
exit(1);
}
memcpy(shm, &f_private, sizeof(f_private));
futex = shm;
info("Calling shared (file backed) futex_wait on futex: %p\n", futex);
if (pthread_create(&waiter, NULL, waiterfn, NULL))
error("pthread_create failed\n", errno);
usleep(WAKE_WAIT_US);
info("Calling shared (file backed) futex_wake on futex: %p\n", futex);
res = futex_wake(shm, 1, 0);
if (res != 1) {
ksft_test_result_fail("futex_wake shared (file backed) returned: %d %s\n",
errno, strerror(errno));
ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_wake shared (file backed) succeeds\n");
}
/* Freeing resources */
shmdt(shared_data);
munmap(shm, sizeof(f_private));
remove(SHM_PATH);
close(fd);
ksft_print_cnts();
return ret;
}
// SPDX-License-Identifier: GPL-2.0
/*
* Handle caching attributes in page tables (PAT)
*
* Authors: Venkatesh Pallipadi <[email protected]>
* Suresh B Siddha <[email protected]>
*
* Interval tree used to store the PAT memory type reservations.
*/
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/pgtable.h>
#include <asm/memtype.h>
#include "memtype.h"
/*
* The memtype tree keeps track of memory type for specific
* physical memory areas. Without proper tracking, conflicting memory
* types in different mappings can cause CPU cache corruption.
*
* The tree is an interval tree (augmented rbtree) ordered
* by the starting address. The tree can contain multiple entries for
* different regions which overlap. All the aliases have the same
* cache attributes of course, as enforced by the PAT logic.
*
* memtype_lock protects the rbtree.
*/
static inline u64 interval_start(struct memtype *entry)
{
return entry->start;
}
static inline u64 interval_end(struct memtype *entry)
{
return entry->end - 1;
}
INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end,
interval_start, interval_end,
static, interval)
static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED;
enum {
MEMTYPE_EXACT_MATCH = 0,
MEMTYPE_END_MATCH = 1
};
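/*
 * Illustrative example: with an existing entry covering [0x1000, 0x3000),
 * memtype_match(0x1000, 0x3000, MEMTYPE_EXACT_MATCH) finds it (the munmap
 * case), while memtype_match(0x2000, 0x3000, MEMTYPE_END_MATCH) finds it
 * too (the mremap case, where the node is shrunk from the end).
 */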
static struct memtype *memtype_match(u64 start, u64 end, int match_type)
{
struct memtype *entry_match;
entry_match = interval_iter_first(&memtype_rbroot, start, end-1);
while (entry_match != NULL && entry_match->start < end) {
if ((match_type == MEMTYPE_EXACT_MATCH) &&
(entry_match->start == start) && (entry_match->end == end))
return entry_match;
if ((match_type == MEMTYPE_END_MATCH) &&
(entry_match->start < start) && (entry_match->end == end))
return entry_match;
entry_match = interval_iter_next(entry_match, start, end-1);
}
return NULL; /* Returns NULL if there is no match */
}
static int memtype_check_conflict(u64 start, u64 end,
enum page_cache_mode reqtype,
enum page_cache_mode *newtype)
{
struct memtype *entry_match;
enum page_cache_mode found_type = reqtype;
entry_match = interval_iter_first(&memtype_rbroot, start, end-1);
if (entry_match == NULL)
goto success;
if (entry_match->type != found_type && newtype == NULL)
goto failure;
dprintk("Overlap at 0x%Lx-0x%Lx\n", entry_match->start, entry_match->end);
found_type = entry_match->type;
entry_match = interval_iter_next(entry_match, start, end-1);
while (entry_match) {
if (entry_match->type != found_type)
goto failure;
entry_match = interval_iter_next(entry_match, start, end-1);
}
success:
if (newtype)
*newtype = found_type;
return 0;
failure:
pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
current->comm, current->pid, start, end,
cattr_name(found_type), cattr_name(entry_match->type));
return -EBUSY;
}
int memtype_check_insert(struct memtype *entry_new, enum page_cache_mode *ret_type)
{
int err = 0;
err = memtype_check_conflict(entry_new->start, entry_new->end, entry_new->type, ret_type);
if (err)
return err;
if (ret_type)
entry_new->type = *ret_type;
interval_insert(entry_new, &memtype_rbroot);
return 0;
}
struct memtype *memtype_erase(u64 start, u64 end)
{
struct memtype *entry_old;
/*
* Since the memtype_rbroot tree allows overlapping ranges,
* memtype_erase() checks with EXACT_MATCH first, i.e. free
* a whole node for the munmap case. If no such entry is found,
* it then checks with END_MATCH, i.e. shrink the size of a node
* from the end for the mremap case.
*/
entry_old = memtype_match(start, end, MEMTYPE_EXACT_MATCH);
if (!entry_old) {
entry_old = memtype_match(start, end, MEMTYPE_END_MATCH);
if (!entry_old)
return ERR_PTR(-EINVAL);
}
if (entry_old->start == start) {
/* munmap: erase this node */
interval_remove(entry_old, &memtype_rbroot);
} else {
/* mremap: update the end value of this node */
interval_remove(entry_old, &memtype_rbroot);
entry_old->end = start;
interval_insert(entry_old, &memtype_rbroot);
return NULL;
}
return entry_old;
}
struct memtype *memtype_lookup(u64 addr)
{
return interval_iter_first(&memtype_rbroot, addr, addr + PAGE_SIZE-1);
}
/*
* Debugging helper: copy the Nth entry of the tree into a
* copy for printout. This allows us to print out the tree
* via debugfs, without holding the memtype_lock too long:
*/
#ifdef CONFIG_DEBUG_FS
int memtype_copy_nth_element(struct memtype *entry_out, loff_t pos)
{
struct memtype *entry_match;
int i = 1;
entry_match = interval_iter_first(&memtype_rbroot, 0, ULONG_MAX);
while (entry_match && pos != i) {
entry_match = interval_iter_next(entry_match, 0, ULONG_MAX);
i++;
}
if (entry_match) { /* pos == i */
*entry_out = *entry_match;
return 0;
} else {
return 1;
}
}
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* Important notes about in-place decompression
*
* At least on x86, the kernel is decompressed in place: the compressed data
* is placed to the end of the output buffer, and the decompressor overwrites
* most of the compressed data. There must be enough safety margin to
* guarantee that the write position is always behind the read position.
*
* The safety margin for ZSTD with a 128 KB block size is calculated below.
* Note that the margin with ZSTD is bigger than with GZIP or XZ!
*
* The worst case for in-place decompression is that the beginning of
* the file is compressed extremely well, and the rest of the file is
* uncompressible. Thus, we must look for worst-case expansion when the
* compressor is encoding uncompressible data.
*
* The structure of the .zst file in case of a compressed kernel is as follows.
* Maximum sizes (in bytes) of the fields are in parentheses.
*
* Frame Header: (18)
* Blocks: (N)
* Checksum: (4)
*
* The frame header and checksum overhead is at most 22 bytes.
*
* ZSTD stores the data in blocks. Each block has a header whose size is
* 3 bytes. After the block header, there is up to 128 KB of payload.
* The maximum uncompressed size of the payload is 128 KB. The minimum
* uncompressed size of the payload is never less than the payload size
* (excluding the block header).
*
* The assumption, that the uncompressed size of the payload is never
* smaller than the payload itself, is valid only when talking about
* the payload as a whole. It is possible that the payload has parts where
* the decompressor consumes more input than it produces output. Calculating
* the worst case for this would be tricky. Instead of trying to do that,
* let's simply make sure that the decompressor never overwrites any bytes
* of the payload which it is currently reading.
*
* Now we have enough information to calculate the safety margin. We need
* - 22 bytes for the .zst file format headers;
* - 3 bytes for every 128 KiB of uncompressed size (one block header per
* block); and
* - 128 KiB (biggest possible zstd block size) to make sure that the
* decompressor never overwrites anything from the block it is currently
* reading.
*
* We get the following formula:
*
* safety_margin = 22 + uncompressed_size * 3 / 131072 + 131072
* <= 22 + (uncompressed_size >> 15) + 131072
*/
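/*
 * Worked example (illustrative): for a 64 MiB uncompressed kernel,
 *
 *	safety_margin <= 22 + (67108864 >> 15) + 131072
 *	              == 22 + 2048 + 131072
 *	              == 133142 bytes, i.e. a bit over 128 KiB.
 */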
/*
* Preboot environments #include "path/to/decompress_unzstd.c".
* All of the source files we depend on must be #included.
* zstd's only source dependency is xxhash, which has no source
* dependencies.
*
* When UNZSTD_PREBOOT is defined we declare __decompress(), which is
* used for kernel decompression, instead of unzstd().
*
* Define __DISABLE_EXPORTS in preboot environments to prevent symbols
* from xxhash and zstd from being exported by the EXPORT_SYMBOL macro.
*/
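/*
 * For instance (an illustrative sketch; the exact path is an assumption),
 * a preboot wrapper would typically do:
 *
 *	#define STATIC static
 *	#define __DISABLE_EXPORTS
 *	#include "../../../../lib/decompress_unzstd.c"
 *
 * which pulls in __decompress() below with everything kept static.
 */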
#ifdef STATIC
# define UNZSTD_PREBOOT
# include "xxhash.c"
# include "zstd/decompress_sources.h"
#else
#include <linux/decompress/unzstd.h>
#endif
#include <linux/decompress/mm.h>
#include <linux/kernel.h>
#include <linux/zstd.h>
/* 128MB is the maximum window size supported by zstd. */
#define ZSTD_WINDOWSIZE_MAX (1 << ZSTD_WINDOWLOG_MAX)
/*
* Size of the input and output buffers in multi-call mode.
* Pick a larger size because it isn't used during kernel decompression,
* since that is single pass, and we have to allocate a large buffer for
* zstd's window anyway. The larger size speeds up initramfs decompression.
*/
#define ZSTD_IOBUF_SIZE (1 << 17)
static int INIT handle_zstd_error(size_t ret, void (*error)(char *x))
{
const zstd_error_code err = zstd_get_error_code(ret);
if (!zstd_is_error(ret))
return 0;
/*
* zstd_get_error_name() cannot be used because error takes a char *
* not a const char *
*/
switch (err) {
case ZSTD_error_memory_allocation:
error("ZSTD decompressor ran out of memory");
break;
case ZSTD_error_prefix_unknown:
error("Input is not in the ZSTD format (wrong magic bytes)");
break;
case ZSTD_error_dstSize_tooSmall:
case ZSTD_error_corruption_detected:
case ZSTD_error_checksum_wrong:
error("ZSTD-compressed data is corrupt");
break;
default:
error("ZSTD-compressed data is probably corrupt");
break;
}
return -1;
}
/*
* Handle the case where we have the entire input and output in one segment.
* We can allocate less memory (no circular buffer for the sliding window),
* and avoid some memcpy() calls.
*/
static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf,
long out_len, long *in_pos,
void (*error)(char *x))
{
const size_t wksp_size = zstd_dctx_workspace_bound();
void *wksp = large_malloc(wksp_size);
zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
int err;
size_t ret;
if (dctx == NULL) {
error("Out of memory while allocating zstd_dctx");
err = -1;
goto out;
}
/*
* Find out how large the frame actually is, there may be junk at
* the end of the frame that zstd_decompress_dctx() can't handle.
*/
ret = zstd_find_frame_compressed_size(in_buf, in_len);
err = handle_zstd_error(ret, error);
if (err)
goto out;
in_len = (long)ret;
ret = zstd_decompress_dctx(dctx, out_buf, out_len, in_buf, in_len);
err = handle_zstd_error(ret, error);
if (err)
goto out;
if (in_pos != NULL)
*in_pos = in_len;
err = 0;
out:
if (wksp != NULL)
large_free(wksp);
return err;
}
static int INIT __unzstd(unsigned char *in_buf, long in_len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *out_buf, long out_len,
long *in_pos,
void (*error)(char *x))
{
zstd_in_buffer in;
zstd_out_buffer out;
zstd_frame_header header;
void *in_allocated = NULL;
void *out_allocated = NULL;
void *wksp = NULL;
size_t wksp_size;
zstd_dstream *dstream;
int err;
size_t ret;
/*
* ZSTD decompression code won't be happy if the buffer size is so big
* that its end address overflows. When the size is not provided, make
* it as big as possible without having the end address overflow.
*/
if (out_len == 0)
out_len = UINTPTR_MAX - (uintptr_t)out_buf;
if (fill == NULL && flush == NULL)
/*
* We can decompress faster and with less memory when we have a
* single chunk.
*/
return decompress_single(in_buf, in_len, out_buf, out_len,
in_pos, error);
/*
* If in_buf is not provided, we must be using fill(), so allocate
* a large enough buffer. If it is provided, it must be at least
* ZSTD_IOBUF_SIZE large.
*/
if (in_buf == NULL) {
in_allocated = large_malloc(ZSTD_IOBUF_SIZE);
if (in_allocated == NULL) {
error("Out of memory while allocating input buffer");
err = -1;
goto out;
}
in_buf = in_allocated;
in_len = 0;
}
/* Read the first chunk, since we need to decode the frame header. */
if (fill != NULL)
in_len = fill(in_buf, ZSTD_IOBUF_SIZE);
if (in_len < 0) {
error("ZSTD-compressed data is truncated");
err = -1;
goto out;
}
/* Set the first non-empty input buffer. */
in.src = in_buf;
in.pos = 0;
in.size = in_len;
/* Allocate the output buffer if we are using flush(). */
if (flush != NULL) {
out_allocated = large_malloc(ZSTD_IOBUF_SIZE);
if (out_allocated == NULL) {
error("Out of memory while allocating output buffer");
err = -1;
goto out;
}
out_buf = out_allocated;
out_len = ZSTD_IOBUF_SIZE;
}
/* Set the output buffer. */
out.dst = out_buf;
out.pos = 0;
out.size = out_len;
/*
* We need to know the window size to allocate the zstd_dstream.
* Since we are streaming, we need to allocate a buffer for the sliding
* window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX
* (8 MB), so it is important to use the actual value so as not to
* waste memory when it is smaller.
*/
ret = zstd_get_frame_header(&header, in.src, in.size);
err = handle_zstd_error(ret, error);
if (err)
goto out;
if (ret != 0) {
error("ZSTD-compressed data has an incomplete frame header");
err = -1;
goto out;
}
if (header.windowSize > ZSTD_WINDOWSIZE_MAX) {
error("ZSTD-compressed data has too large a window size");
err = -1;
goto out;
}
/*
* Allocate the zstd_dstream now that we know how much memory is
* required.
*/
wksp_size = zstd_dstream_workspace_bound(header.windowSize);
wksp = large_malloc(wksp_size);
dstream = zstd_init_dstream(header.windowSize, wksp, wksp_size);
if (dstream == NULL) {
error("Out of memory while allocating ZSTD_DStream");
err = -1;
goto out;
}
/*
* Decompression loop:
* Read more data if necessary (error if no more data can be read).
* Call the decompression function, which returns 0 when finished.
* Flush any data produced if using flush().
*/
if (in_pos != NULL)
*in_pos = 0;
do {
/*
* If we need to reload data, either we have fill() and can
* try to get more data, or we don't and the input is truncated.
*/
if (in.pos == in.size) {
if (in_pos != NULL)
*in_pos += in.pos;
in_len = fill ? fill(in_buf, ZSTD_IOBUF_SIZE) : -1;
if (in_len < 0) {
error("ZSTD-compressed data is truncated");
err = -1;
goto out;
}
in.pos = 0;
in.size = in_len;
}
/* Returns zero when the frame is complete. */
ret = zstd_decompress_stream(dstream, &out, &in);
err = handle_zstd_error(ret, error);
if (err)
goto out;
/* Flush all of the data produced if using flush(). */
if (flush != NULL && out.pos > 0) {
if (out.pos != flush(out.dst, out.pos)) {
error("Failed to flush()");
err = -1;
goto out;
}
out.pos = 0;
}
} while (ret != 0);
if (in_pos != NULL)
*in_pos += in.pos;
err = 0;
out:
if (in_allocated != NULL)
large_free(in_allocated);
if (out_allocated != NULL)
large_free(out_allocated);
if (wksp != NULL)
large_free(wksp);
return err;
}
#ifndef UNZSTD_PREBOOT
STATIC int INIT unzstd(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *out_buf,
long *pos,
void (*error)(char *x))
{
return __unzstd(buf, len, fill, flush, out_buf, 0, pos, error);
}
#else
STATIC int INIT __decompress(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *out_buf, long out_len,
long *pos,
void (*error)(char *x))
{
return __unzstd(buf, len, fill, flush, out_buf, out_len, pos, error);
}
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_SPARC64_PERCPU__
#define __ARCH_SPARC64_PERCPU__
#include <linux/compiler.h>
#ifndef BUILD_VDSO
register unsigned long __local_per_cpu_offset asm("g5");
#endif
#ifdef CONFIG_SMP
#include <asm/trap_block.h>
#define __per_cpu_offset(__cpu) \
(trap_block[(__cpu)].__per_cpu_base)
#define per_cpu_offset(x) (__per_cpu_offset(x))
#define __my_cpu_offset __local_per_cpu_offset
#else /* ! SMP */
#endif /* SMP */
#include <asm-generic/percpu.h>
#endif /* __ARCH_SPARC64_PERCPU__ */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2013 Horms Solutions Ltd.
*
* Contact: Simon Horman <[email protected]>
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7779_H__
#define __DT_BINDINGS_CLOCK_R8A7779_H__
/* CPG */
#define R8A7779_CLK_PLLA 0
#define R8A7779_CLK_Z 1
#define R8A7779_CLK_ZS 2
#define R8A7779_CLK_S 3
#define R8A7779_CLK_S1 4
#define R8A7779_CLK_P 5
#define R8A7779_CLK_B 6
#define R8A7779_CLK_OUT 7
/* MSTP 0 */
#define R8A7779_CLK_PWM 5
#define R8A7779_CLK_HSPI 7
#define R8A7779_CLK_TMU2 14
#define R8A7779_CLK_TMU1 15
#define R8A7779_CLK_TMU0 16
#define R8A7779_CLK_HSCIF1 18
#define R8A7779_CLK_HSCIF0 19
#define R8A7779_CLK_SCIF5 21
#define R8A7779_CLK_SCIF4 22
#define R8A7779_CLK_SCIF3 23
#define R8A7779_CLK_SCIF2 24
#define R8A7779_CLK_SCIF1 25
#define R8A7779_CLK_SCIF0 26
#define R8A7779_CLK_I2C3 27
#define R8A7779_CLK_I2C2 28
#define R8A7779_CLK_I2C1 29
#define R8A7779_CLK_I2C0 30
/* MSTP 1 */
#define R8A7779_CLK_USB01 0
#define R8A7779_CLK_USB2 1
#define R8A7779_CLK_DU 3
#define R8A7779_CLK_VIN2 8
#define R8A7779_CLK_VIN1 9
#define R8A7779_CLK_VIN0 10
#define R8A7779_CLK_ETHER 14
#define R8A7779_CLK_SATA 15
#define R8A7779_CLK_PCIE 16
#define R8A7779_CLK_VIN3 20
/* MSTP 3 */
#define R8A7779_CLK_SDHI3 20
#define R8A7779_CLK_SDHI2 21
#define R8A7779_CLK_SDHI1 22
#define R8A7779_CLK_SDHI0 23
#define R8A7779_CLK_MMC1 30
#define R8A7779_CLK_MMC0 31
#endif /* __DT_BINDINGS_CLOCK_R8A7779_H__ */
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Author Karsten Keil <[email protected]>
*
* Copyright 2008 by Karsten Keil <[email protected]>
*/
#include <linux/slab.h>
#include <linux/mISDNif.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/cputime.h>
#include <linux/signal.h>
#include "core.h"
static u_int *debug;
static inline void
_queue_message(struct mISDNstack *st, struct sk_buff *skb)
{
struct mISDNhead *hh = mISDN_HEAD_P(skb);
if (*debug & DEBUG_QUEUE_FUNC)
printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
__func__, hh->prim, hh->id, skb);
skb_queue_tail(&st->msgq, skb);
if (likely(!test_bit(mISDN_STACK_STOPPED, &st->status))) {
test_and_set_bit(mISDN_STACK_WORK, &st->status);
wake_up_interruptible(&st->workq);
}
}
static int
mISDN_queue_message(struct mISDNchannel *ch, struct sk_buff *skb)
{
_queue_message(ch->st, skb);
return 0;
}
static struct mISDNchannel *
get_channel4id(struct mISDNstack *st, u_int id)
{
struct mISDNchannel *ch;
mutex_lock(&st->lmutex);
list_for_each_entry(ch, &st->layer2, list) {
if (id == ch->nr)
goto unlock;
}
ch = NULL;
unlock:
mutex_unlock(&st->lmutex);
return ch;
}
static void
send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
{
struct sock *sk;
struct sk_buff *cskb = NULL;
read_lock(&sl->lock);
sk_for_each(sk, &sl->head) {
if (sk->sk_state != MISDN_BOUND)
continue;
if (!cskb)
cskb = skb_copy(skb, GFP_ATOMIC);
if (!cskb) {
printk(KERN_WARNING "%s no skb\n", __func__);
break;
}
if (!sock_queue_rcv_skb(sk, cskb))
cskb = NULL;
}
read_unlock(&sl->lock);
dev_kfree_skb(cskb);
}
static void
send_layer2(struct mISDNstack *st, struct sk_buff *skb)
{
struct sk_buff *cskb;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
struct mISDNchannel *ch;
int ret;
if (!st)
return;
mutex_lock(&st->lmutex);
if ((hh->id & MISDN_ID_ADDR_MASK) == MISDN_ID_ANY) { /* L2 for all */
list_for_each_entry(ch, &st->layer2, list) {
if (list_is_last(&ch->list, &st->layer2)) {
cskb = skb;
skb = NULL;
} else {
cskb = skb_copy(skb, GFP_KERNEL);
}
if (cskb) {
ret = ch->send(ch, cskb);
if (ret) {
if (*debug & DEBUG_SEND_ERR)
printk(KERN_DEBUG
"%s ch%d prim(%x) addr(%x)"
" err %d\n",
__func__, ch->nr,
hh->prim, ch->addr, ret);
dev_kfree_skb(cskb);
}
} else {
printk(KERN_WARNING "%s ch%d addr %x no mem\n",
__func__, ch->nr, ch->addr);
goto out;
}
}
} else {
list_for_each_entry(ch, &st->layer2, list) {
if ((hh->id & MISDN_ID_ADDR_MASK) == ch->addr) {
ret = ch->send(ch, skb);
if (!ret)
skb = NULL;
goto out;
}
}
ret = st->dev->teimgr->ctrl(st->dev->teimgr, CHECK_DATA, skb);
if (!ret)
skb = NULL;
else if (*debug & DEBUG_SEND_ERR)
printk(KERN_DEBUG
"%s mgr prim(%x) err %d\n",
__func__, hh->prim, ret);
}
out:
mutex_unlock(&st->lmutex);
dev_kfree_skb(skb);
}
static inline int
send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb)
{
struct mISDNhead *hh = mISDN_HEAD_P(skb);
struct mISDNchannel *ch;
int lm;
lm = hh->prim & MISDN_LAYERMASK;
if (*debug & DEBUG_QUEUE_FUNC)
printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
__func__, hh->prim, hh->id, skb);
	if (lm == 0x1) {		/* layer 1 */
if (!hlist_empty(&st->l1sock.head)) {
__net_timestamp(skb);
send_socklist(&st->l1sock, skb);
}
return st->layer1->send(st->layer1, skb);
	} else if (lm == 0x2) {		/* layer 2 */
if (!hlist_empty(&st->l1sock.head))
send_socklist(&st->l1sock, skb);
send_layer2(st, skb);
return 0;
} else if (lm == 0x4) {
ch = get_channel4id(st, hh->id);
if (ch)
return ch->send(ch, skb);
else
printk(KERN_WARNING
"%s: dev(%s) prim(%x) id(%x) no channel\n",
__func__, dev_name(&st->dev->dev), hh->prim,
hh->id);
	} else if (lm == 0x8) {
		WARN_ON(lm == 0x8);	/* not expected here; warn, then try normal delivery */
ch = get_channel4id(st, hh->id);
if (ch)
return ch->send(ch, skb);
else
printk(KERN_WARNING
"%s: dev(%s) prim(%x) id(%x) no channel\n",
__func__, dev_name(&st->dev->dev), hh->prim,
hh->id);
} else {
/* broadcast not handled yet */
printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n",
__func__, dev_name(&st->dev->dev), hh->prim);
}
return -ESRCH;
}
static void
do_clear_stack(struct mISDNstack *st)
{
}
static int
mISDNStackd(void *data)
{
struct mISDNstack *st = data;
#ifdef MISDN_MSG_STATS
u64 utime, stime;
#endif
int err = 0;
	sigfillset(&current->blocked);
if (*debug & DEBUG_MSG_THREAD)
printk(KERN_DEBUG "mISDNStackd %s started\n",
dev_name(&st->dev->dev));
if (st->notify != NULL) {
complete(st->notify);
st->notify = NULL;
}
for (;;) {
struct sk_buff *skb;
if (unlikely(test_bit(mISDN_STACK_STOPPED, &st->status))) {
test_and_clear_bit(mISDN_STACK_WORK, &st->status);
test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
} else
test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
while (test_bit(mISDN_STACK_WORK, &st->status)) {
skb = skb_dequeue(&st->msgq);
if (!skb) {
test_and_clear_bit(mISDN_STACK_WORK,
&st->status);
			/* recheck the queue: a message may have raced in
			 * between the dequeue and clearing the WORK bit
			 */
skb = skb_dequeue(&st->msgq);
if (!skb)
continue;
test_and_set_bit(mISDN_STACK_WORK,
&st->status);
}
#ifdef MISDN_MSG_STATS
st->msg_cnt++;
#endif
err = send_msg_to_layer(st, skb);
if (unlikely(err)) {
if (*debug & DEBUG_SEND_ERR)
printk(KERN_DEBUG
"%s: %s prim(%x) id(%x) "
"send call(%d)\n",
__func__, dev_name(&st->dev->dev),
mISDN_HEAD_PRIM(skb),
mISDN_HEAD_ID(skb), err);
dev_kfree_skb(skb);
continue;
}
if (unlikely(test_bit(mISDN_STACK_STOPPED,
&st->status))) {
test_and_clear_bit(mISDN_STACK_WORK,
&st->status);
test_and_clear_bit(mISDN_STACK_RUNNING,
&st->status);
break;
}
}
if (test_bit(mISDN_STACK_CLEARING, &st->status)) {
test_and_set_bit(mISDN_STACK_STOPPED, &st->status);
test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
do_clear_stack(st);
test_and_clear_bit(mISDN_STACK_CLEARING, &st->status);
test_and_set_bit(mISDN_STACK_RESTART, &st->status);
}
if (test_and_clear_bit(mISDN_STACK_RESTART, &st->status)) {
test_and_clear_bit(mISDN_STACK_STOPPED, &st->status);
test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
if (!skb_queue_empty(&st->msgq))
test_and_set_bit(mISDN_STACK_WORK,
&st->status);
}
if (test_bit(mISDN_STACK_ABORT, &st->status))
break;
if (st->notify != NULL) {
complete(st->notify);
st->notify = NULL;
}
#ifdef MISDN_MSG_STATS
st->sleep_cnt++;
#endif
test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
wait_event_interruptible(st->workq, (st->status &
mISDN_STACK_ACTION_MASK));
if (*debug & DEBUG_MSG_THREAD)
printk(KERN_DEBUG "%s: %s wake status %08lx\n",
__func__, dev_name(&st->dev->dev), st->status);
test_and_set_bit(mISDN_STACK_ACTIVE, &st->status);
test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status);
if (test_bit(mISDN_STACK_STOPPED, &st->status)) {
test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
#ifdef MISDN_MSG_STATS
st->stopped_cnt++;
#endif
}
}
#ifdef MISDN_MSG_STATS
printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d "
"msg %d sleep %d stopped\n",
dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
st->stopped_cnt);
task_cputime(st->thread, &utime, &stime);
printk(KERN_DEBUG
"mISDNStackd daemon for %s utime(%llu) stime(%llu)\n",
dev_name(&st->dev->dev), utime, stime);
printk(KERN_DEBUG
"mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n",
dev_name(&st->dev->dev));
#endif
test_and_set_bit(mISDN_STACK_KILLED, &st->status);
test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
test_and_clear_bit(mISDN_STACK_ABORT, &st->status);
skb_queue_purge(&st->msgq);
st->thread = NULL;
if (st->notify != NULL) {
complete(st->notify);
st->notify = NULL;
}
return 0;
}
static int
l1_receive(struct mISDNchannel *ch, struct sk_buff *skb)
{
if (!ch->st)
return -ENODEV;
__net_timestamp(skb);
_queue_message(ch->st, skb);
return 0;
}
void
set_channel_address(struct mISDNchannel *ch, u_int sapi, u_int tei)
{
ch->addr = sapi | (tei << 8);
}
void
__add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
list_add_tail(&ch->list, &st->layer2);
}
void
add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
mutex_lock(&st->lmutex);
__add_layer2(ch, st);
mutex_unlock(&st->lmutex);
}
static int
st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
if (!ch->st || !ch->st->layer1)
return -EINVAL;
return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg);
}
int
create_stack(struct mISDNdevice *dev)
{
struct mISDNstack *newst;
int err;
DECLARE_COMPLETION_ONSTACK(done);
newst = kzalloc(sizeof(struct mISDNstack), GFP_KERNEL);
if (!newst) {
printk(KERN_ERR "kmalloc mISDN_stack failed\n");
return -ENOMEM;
}
newst->dev = dev;
INIT_LIST_HEAD(&newst->layer2);
INIT_HLIST_HEAD(&newst->l1sock.head);
rwlock_init(&newst->l1sock.lock);
init_waitqueue_head(&newst->workq);
skb_queue_head_init(&newst->msgq);
mutex_init(&newst->lmutex);
dev->D.st = newst;
err = create_teimanager(dev);
if (err) {
printk(KERN_ERR "kmalloc teimanager failed\n");
kfree(newst);
return err;
}
dev->teimgr->peer = &newst->own;
dev->teimgr->recv = mISDN_queue_message;
dev->teimgr->st = newst;
newst->layer1 = &dev->D;
dev->D.recv = l1_receive;
dev->D.peer = &newst->own;
newst->own.st = newst;
newst->own.ctrl = st_own_ctrl;
newst->own.send = mISDN_queue_message;
newst->own.recv = mISDN_queue_message;
if (*debug & DEBUG_CORE_FUNC)
printk(KERN_DEBUG "%s: st(%s)\n", __func__,
dev_name(&newst->dev->dev));
newst->notify = &done;
newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s",
dev_name(&newst->dev->dev));
if (IS_ERR(newst->thread)) {
err = PTR_ERR(newst->thread);
printk(KERN_ERR
"mISDN:cannot create kernel thread for %s (%d)\n",
dev_name(&newst->dev->dev), err);
delete_teimanager(dev->teimgr);
kfree(newst);
} else
wait_for_completion(&done);
return err;
}
int
connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch,
u_int protocol, struct sockaddr_mISDN *adr)
{
struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
struct channel_req rq;
int err;
if (*debug & DEBUG_CORE_FUNC)
printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
__func__, dev_name(&dev->dev), protocol, adr->dev,
adr->channel, adr->sapi, adr->tei);
switch (protocol) {
case ISDN_P_NT_S0:
case ISDN_P_NT_E1:
case ISDN_P_TE_S0:
case ISDN_P_TE_E1:
ch->recv = mISDN_queue_message;
ch->peer = &dev->D.st->own;
ch->st = dev->D.st;
rq.protocol = protocol;
rq.adr.channel = adr->channel;
err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
printk(KERN_DEBUG "%s: ret %d (dev %d)\n", __func__, err,
dev->id);
if (err)
return err;
write_lock_bh(&dev->D.st->l1sock.lock);
sk_add_node(&msk->sk, &dev->D.st->l1sock.head);
write_unlock_bh(&dev->D.st->l1sock.lock);
break;
default:
return -ENOPROTOOPT;
}
return 0;
}
int
connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch,
u_int protocol, struct sockaddr_mISDN *adr)
{
struct channel_req rq, rq2;
int pmask, err;
struct Bprotocol *bp;
if (*debug & DEBUG_CORE_FUNC)
printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
__func__, dev_name(&dev->dev), protocol,
adr->dev, adr->channel, adr->sapi,
adr->tei);
ch->st = dev->D.st;
pmask = 1 << (protocol & ISDN_P_B_MASK);
if (pmask & dev->Bprotocols) {
rq.protocol = protocol;
rq.adr = *adr;
err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
if (err)
return err;
ch->recv = rq.ch->send;
ch->peer = rq.ch;
rq.ch->recv = ch->send;
rq.ch->peer = ch;
rq.ch->st = dev->D.st;
} else {
bp = get_Bprotocol4mask(pmask);
if (!bp)
return -ENOPROTOOPT;
rq2.protocol = protocol;
rq2.adr = *adr;
rq2.ch = ch;
err = bp->create(&rq2);
if (err)
return err;
ch->recv = rq2.ch->send;
ch->peer = rq2.ch;
rq2.ch->st = dev->D.st;
rq.protocol = rq2.protocol;
rq.adr = *adr;
err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
if (err) {
rq2.ch->ctrl(rq2.ch, CLOSE_CHANNEL, NULL);
return err;
}
rq2.ch->recv = rq.ch->send;
rq2.ch->peer = rq.ch;
rq.ch->recv = rq2.ch->send;
rq.ch->peer = rq2.ch;
rq.ch->st = dev->D.st;
}
ch->protocol = protocol;
ch->nr = rq.ch->nr;
return 0;
}
int
create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
u_int protocol, struct sockaddr_mISDN *adr)
{
struct channel_req rq;
int err;
if (*debug & DEBUG_CORE_FUNC)
printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
__func__, dev_name(&dev->dev), protocol,
adr->dev, adr->channel, adr->sapi,
adr->tei);
rq.protocol = ISDN_P_TE_S0;
if (dev->Dprotocols & (1 << ISDN_P_TE_E1))
rq.protocol = ISDN_P_TE_E1;
switch (protocol) {
case ISDN_P_LAPD_NT:
rq.protocol = ISDN_P_NT_S0;
if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
rq.protocol = ISDN_P_NT_E1;
fallthrough;
case ISDN_P_LAPD_TE:
ch->recv = mISDN_queue_message;
ch->peer = &dev->D.st->own;
ch->st = dev->D.st;
rq.adr.channel = 0;
err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
if (err)
break;
rq.protocol = protocol;
rq.adr = *adr;
rq.ch = ch;
err = dev->teimgr->ctrl(dev->teimgr, OPEN_CHANNEL, &rq);
printk(KERN_DEBUG "%s: ret 2 %d\n", __func__, err);
if (!err) {
if ((protocol == ISDN_P_LAPD_NT) && !rq.ch)
break;
add_layer2(rq.ch, dev->D.st);
rq.ch->recv = mISDN_queue_message;
rq.ch->peer = &dev->D.st->own;
rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL); /* can't fail */
}
break;
default:
err = -EPROTONOSUPPORT;
}
return err;
}
void
delete_channel(struct mISDNchannel *ch)
{
struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
struct mISDNchannel *pch;
if (!ch->st) {
printk(KERN_WARNING "%s: no stack\n", __func__);
return;
}
if (*debug & DEBUG_CORE_FUNC)
printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__,
dev_name(&ch->st->dev->dev), ch->protocol);
if (ch->protocol >= ISDN_P_B_START) {
if (ch->peer) {
ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL);
ch->peer = NULL;
}
return;
}
switch (ch->protocol) {
case ISDN_P_NT_S0:
case ISDN_P_TE_S0:
case ISDN_P_NT_E1:
case ISDN_P_TE_E1:
write_lock_bh(&ch->st->l1sock.lock);
sk_del_node_init(&msk->sk);
write_unlock_bh(&ch->st->l1sock.lock);
ch->st->dev->D.ctrl(&ch->st->dev->D, CLOSE_CHANNEL, NULL);
break;
case ISDN_P_LAPD_TE:
pch = get_channel4id(ch->st, ch->nr);
if (pch) {
mutex_lock(&ch->st->lmutex);
list_del(&pch->list);
mutex_unlock(&ch->st->lmutex);
pch->ctrl(pch, CLOSE_CHANNEL, NULL);
pch = ch->st->dev->teimgr;
pch->ctrl(pch, CLOSE_CHANNEL, NULL);
} else
printk(KERN_WARNING "%s: no l2 channel\n",
__func__);
break;
case ISDN_P_LAPD_NT:
pch = ch->st->dev->teimgr;
if (pch) {
pch->ctrl(pch, CLOSE_CHANNEL, NULL);
} else
printk(KERN_WARNING "%s: no l2 channel\n",
__func__);
break;
default:
break;
}
return;
}
void
delete_stack(struct mISDNdevice *dev)
{
struct mISDNstack *st = dev->D.st;
DECLARE_COMPLETION_ONSTACK(done);
if (*debug & DEBUG_CORE_FUNC)
printk(KERN_DEBUG "%s: st(%s)\n", __func__,
dev_name(&st->dev->dev));
if (dev->teimgr)
delete_teimanager(dev->teimgr);
if (st->thread) {
if (st->notify) {
printk(KERN_WARNING "%s: notifier in use\n",
__func__);
complete(st->notify);
}
st->notify = &done;
test_and_set_bit(mISDN_STACK_ABORT, &st->status);
test_and_set_bit(mISDN_STACK_WAKEUP, &st->status);
wake_up_interruptible(&st->workq);
wait_for_completion(&done);
}
if (!list_empty(&st->layer2))
printk(KERN_WARNING "%s: layer2 list not empty\n",
__func__);
if (!hlist_empty(&st->l1sock.head))
printk(KERN_WARNING "%s: layer1 list not empty\n",
__func__);
kfree(st);
}
void
mISDN_initstack(u_int *dp)
{
debug = dp;
}
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2022 - All Rights Reserved
* Author: Gabriel Fernandez <[email protected]> for STMicroelectronics.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "clk-stm32-core.h"
#include "reset-stm32.h"
static DEFINE_SPINLOCK(rlock);
static int stm32_rcc_clock_init(struct device *dev,
const struct of_device_id *match,
void __iomem *base)
{
const struct stm32_rcc_match_data *data = match->data;
	struct clk_hw_onecell_data *clk_data;
struct clk_hw **hws;
int n, max_binding;
max_binding = data->maxbinding;
clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, max_binding), GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
clk_data->num = max_binding;
hws = clk_data->hws;
for (n = 0; n < max_binding; n++)
hws[n] = ERR_PTR(-ENOENT);
for (n = 0; n < data->num_clocks; n++) {
const struct clock_config *cfg_clock = &data->tab_clocks[n];
struct clk_hw *hw = ERR_PTR(-ENOENT);
if (data->check_security &&
data->check_security(dev->of_node, base, cfg_clock))
continue;
if (cfg_clock->func)
hw = (*cfg_clock->func)(dev, data, base, &rlock,
cfg_clock);
if (IS_ERR(hw)) {
dev_err(dev, "Can't register clk %d: %ld\n", n,
PTR_ERR(hw));
return PTR_ERR(hw);
}
if (cfg_clock->id != NO_ID)
hws[cfg_clock->id] = hw;
}
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
}
int stm32_rcc_init(struct device *dev, const struct of_device_id *match_data,
void __iomem *base)
{
const struct stm32_rcc_match_data *rcc_match_data;
const struct of_device_id *match;
int err;
match = of_match_node(match_data, dev_of_node(dev));
if (!match) {
dev_err(dev, "match data not found\n");
return -ENODEV;
}
rcc_match_data = match->data;
/* RCC Reset Configuration */
err = stm32_rcc_reset_init(dev, rcc_match_data->reset_data, base);
if (err) {
pr_err("stm32 reset failed to initialize\n");
return err;
}
/* RCC Clock Configuration */
err = stm32_rcc_clock_init(dev, match, base);
if (err) {
pr_err("stm32 clock failed to initialize\n");
return err;
}
return 0;
}
static u8 stm32_mux_get_parent(void __iomem *base,
struct clk_stm32_clock_data *data,
u16 mux_id)
{
const struct stm32_mux_cfg *mux = &data->muxes[mux_id];
u32 mask = BIT(mux->width) - 1;
u32 val;
val = readl(base + mux->offset) >> mux->shift;
val &= mask;
return val;
}
static int stm32_mux_set_parent(void __iomem *base,
struct clk_stm32_clock_data *data,
u16 mux_id, u8 index)
{
const struct stm32_mux_cfg *mux = &data->muxes[mux_id];
u32 mask = BIT(mux->width) - 1;
u32 reg = readl(base + mux->offset);
u32 val = index << mux->shift;
reg &= ~(mask << mux->shift);
reg |= val;
writel(reg, base + mux->offset);
return 0;
}
static void stm32_gate_endisable(void __iomem *base,
struct clk_stm32_clock_data *data,
u16 gate_id, int enable)
{
const struct stm32_gate_cfg *gate = &data->gates[gate_id];
void __iomem *addr = base + gate->offset;
	if (enable) {
		/* gates may be shared: only the first enable touches hw */
		if (data->gate_cpt[gate_id]++ > 0)
			return;
		if (gate->set_clr != 0)
			/* dedicated set register: writing the bit enables */
			writel(BIT(gate->bit_idx), addr);
		else
			writel(readl(addr) | BIT(gate->bit_idx), addr);
	} else {
		/* only the last disable touches hw */
		if (--data->gate_cpt[gate_id] > 0)
			return;
		if (gate->set_clr != 0)
			/* matching clear register lives at offset + set_clr */
			writel(BIT(gate->bit_idx), addr + gate->set_clr);
		else
			writel(readl(addr) & ~BIT(gate->bit_idx), addr);
	}
}
static void stm32_gate_disable_unused(void __iomem *base,
struct clk_stm32_clock_data *data,
u16 gate_id)
{
const struct stm32_gate_cfg *gate = &data->gates[gate_id];
void __iomem *addr = base + gate->offset;
if (data->gate_cpt[gate_id] > 0)
return;
if (gate->set_clr != 0)
writel(BIT(gate->bit_idx), addr + gate->set_clr);
else
writel(readl(addr) & ~BIT(gate->bit_idx), addr);
}
static int stm32_gate_is_enabled(void __iomem *base,
struct clk_stm32_clock_data *data,
u16 gate_id)
{
const struct stm32_gate_cfg *gate = &data->gates[gate_id];
return (readl(base + gate->offset) & BIT(gate->bit_idx)) != 0;
}
static unsigned int _get_table_div(const struct clk_div_table *table,
unsigned int val)
{
const struct clk_div_table *clkt;
for (clkt = table; clkt->div; clkt++)
if (clkt->val == val)
return clkt->div;
return 0;
}
static unsigned int _get_div(const struct clk_div_table *table,
unsigned int val, unsigned long flags, u8 width)
{
if (flags & CLK_DIVIDER_ONE_BASED)
return val;
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return 1 << val;
if (table)
return _get_table_div(table, val);
return val + 1;
}
static unsigned long stm32_divider_get_rate(void __iomem *base,
struct clk_stm32_clock_data *data,
u16 div_id,
unsigned long parent_rate)
{
const struct stm32_div_cfg *divider = &data->dividers[div_id];
unsigned int val;
unsigned int div;
val = readl(base + divider->offset) >> divider->shift;
val &= clk_div_mask(divider->width);
div = _get_div(divider->table, val, divider->flags, divider->width);
if (!div) {
WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
"%d: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
div_id);
return parent_rate;
}
return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
static int stm32_divider_set_rate(void __iomem *base,
struct clk_stm32_clock_data *data,
u16 div_id, unsigned long rate,
unsigned long parent_rate)
{
const struct stm32_div_cfg *divider = &data->dividers[div_id];
int value;
u32 val;
value = divider_get_val(rate, parent_rate, divider->table,
divider->width, divider->flags);
if (value < 0)
return value;
if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
val = clk_div_mask(divider->width) << (divider->shift + 16);
} else {
val = readl(base + divider->offset);
val &= ~(clk_div_mask(divider->width) << divider->shift);
}
val |= (u32)value << divider->shift;
writel(val, base + divider->offset);
return 0;
}
static u8 clk_stm32_mux_get_parent(struct clk_hw *hw)
{
struct clk_stm32_mux *mux = to_clk_stm32_mux(hw);
return stm32_mux_get_parent(mux->base, mux->clock_data, mux->mux_id);
}
static int clk_stm32_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_stm32_mux *mux = to_clk_stm32_mux(hw);
unsigned long flags = 0;
spin_lock_irqsave(mux->lock, flags);
stm32_mux_set_parent(mux->base, mux->clock_data, mux->mux_id, index);
spin_unlock_irqrestore(mux->lock, flags);
return 0;
}
const struct clk_ops clk_stm32_mux_ops = {
.determine_rate = __clk_mux_determine_rate,
.get_parent = clk_stm32_mux_get_parent,
.set_parent = clk_stm32_mux_set_parent,
};
static void clk_stm32_gate_endisable(struct clk_hw *hw, int enable)
{
struct clk_stm32_gate *gate = to_clk_stm32_gate(hw);
unsigned long flags = 0;
spin_lock_irqsave(gate->lock, flags);
stm32_gate_endisable(gate->base, gate->clock_data, gate->gate_id, enable);
spin_unlock_irqrestore(gate->lock, flags);
}
static int clk_stm32_gate_enable(struct clk_hw *hw)
{
clk_stm32_gate_endisable(hw, 1);
return 0;
}
static void clk_stm32_gate_disable(struct clk_hw *hw)
{
clk_stm32_gate_endisable(hw, 0);
}
static int clk_stm32_gate_is_enabled(struct clk_hw *hw)
{
struct clk_stm32_gate *gate = to_clk_stm32_gate(hw);
return stm32_gate_is_enabled(gate->base, gate->clock_data, gate->gate_id);
}
static void clk_stm32_gate_disable_unused(struct clk_hw *hw)
{
struct clk_stm32_gate *gate = to_clk_stm32_gate(hw);
unsigned long flags = 0;
spin_lock_irqsave(gate->lock, flags);
stm32_gate_disable_unused(gate->base, gate->clock_data, gate->gate_id);
spin_unlock_irqrestore(gate->lock, flags);
}
const struct clk_ops clk_stm32_gate_ops = {
.enable = clk_stm32_gate_enable,
.disable = clk_stm32_gate_disable,
.is_enabled = clk_stm32_gate_is_enabled,
.disable_unused = clk_stm32_gate_disable_unused,
};
static int clk_stm32_divider_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_stm32_div *div = to_clk_stm32_divider(hw);
unsigned long flags = 0;
int ret;
if (div->div_id == NO_STM32_DIV)
return rate;
spin_lock_irqsave(div->lock, flags);
ret = stm32_divider_set_rate(div->base, div->clock_data, div->div_id, rate, parent_rate);
spin_unlock_irqrestore(div->lock, flags);
return ret;
}
static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_stm32_div *div = to_clk_stm32_divider(hw);
const struct stm32_div_cfg *divider;
if (div->div_id == NO_STM32_DIV)
return rate;
divider = &div->clock_data->dividers[div->div_id];
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
u32 val;
val = readl(div->base + divider->offset) >> divider->shift;
val &= clk_div_mask(divider->width);
return divider_ro_round_rate(hw, rate, prate, divider->table,
divider->width, divider->flags,
val);
}
return divider_round_rate_parent(hw, clk_hw_get_parent(hw),
rate, prate, divider->table,
divider->width, divider->flags);
}
static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_stm32_div *div = to_clk_stm32_divider(hw);
if (div->div_id == NO_STM32_DIV)
return parent_rate;
return stm32_divider_get_rate(div->base, div->clock_data, div->div_id, parent_rate);
}
const struct clk_ops clk_stm32_divider_ops = {
.recalc_rate = clk_stm32_divider_recalc_rate,
.round_rate = clk_stm32_divider_round_rate,
.set_rate = clk_stm32_divider_set_rate,
};
static int clk_stm32_composite_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
unsigned long flags = 0;
int ret;
if (composite->div_id == NO_STM32_DIV)
return rate;
spin_lock_irqsave(composite->lock, flags);
ret = stm32_divider_set_rate(composite->base, composite->clock_data,
composite->div_id, rate, parent_rate);
spin_unlock_irqrestore(composite->lock, flags);
return ret;
}
static unsigned long clk_stm32_composite_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
if (composite->div_id == NO_STM32_DIV)
return parent_rate;
return stm32_divider_get_rate(composite->base, composite->clock_data,
composite->div_id, parent_rate);
}
static int clk_stm32_composite_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
const struct stm32_div_cfg *divider;
long rate;
if (composite->div_id == NO_STM32_DIV)
return 0;
divider = &composite->clock_data->dividers[composite->div_id];
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
u32 val;
val = readl(composite->base + divider->offset) >> divider->shift;
val &= clk_div_mask(divider->width);
rate = divider_ro_round_rate(hw, req->rate, &req->best_parent_rate,
divider->table, divider->width, divider->flags,
val);
if (rate < 0)
return rate;
req->rate = rate;
return 0;
}
rate = divider_round_rate_parent(hw, clk_hw_get_parent(hw),
req->rate, &req->best_parent_rate,
divider->table, divider->width, divider->flags);
if (rate < 0)
return rate;
req->rate = rate;
return 0;
}
static u8 clk_stm32_composite_get_parent(struct clk_hw *hw)
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
return stm32_mux_get_parent(composite->base, composite->clock_data, composite->mux_id);
}
static int clk_stm32_composite_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
unsigned long flags = 0;
spin_lock_irqsave(composite->lock, flags);
stm32_mux_set_parent(composite->base, composite->clock_data, composite->mux_id, index);
spin_unlock_irqrestore(composite->lock, flags);
if (composite->clock_data->is_multi_mux) {
struct clk_hw *other_mux_hw = composite->clock_data->is_multi_mux(hw);
if (other_mux_hw) {
struct clk_hw *hwp = clk_hw_get_parent_by_index(hw, index);
clk_hw_reparent(other_mux_hw, hwp);
}
}
return 0;
}
static int clk_stm32_composite_is_enabled(struct clk_hw *hw)
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
if (composite->gate_id == NO_STM32_GATE)
return (__clk_get_enable_count(hw->clk) > 0);
return stm32_gate_is_enabled(composite->base, composite->clock_data, composite->gate_id);
}
#define MUX_SAFE_POSITION 0
static int clk_stm32_has_safe_mux(struct clk_hw *hw)
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
const struct stm32_mux_cfg *mux = &composite->clock_data->muxes[composite->mux_id];
return !!(mux->flags & MUX_SAFE);
}
static void clk_stm32_set_safe_position_mux(struct clk_hw *hw)
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
if (!clk_stm32_composite_is_enabled(hw)) {
unsigned long flags = 0;
if (composite->clock_data->is_multi_mux) {
struct clk_hw *other_mux_hw = NULL;
other_mux_hw = composite->clock_data->is_multi_mux(hw);
if (!other_mux_hw || clk_stm32_composite_is_enabled(other_mux_hw))
return;
}
spin_lock_irqsave(composite->lock, flags);
stm32_mux_set_parent(composite->base, composite->clock_data,
composite->mux_id, MUX_SAFE_POSITION);
spin_unlock_irqrestore(composite->lock, flags);
}
}
static void clk_stm32_safe_restore_position_mux(struct clk_hw *hw)
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
int sel = clk_hw_get_parent_index(hw);
unsigned long flags = 0;
spin_lock_irqsave(composite->lock, flags);
stm32_mux_set_parent(composite->base, composite->clock_data, composite->mux_id, sel);
spin_unlock_irqrestore(composite->lock, flags);
}
static void clk_stm32_composite_gate_endisable(struct clk_hw *hw, int enable)
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
unsigned long flags = 0;
spin_lock_irqsave(composite->lock, flags);
stm32_gate_endisable(composite->base, composite->clock_data, composite->gate_id, enable);
spin_unlock_irqrestore(composite->lock, flags);
}
static int clk_stm32_composite_gate_enable(struct clk_hw *hw)
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
if (composite->gate_id == NO_STM32_GATE)
return 0;
clk_stm32_composite_gate_endisable(hw, 1);
if (composite->mux_id != NO_STM32_MUX && clk_stm32_has_safe_mux(hw))
clk_stm32_safe_restore_position_mux(hw);
return 0;
}
static void clk_stm32_composite_gate_disable(struct clk_hw *hw)
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
if (composite->gate_id == NO_STM32_GATE)
return;
clk_stm32_composite_gate_endisable(hw, 0);
if (composite->mux_id != NO_STM32_MUX && clk_stm32_has_safe_mux(hw))
clk_stm32_set_safe_position_mux(hw);
}
static void clk_stm32_composite_disable_unused(struct clk_hw *hw)
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
unsigned long flags = 0;
if (composite->gate_id == NO_STM32_GATE)
return;
spin_lock_irqsave(composite->lock, flags);
stm32_gate_disable_unused(composite->base, composite->clock_data, composite->gate_id);
spin_unlock_irqrestore(composite->lock, flags);
}
const struct clk_ops clk_stm32_composite_ops = {
.set_rate = clk_stm32_composite_set_rate,
.recalc_rate = clk_stm32_composite_recalc_rate,
.determine_rate = clk_stm32_composite_determine_rate,
.get_parent = clk_stm32_composite_get_parent,
.set_parent = clk_stm32_composite_set_parent,
.enable = clk_stm32_composite_gate_enable,
.disable = clk_stm32_composite_gate_disable,
.is_enabled = clk_stm32_composite_is_enabled,
.disable_unused = clk_stm32_composite_disable_unused,
};
struct clk_hw *clk_stm32_mux_register(struct device *dev,
const struct stm32_rcc_match_data *data,
void __iomem *base,
spinlock_t *lock,
const struct clock_config *cfg)
{
struct clk_stm32_mux *mux = cfg->clock_cfg;
struct clk_hw *hw = &mux->hw;
int err;
mux->base = base;
mux->lock = lock;
mux->clock_data = data->clock_data;
err = devm_clk_hw_register(dev, hw);
if (err)
return ERR_PTR(err);
return hw;
}
struct clk_hw *clk_stm32_gate_register(struct device *dev,
const struct stm32_rcc_match_data *data,
void __iomem *base,
spinlock_t *lock,
const struct clock_config *cfg)
{
struct clk_stm32_gate *gate = cfg->clock_cfg;
struct clk_hw *hw = &gate->hw;
int err;
gate->base = base;
gate->lock = lock;
gate->clock_data = data->clock_data;
err = devm_clk_hw_register(dev, hw);
if (err)
return ERR_PTR(err);
return hw;
}
struct clk_hw *clk_stm32_div_register(struct device *dev,
const struct stm32_rcc_match_data *data,
void __iomem *base,
spinlock_t *lock,
const struct clock_config *cfg)
{
struct clk_stm32_div *div = cfg->clock_cfg;
struct clk_hw *hw = &div->hw;
int err;
div->base = base;
div->lock = lock;
div->clock_data = data->clock_data;
err = devm_clk_hw_register(dev, hw);
if (err)
return ERR_PTR(err);
return hw;
}
struct clk_hw *clk_stm32_composite_register(struct device *dev,
const struct stm32_rcc_match_data *data,
void __iomem *base,
spinlock_t *lock,
const struct clock_config *cfg)
{
struct clk_stm32_composite *composite = cfg->clock_cfg;
struct clk_hw *hw = &composite->hw;
int err;
composite->base = base;
composite->lock = lock;
composite->clock_data = data->clock_data;
err = devm_clk_hw_register(dev, hw);
if (err)
return ERR_PTR(err);
return hw;
}
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2006 Intel Corporation. */
#include "e1000.h"
/* This is the only thing that needs to be changed to adjust the
 * maximum number of NICs (boards) that the driver can manage.
*/
#define E1000_MAX_NIC 32
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
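/*
 * Illustration (editor's sketch): E1000_PARAM(TxDescriptors, "...")
 * expands to roughly the following, so each parameter becomes a
 * per-board array plus a count of how many values the user supplied:
 *
 *	static int TxDescriptors[E1000_MAX_NIC + 1] = E1000_PARAM_INIT;
 *	static unsigned int num_TxDescriptors;
 *	module_param_array_named(TxDescriptors, TxDescriptors, int,
 *				 &num_TxDescriptors, 0);
 *	MODULE_PARM_DESC(TxDescriptors, "...");
 *
 * Values are then given per NIC as a comma-separated list at load
 * time, e.g. (hypothetical values): modprobe e1000 TxDescriptors=256,4096
 */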
/* Transmit Descriptor Count
*
* Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
* Valid Range: 80-4096 for 82544 and newer
*
* Default Value: 256
*/
E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
/* Receive Descriptor Count
*
* Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
* Valid Range: 80-4096 for 82544 and newer
*
* Default Value: 256
*/
E1000_PARAM(RxDescriptors, "Number of receive descriptors");
/* User Specified Speed Override
*
* Valid Range: 0, 10, 100, 1000
* - 0 - auto-negotiate at all supported speeds
* - 10 - only link at 10 Mbps
* - 100 - only link at 100 Mbps
* - 1000 - only link at 1000 Mbps
*
* Default Value: 0
*/
E1000_PARAM(Speed, "Speed setting");
/* User Specified Duplex Override
*
* Valid Range: 0-2
* - 0 - auto-negotiate for duplex
* - 1 - only link at half duplex
* - 2 - only link at full duplex
*
* Default Value: 0
*/
E1000_PARAM(Duplex, "Duplex setting");
/* Auto-negotiation Advertisement Override
*
* Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
*
* The AutoNeg value is a bit mask describing which speed and duplex
* combinations should be advertised during auto-negotiation.
* The supported speed and duplex modes are listed below
*
 * Bit           7     6     5      4      3     2     1      0
 * Speed (Mbps)  N/A   N/A   1000   N/A    100   100   10     10
 * Duplex                    Full          Full  Half  Full   Half
*
* Default Value: 0x2F (copper); 0x20 (fiber)
*/
E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
#define AUTONEG_ADV_DEFAULT 0x2F
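/*
 * Worked example (editor's note): decoding the default copper mask
 * AUTONEG_ADV_DEFAULT = 0x2F = 0b00101111 against the table above:
 *
 *	bit 5 (0x20) -> 1000 Mbps Full Duplex
 *	bit 3 (0x08) -> 100 Mbps Full Duplex
 *	bit 2 (0x04) -> 100 Mbps Half Duplex
 *	bit 1 (0x02) -> 10 Mbps Full Duplex
 *	bit 0 (0x01) -> 10 Mbps Half Duplex
 *
 * i.e. all supported speed/duplex combinations (half duplex is not
 * supported at 1000 Mbps).
 */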
/* User Specified Flow Control Override
*
* Valid Range: 0-3
* - 0 - No Flow Control
* - 1 - Rx only, respond to PAUSE frames but do not generate them
* - 2 - Tx only, generate PAUSE frames but ignore them on receive
* - 3 - Full Flow Control Support
*
* Default Value: Read flow control settings from the EEPROM
*/
E1000_PARAM(FlowControl, "Flow Control setting");
/* XsumRX - Receive Checksum Offload Enable/Disable
*
* Valid Range: 0, 1
* - 0 - disables all checksum offload
* - 1 - enables receive IP/TCP/UDP checksum offload
 *       on 82543 and newer NICs
*
* Default Value: 1
*/
E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
/* Transmit Interrupt Delay in units of 1.024 microseconds
 * Tx interrupt delay typically needs to be set to a non-zero value
*
* Valid Range: 0-65535
*/
E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
#define DEFAULT_TIDV 8
#define MAX_TXDELAY 0xFFFF
#define MIN_TXDELAY 0
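/*
 * Worked arithmetic (editor's note): the delay registers count in
 * units of 1.024 us, so the default TIDV of 8 delays the transmit
 * interrupt by 8 * 1.024 us = 8.192 us.
 */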
/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
#define DEFAULT_TADV 32
#define MAX_TXABSDELAY 0xFFFF
#define MIN_TXABSDELAY 0
/* Receive Interrupt Delay in units of 1.024 microseconds
* hardware will likely hang if you set this to anything but zero.
*
* Valid Range: 0-65535
*/
E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
#define DEFAULT_RDTR 0
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0
/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
#define DEFAULT_RADV 8
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0
/* Interrupt Throttle Rate (interrupts/sec)
*
 * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
 * 4=simplified (2000-8000 ints) mode)
*/
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define DEFAULT_ITR 3
#define MAX_ITR 100000
#define MIN_ITR 100
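/*
 * Worked arithmetic (editor's note): an ITR of 20000 ints/sec (the
 * value installed below for the dynamic modes) caps interrupts at one
 * per 1 s / 20000 = 50 us; MAX_ITR of 100000 corresponds to 10 us.
 */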
/* Enable Smart Power Down of the PHY
*
* Valid Range: 0, 1
*
* Default Value: 0 (disabled)
*/
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
struct e1000_option {
enum { enable_option, range_option, list_option } type;
const char *name;
const char *err;
int def;
union {
struct { /* range_option info */
int min;
int max;
} r;
struct { /* list_option info */
int nr;
const struct e1000_opt_list { int i; char *str; } *p;
} l;
} arg;
};
static int e1000_validate_option(unsigned int *value,
const struct e1000_option *opt,
struct e1000_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
switch (opt->type) {
case enable_option:
switch (*value) {
case OPTION_ENABLED:
e_dev_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
e_dev_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
e_dev_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
case list_option: {
int i;
const struct e1000_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
e_dev_info("%s\n", ent->str);
return 0;
}
}
}
break;
default:
BUG();
}
e_dev_info("Invalid %s value specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
static void e1000_check_fiber_options(struct e1000_adapter *adapter);
static void e1000_check_copper_options(struct e1000_adapter *adapter);
/**
* e1000_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
* This routine checks all command line parameters for valid user
* input. If an invalid value is given, or if no user specified
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
void e1000_check_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
e_dev_warn("Warning: no configuration for board #%i "
"using defaults for all values\n", bd);
}
{ /* Transmit Descriptor Count */
struct e1000_tx_ring *tx_ring = adapter->tx_ring;
int i;
e1000_mac_type mac_type = adapter->hw.mac_type;
opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Descriptors",
.err = "using default of "
__MODULE_STRING(E1000_DEFAULT_TXD),
.def = E1000_DEFAULT_TXD,
.arg = { .r = {
.min = E1000_MIN_TXD,
.max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD
}}
};
if (num_TxDescriptors > bd) {
tx_ring->count = TxDescriptors[bd];
e1000_validate_option(&tx_ring->count, &opt, adapter);
tx_ring->count = ALIGN(tx_ring->count,
REQ_TX_DESCRIPTOR_MULTIPLE);
} else {
tx_ring->count = opt.def;
}
for (i = 0; i < adapter->num_tx_queues; i++)
tx_ring[i].count = tx_ring->count;
}
{ /* Receive Descriptor Count */
struct e1000_rx_ring *rx_ring = adapter->rx_ring;
int i;
e1000_mac_type mac_type = adapter->hw.mac_type;
opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Descriptors",
.err = "using default of "
__MODULE_STRING(E1000_DEFAULT_RXD),
.def = E1000_DEFAULT_RXD,
.arg = { .r = {
.min = E1000_MIN_RXD,
.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
E1000_MAX_82544_RXD
}}
};
if (num_RxDescriptors > bd) {
rx_ring->count = RxDescriptors[bd];
e1000_validate_option(&rx_ring->count, &opt, adapter);
rx_ring->count = ALIGN(rx_ring->count,
REQ_RX_DESCRIPTOR_MULTIPLE);
} else {
rx_ring->count = opt.def;
}
for (i = 0; i < adapter->num_rx_queues; i++)
rx_ring[i].count = rx_ring->count;
}
{ /* Checksum Offload Enable/Disable */
opt = (struct e1000_option) {
.type = enable_option,
.name = "Checksum Offload",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if (num_XsumRX > bd) {
unsigned int rx_csum = XsumRX[bd];
e1000_validate_option(&rx_csum, &opt, adapter);
adapter->rx_csum = rx_csum;
} else {
adapter->rx_csum = opt.def;
}
}
{ /* Flow Control */
static const struct e1000_opt_list fc_list[] = {
{ E1000_FC_NONE, "Flow Control Disabled" },
{ E1000_FC_RX_PAUSE, "Flow Control Receive Only" },
{ E1000_FC_TX_PAUSE, "Flow Control Transmit Only" },
{ E1000_FC_FULL, "Flow Control Enabled" },
{ E1000_FC_DEFAULT, "Flow Control Hardware Default" }
};
opt = (struct e1000_option) {
.type = list_option,
.name = "Flow Control",
.err = "reading default settings from EEPROM",
.def = E1000_FC_DEFAULT,
.arg = { .l = { .nr = ARRAY_SIZE(fc_list),
.p = fc_list }}
};
if (num_FlowControl > bd) {
unsigned int fc = FlowControl[bd];
e1000_validate_option(&fc, &opt, adapter);
adapter->hw.fc = adapter->hw.original_fc = fc;
} else {
adapter->hw.fc = adapter->hw.original_fc = opt.def;
}
}
{ /* Transmit Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
.def = DEFAULT_TIDV,
.arg = { .r = { .min = MIN_TXDELAY,
.max = MAX_TXDELAY }}
};
if (num_TxIntDelay > bd) {
adapter->tx_int_delay = TxIntDelay[bd];
e1000_validate_option(&adapter->tx_int_delay, &opt,
adapter);
} else {
adapter->tx_int_delay = opt.def;
}
}
{ /* Transmit Absolute Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TADV),
.def = DEFAULT_TADV,
.arg = { .r = { .min = MIN_TXABSDELAY,
.max = MAX_TXABSDELAY }}
};
if (num_TxAbsIntDelay > bd) {
adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
adapter);
} else {
adapter->tx_abs_int_delay = opt.def;
}
}
{ /* Receive Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
.def = DEFAULT_RDTR,
.arg = { .r = { .min = MIN_RXDELAY,
.max = MAX_RXDELAY }}
};
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
adapter);
} else {
adapter->rx_int_delay = opt.def;
}
}
{ /* Receive Absolute Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RADV),
.def = DEFAULT_RADV,
.arg = { .r = { .min = MIN_RXABSDELAY,
.max = MAX_RXABSDELAY }}
};
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
adapter);
} else {
adapter->rx_abs_int_delay = opt.def;
}
}
{ /* Interrupt Throttling Rate */
opt = (struct e1000_option) {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
.err = "using default of " __MODULE_STRING(DEFAULT_ITR),
.def = DEFAULT_ITR,
.arg = { .r = { .min = MIN_ITR,
.max = MAX_ITR }}
};
if (num_InterruptThrottleRate > bd) {
adapter->itr = InterruptThrottleRate[bd];
switch (adapter->itr) {
case 0:
e_dev_info("%s turned off\n", opt.name);
break;
case 1:
e_dev_info("%s set to dynamic mode\n",
opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 3:
e_dev_info("%s set to dynamic conservative "
"mode\n", opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 4:
e_dev_info("%s set to simplified "
"(2000-8000) ints mode\n", opt.name);
adapter->itr_setting = adapter->itr;
break;
default:
e1000_validate_option(&adapter->itr, &opt,
adapter);
/* save the setting, because the dynamic bits
* change itr.
* clear the lower two bits because they are
* used as control
*/
adapter->itr_setting = adapter->itr & ~3;
break;
}
} else {
adapter->itr_setting = opt.def;
adapter->itr = 20000;
}
}
{ /* Smart Power Down */
opt = (struct e1000_option) {
.type = enable_option,
.name = "PHY Smart Power Down",
.err = "defaulting to Disabled",
.def = OPTION_DISABLED
};
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
e1000_validate_option(&spd, &opt, adapter);
adapter->smart_power_down = spd;
} else {
adapter->smart_power_down = opt.def;
}
}
switch (adapter->hw.media_type) {
case e1000_media_type_fiber:
case e1000_media_type_internal_serdes:
e1000_check_fiber_options(adapter);
break;
case e1000_media_type_copper:
e1000_check_copper_options(adapter);
break;
default:
BUG();
}
}
/**
* e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
* @adapter: board private structure
*
* Handles speed and duplex options on fiber adapters
**/
static void e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (num_Speed > bd) {
e_dev_info("Speed not valid for fiber adapters, parameter "
"ignored\n");
}
if (num_Duplex > bd) {
e_dev_info("Duplex not valid for fiber adapters, parameter "
"ignored\n");
}
if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
e_dev_info("AutoNeg other than 1000/Full is not valid for fiber"
"adapters, parameter ignored\n");
}
}
/**
* e1000_check_copper_options - Range Checking for Link Options, Copper Version
* @adapter: board private structure
*
* Handles speed and duplex options on copper adapters
**/
static void e1000_check_copper_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
unsigned int speed, dplx, an;
int bd = adapter->bd_number;
{ /* Speed */
static const struct e1000_opt_list speed_list[] = {
{ 0, "" },
{ SPEED_10, "" },
{ SPEED_100, "" },
{ SPEED_1000, "" }};
opt = (struct e1000_option) {
.type = list_option,
.name = "Speed",
.err = "parameter ignored",
.def = 0,
.arg = { .l = { .nr = ARRAY_SIZE(speed_list),
.p = speed_list }}
};
if (num_Speed > bd) {
speed = Speed[bd];
e1000_validate_option(&speed, &opt, adapter);
} else {
speed = opt.def;
}
}
{ /* Duplex */
static const struct e1000_opt_list dplx_list[] = {
{ 0, "" },
{ HALF_DUPLEX, "" },
{ FULL_DUPLEX, "" }};
opt = (struct e1000_option) {
.type = list_option,
.name = "Duplex",
.err = "parameter ignored",
.def = 0,
.arg = { .l = { .nr = ARRAY_SIZE(dplx_list),
.p = dplx_list }}
};
if (num_Duplex > bd) {
dplx = Duplex[bd];
e1000_validate_option(&dplx, &opt, adapter);
} else {
dplx = opt.def;
}
}
if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
e_dev_info("AutoNeg specified along with Speed or Duplex, "
"parameter ignored\n");
adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
} else { /* Autoneg */
static const struct e1000_opt_list an_list[] =
#define AA "AutoNeg advertising "
{{ 0x01, AA "10/HD" },
{ 0x02, AA "10/FD" },
{ 0x03, AA "10/FD, 10/HD" },
{ 0x04, AA "100/HD" },
{ 0x05, AA "100/HD, 10/HD" },
{ 0x06, AA "100/HD, 10/FD" },
{ 0x07, AA "100/HD, 10/FD, 10/HD" },
{ 0x08, AA "100/FD" },
{ 0x09, AA "100/FD, 10/HD" },
{ 0x0a, AA "100/FD, 10/FD" },
{ 0x0b, AA "100/FD, 10/FD, 10/HD" },
{ 0x0c, AA "100/FD, 100/HD" },
{ 0x0d, AA "100/FD, 100/HD, 10/HD" },
{ 0x0e, AA "100/FD, 100/HD, 10/FD" },
{ 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
{ 0x20, AA "1000/FD" },
{ 0x21, AA "1000/FD, 10/HD" },
{ 0x22, AA "1000/FD, 10/FD" },
{ 0x23, AA "1000/FD, 10/FD, 10/HD" },
{ 0x24, AA "1000/FD, 100/HD" },
{ 0x25, AA "1000/FD, 100/HD, 10/HD" },
{ 0x26, AA "1000/FD, 100/HD, 10/FD" },
{ 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
{ 0x28, AA "1000/FD, 100/FD" },
{ 0x29, AA "1000/FD, 100/FD, 10/HD" },
{ 0x2a, AA "1000/FD, 100/FD, 10/FD" },
{ 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
{ 0x2c, AA "1000/FD, 100/FD, 100/HD" },
{ 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
{ 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
{ 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
opt = (struct e1000_option) {
.type = list_option,
.name = "AutoNeg",
.err = "parameter ignored",
.def = AUTONEG_ADV_DEFAULT,
.arg = { .l = { .nr = ARRAY_SIZE(an_list),
.p = an_list }}
};
if (num_AutoNeg > bd) {
an = AutoNeg[bd];
e1000_validate_option(&an, &opt, adapter);
} else {
an = opt.def;
}
adapter->hw.autoneg_advertised = an;
}
switch (speed + dplx) {
case 0:
adapter->hw.autoneg = adapter->fc_autoneg = 1;
if ((num_Speed > bd) && (speed != 0 || dplx != 0))
e_dev_info("Speed and duplex autonegotiation "
"enabled\n");
break;
case HALF_DUPLEX:
e_dev_info("Half Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
e_dev_info("Full Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
ADVERTISE_100_FULL |
ADVERTISE_1000_FULL;
break;
case SPEED_10:
e_dev_info("10 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_10_FULL;
break;
case SPEED_10 + HALF_DUPLEX:
e_dev_info("Forcing to 10 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_10 + FULL_DUPLEX:
e_dev_info("Forcing to 10 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100:
e_dev_info("100 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
ADVERTISE_100_FULL;
break;
case SPEED_100 + HALF_DUPLEX:
e_dev_info("Forcing to 100 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100 + FULL_DUPLEX:
e_dev_info("Forcing to 100 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_1000:
e_dev_info("1000 Mbps Speed specified without Duplex\n");
goto full_duplex_only;
case SPEED_1000 + HALF_DUPLEX:
e_dev_info("Half Duplex is not supported at 1000 Mbps\n");
fallthrough;
case SPEED_1000 + FULL_DUPLEX:
full_duplex_only:
e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex "
"only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
break;
default:
BUG();
}
/* Speed, AutoNeg and MDI/MDI-X must all play nice */
if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. "
"Setting MDI-X to a compatible value.\n");
}
}
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2019 Renesas Electronics Corporation
* Copyright (C) 2016 Laurent Pinchart <[email protected]>
*/
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
struct lvds_codec {
struct device *dev;
struct drm_bridge bridge;
struct drm_bridge *panel_bridge;
struct drm_bridge_timings timings;
struct regulator *vcc;
struct gpio_desc *powerdown_gpio;
u32 connector_type;
unsigned int bus_format;
};
static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
{
return container_of(bridge, struct lvds_codec, bridge);
}
static int lvds_codec_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
bridge, flags);
}
static void lvds_codec_enable(struct drm_bridge *bridge)
{
struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
int ret;
ret = regulator_enable(lvds_codec->vcc);
if (ret) {
dev_err(lvds_codec->dev,
"Failed to enable regulator \"vcc\": %d\n", ret);
return;
}
if (lvds_codec->powerdown_gpio)
gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0);
}
static void lvds_codec_disable(struct drm_bridge *bridge)
{
struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
int ret;
if (lvds_codec->powerdown_gpio)
gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1);
ret = regulator_disable(lvds_codec->vcc);
if (ret)
dev_err(lvds_codec->dev,
"Failed to disable regulator \"vcc\": %d\n", ret);
}
#define MAX_INPUT_SEL_FORMATS 1
static u32 *
lvds_codec_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
u32 output_fmt,
unsigned int *num_input_fmts)
{
struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
u32 *input_fmts;
*num_input_fmts = 0;
input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
GFP_KERNEL);
if (!input_fmts)
return NULL;
input_fmts[0] = lvds_codec->bus_format;
*num_input_fmts = MAX_INPUT_SEL_FORMATS;
return input_fmts;
}
static const struct drm_bridge_funcs funcs = {
.attach = lvds_codec_attach,
.enable = lvds_codec_enable,
.disable = lvds_codec_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_get_input_bus_fmts = lvds_codec_atomic_get_input_bus_fmts,
};
static int lvds_codec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *panel_node;
struct device_node *bus_node;
struct drm_panel *panel;
struct lvds_codec *lvds_codec;
u32 val;
int ret;
lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
if (!lvds_codec)
return -ENOMEM;
lvds_codec->dev = &pdev->dev;
lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev);
lvds_codec->vcc = devm_regulator_get(lvds_codec->dev, "power");
if (IS_ERR(lvds_codec->vcc))
return dev_err_probe(dev, PTR_ERR(lvds_codec->vcc),
"Unable to get \"vcc\" supply\n");
lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown",
GPIOD_OUT_HIGH);
if (IS_ERR(lvds_codec->powerdown_gpio))
return dev_err_probe(dev, PTR_ERR(lvds_codec->powerdown_gpio),
"powerdown GPIO failure\n");
/* Locate the panel DT node. */
panel_node = of_graph_get_remote_node(dev->of_node, 1, 0);
if (!panel_node) {
dev_dbg(dev, "panel DT node not found\n");
return -ENXIO;
}
panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
if (IS_ERR(panel)) {
dev_dbg(dev, "panel not found, deferring probe\n");
return PTR_ERR(panel);
}
lvds_codec->panel_bridge =
devm_drm_panel_bridge_add_typed(dev, panel,
lvds_codec->connector_type);
if (IS_ERR(lvds_codec->panel_bridge))
return PTR_ERR(lvds_codec->panel_bridge);
lvds_codec->bridge.funcs = &funcs;
/*
* Decoder input LVDS format is a property of the decoder chip or even
* its strapping. Handle data-mapping the same way lvds-panel does. In
* case data-mapping is not present, do nothing, since there are still
* legacy bindings which do not specify this property.
*/
if (lvds_codec->connector_type != DRM_MODE_CONNECTOR_LVDS) {
bus_node = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
if (!bus_node) {
dev_dbg(dev, "bus DT node not found\n");
return -ENXIO;
}
ret = drm_of_lvds_get_data_mapping(bus_node);
of_node_put(bus_node);
if (ret == -ENODEV) {
dev_warn(dev, "missing 'data-mapping' DT property\n");
} else if (ret < 0) {
dev_err(dev, "invalid 'data-mapping' DT property\n");
return ret;
} else {
lvds_codec->bus_format = ret;
}
} else {
lvds_codec->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
}
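	/*
	 * Illustrative DT snippet (editor's sketch, values hypothetical):
	 * for a decoder, the LVDS data mapping is read from the input
	 * endpoint (port 0, endpoint 0), e.g.:
	 *
	 *	lvds-decoder {
	 *		compatible = "lvds-decoder";
	 *		ports {
	 *			port@0 {
	 *				endpoint {
	 *					data-mapping = "vesa-24";
	 *					remote-endpoint = <&source_out>;
	 *				};
	 *			};
	 *		};
	 *	};
	 */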
/*
* Encoder might sample data on different clock edge than the display,
* for example OnSemi FIN3385 has a dedicated strapping pin to select
* the sampling edge.
*/
if (lvds_codec->connector_type == DRM_MODE_CONNECTOR_LVDS &&
!of_property_read_u32(dev->of_node, "pclk-sample", &val)) {
lvds_codec->timings.input_bus_flags = val ?
DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE :
DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
}
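	/*
	 * E.g. (editor's note, hypothetical): pclk-sample = <1> in the
	 * encoder's DT node selects sampling on the rising pixel clock
	 * edge (DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE).
	 */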
/*
* The panel_bridge bridge is attached to the panel's of_node,
* but we need a bridge attached to our of_node for our user
* to look up.
*/
lvds_codec->bridge.of_node = dev->of_node;
lvds_codec->bridge.timings = &lvds_codec->timings;
drm_bridge_add(&lvds_codec->bridge);
platform_set_drvdata(pdev, lvds_codec);
return 0;
}
static void lvds_codec_remove(struct platform_device *pdev)
{
struct lvds_codec *lvds_codec = platform_get_drvdata(pdev);
drm_bridge_remove(&lvds_codec->bridge);
}
static const struct of_device_id lvds_codec_match[] = {
{
.compatible = "lvds-decoder",
.data = (void *)DRM_MODE_CONNECTOR_DPI,
},
{
.compatible = "lvds-encoder",
.data = (void *)DRM_MODE_CONNECTOR_LVDS,
},
{
.compatible = "thine,thc63lvdm83d",
.data = (void *)DRM_MODE_CONNECTOR_LVDS,
},
{},
};
MODULE_DEVICE_TABLE(of, lvds_codec_match);
static struct platform_driver lvds_codec_driver = {
.probe = lvds_codec_probe,
.remove = lvds_codec_remove,
.driver = {
.name = "lvds-codec",
.of_match_table = lvds_codec_match,
},
};
module_platform_driver(lvds_codec_driver);
MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
MODULE_DESCRIPTION("LVDS encoders and decoders");
MODULE_LICENSE("GPL");
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mcpm.h
*
* Created by: Nicolas Pitre, April 2012
* Copyright: (C) 2012-2013 Linaro Limited
*/
#ifndef MCPM_H
#define MCPM_H
/*
* Maximum number of possible clusters / CPUs per cluster.
*
* This should be sufficient for quite a while, while keeping the
* (assembly) code simpler. When this starts to grow then we'll have
* to consider dynamic allocation.
*/
#define MAX_CPUS_PER_CLUSTER 4
#ifdef CONFIG_MCPM_QUAD_CLUSTER
#define MAX_NR_CLUSTERS 4
#else
#define MAX_NR_CLUSTERS 2
#endif
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/cacheflush.h>
/*
* Platform specific code should use this symbol to set up secondary
* entry location for processors to use when released from reset.
*/
extern void mcpm_entry_point(void);
/*
 * This is used to indicate, via ptr, where the given CPU from the given
 * cluster should branch once it is ready to re-enter the kernel, or NULL
 * if it should be gated. A gated CPU is held in a WFE loop until its
 * vector becomes non-NULL.
*/
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
/*
 * This sets an early poke, i.e. a value to be poked into some address
* from very early assembly code before the CPU is ungated. The
* address must be physical, and if 0 then nothing will happen.
*/
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
unsigned long poke_phys_addr, unsigned long poke_val);
/*
* CPU/cluster power operations API for higher subsystems to use.
*/
/**
* mcpm_is_available - returns whether MCPM is initialized and available
*
 * Returns true if the MCPM layer has been initialized (i.e. platform
 * ops have been registered), false otherwise.
*/
bool mcpm_is_available(void);
/**
 * mcpm_cpu_power_up - make given CPU in given cluster runnable
*
* @cpu: CPU number within given cluster
* @cluster: cluster number for the CPU
*
* The identified CPU is brought out of reset. If the cluster was powered
* down then it is brought up as well, taking care not to let the other CPUs
* in the cluster run, and ensuring appropriate cluster setup.
*
* Caller must ensure the appropriate entry vector is initialized with
* mcpm_set_entry_vector() prior to calling this.
*
* This must be called in a sleepable context. However, the implementation
* is strongly encouraged to return early and let the operation happen
* asynchronously, especially when significant delays are expected.
*
* If the operation cannot be performed then an error code is returned.
*/
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
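/*
 * Illustrative sketch only (hypothetical caller code, not part of this
 * header): a platform's SMP bringup path would typically install the
 * entry vector before powering the CPU up, e.g.:
 *
 *	mcpm_set_entry_vector(cpu, cluster, secondary_startup);
 *	ret = mcpm_cpu_power_up(cpu, cluster);
 *	if (ret)
 *		return ret;
 */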
/**
* mcpm_cpu_power_down - power the calling CPU down
*
* The calling CPU is powered down.
*
* If this CPU is found to be the "last man standing" in the cluster
* then the cluster is prepared for power-down too.
*
* This must be called with interrupts disabled.
*
* On success this does not return. Re-entry in the kernel is expected
* via mcpm_entry_point.
*
* This will return if mcpm_platform_register() has not been called
 * previously, in which case the caller should take appropriate action.
*
* On success, the CPU is not guaranteed to be truly halted until
* mcpm_wait_for_cpu_powerdown() subsequently returns non-zero for the
* specified cpu. Until then, other CPUs should make sure they do not
* trash memory the target CPU might be executing/accessing.
*/
void mcpm_cpu_power_down(void);
/**
* mcpm_wait_for_cpu_powerdown - wait for a specified CPU to halt, and
* make sure it is powered off
*
* @cpu: CPU number within given cluster
* @cluster: cluster number for the CPU
*
* Call this function to ensure that a pending powerdown has taken
* effect and the CPU is safely parked before performing non-mcpm
* operations that may affect the CPU (such as kexec trashing the
* kernel text).
*
* It is *not* necessary to call this function if you only need to
* serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
* event.
*
* Do not call this function unless the specified CPU has already
* called mcpm_cpu_power_down() or has committed to doing so.
*
* @return:
* - zero if the CPU is in a safely parked state
* - nonzero otherwise (e.g., timeout)
*/
int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
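/*
 * Illustrative pairing (hypothetical caller code): after a CPU has called
 * mcpm_cpu_power_down(), another CPU can synchronise with the power-down,
 * e.g. in a hotplug path:
 *
 *	if (mcpm_wait_for_cpu_powerdown(cpu, cluster))
 *		pr_warn("CPU%u in cluster %u did not park in time\n",
 *			cpu, cluster);
 */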
/**
* mcpm_cpu_suspend - bring the calling CPU in a suspended state
*
* The calling CPU is suspended. This is similar to mcpm_cpu_power_down()
* except for possible extra platform specific configuration steps to allow
* an asynchronous wake-up e.g. with a pending interrupt.
*
* If this CPU is found to be the "last man standing" in the cluster
* then the cluster may be prepared for power-down too.
*
* This must be called with interrupts disabled.
*
* On success this does not return. Re-entry in the kernel is expected
* via mcpm_entry_point.
*
* This will return if mcpm_platform_register() has not been called
 * previously, in which case the caller should take appropriate action.
*/
void mcpm_cpu_suspend(void);
/**
 * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
*
* This lets the platform specific backend code perform needed housekeeping
* work. This must be called by the newly activated CPU as soon as it is
* fully operational in kernel space, before it enables interrupts.
*
* If the operation cannot be performed then an error code is returned.
*/
int mcpm_cpu_powered_up(void);
/*
* Platform specific callbacks used in the implementation of the above API.
*
* cpu_powerup:
 * Make given CPU runnable. Called with MCPM lock held and IRQs disabled.
* The given cluster is assumed to be set up (cluster_powerup would have
* been called beforehand). Must return 0 for success or negative error code.
*
* cluster_powerup:
* Set up power for given cluster. Called with MCPM lock held and IRQs
* disabled. Called before first cpu_powerup when cluster is down. Must
* return 0 for success or negative error code.
*
* cpu_suspend_prepare:
* Special suspend configuration. Called on target CPU with MCPM lock held
* and IRQs disabled. This callback is optional. If provided, it is called
* before cpu_powerdown_prepare.
*
* cpu_powerdown_prepare:
* Configure given CPU for power down. Called on target CPU with MCPM lock
 * held and IRQs disabled. Power down must be effective only at the next
 * WFI instruction.
*
* cluster_powerdown_prepare:
* Configure given cluster for power down. Called on one CPU from target
* cluster with MCPM lock held and IRQs disabled. A cpu_powerdown_prepare
* for each CPU in the cluster has happened when this occurs.
*
* cpu_cache_disable:
 * Clean and disable CPU level cache for the calling CPU. Called with IRQs
* disabled only. The CPU is no longer cache coherent with the rest of the
* system when this returns.
*
* cluster_cache_disable:
* Clean and disable the cluster wide cache as well as the CPU level cache
* for the calling CPU. No call to cpu_cache_disable will happen for this
* CPU. Called with IRQs disabled and only when all the other CPUs are done
* with their own cpu_cache_disable. The cluster is no longer cache coherent
* with the rest of the system when this returns.
*
* cpu_is_up:
* Called on given CPU after it has been powered up or resumed. The MCPM lock
* is held and IRQs disabled. This callback is optional.
*
* cluster_is_up:
* Called by the first CPU to be powered up or resumed in given cluster.
* The MCPM lock is held and IRQs disabled. This callback is optional. If
* provided, it is called before cpu_is_up for that CPU.
*
* wait_for_powerdown:
 * Wait until the given CPU is powered down. Called in a sleeping context.
 * A reasonable timeout must be applied. Must return 0 for success or a
 * negative error code.
*/
struct mcpm_platform_ops {
int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
int (*cluster_powerup)(unsigned int cluster);
void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
void (*cluster_powerdown_prepare)(unsigned int cluster);
void (*cpu_cache_disable)(void);
void (*cluster_cache_disable)(void);
void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
void (*cluster_is_up)(unsigned int cluster);
int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
};
/**
* mcpm_platform_register - register platform specific power methods
*
* @ops: mcpm_platform_ops structure to register
*
* An error is returned if the registration has been done previously.
*/
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
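/*
 * Minimal registration sketch (hypothetical platform code; the foo_*
 * callbacks are placeholders, not real functions):
 *
 *	static const struct mcpm_platform_ops foo_pm_ops = {
 *		.cpu_powerup		= foo_cpu_powerup,
 *		.cluster_powerup	= foo_cluster_powerup,
 *		.cpu_cache_disable	= foo_cpu_cache_disable,
 *		.cluster_cache_disable	= foo_cluster_cache_disable,
 *		.wait_for_powerdown	= foo_wait_for_powerdown,
 *	};
 *
 *	ret = mcpm_platform_register(&foo_pm_ops);
 */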
/**
* mcpm_sync_init - Initialize the cluster synchronization support
*
* @power_up_setup: platform specific function invoked during very
* early CPU/cluster bringup stage.
*
* This prepares memory used by vlocks and the MCPM state machine used
* across CPUs that may have their caches active or inactive. Must be
* called only after a successful call to mcpm_platform_register().
*
* The power_up_setup argument is a pointer to assembly code called when
* the MMU and caches are still disabled during boot and no stack space is
* available. The affinity level passed to that code corresponds to the
* resource that needs to be initialized (e.g. 1 for cluster level, 0 for
* CPU level). Proper exclusion mechanisms are already activated at that
* point.
*/
int __init mcpm_sync_init(
void (*power_up_setup)(unsigned int affinity_level));
/**
* mcpm_loopback - make a run through the MCPM low-level code
*
* @cache_disable: pointer to function performing cache disabling
*
* This exercises the MCPM machinery by soft resetting the CPU and branching
* to the MCPM low-level entry code before returning to the caller.
* The @cache_disable function must do the necessary cache disabling to
* let the regular kernel init code turn it back on as if the CPU was
* hotplugged in. The MCPM state machine is set as if the cluster was
* initialized meaning the power_up_setup callback passed to mcpm_sync_init()
* will be invoked for all affinity levels. This may be useful to initialize
 * some resources such as enabling the CCI, which requires the cache to be
 * off, or simply for testing purposes.
*/
int __init mcpm_loopback(void (*cache_disable)(void));
void __init mcpm_smp_set_ops(void);
/*
* Synchronisation structures for coordinating safe cluster setup/teardown.
* This is private to the MCPM core code and shared between C and assembly.
* When modifying this structure, make sure you update the MCPM_SYNC_ defines
* to match.
*/
struct mcpm_sync_struct {
/* individual CPU states */
struct {
s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
} cpus[MAX_CPUS_PER_CLUSTER];
/* cluster state */
s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
/* inbound-side state */
s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
};
struct sync_struct {
struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
};
#else
/*
* asm-offsets.h causes trouble when included in .c files, and cacheflush.h
* cannot be included in asm files. Let's work around the conflict like this.
*/
#include <asm/asm-offsets.h>
#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
#endif /* ! __ASSEMBLY__ */
/* Definitions for mcpm_sync_struct */
#define CPU_DOWN 0x11
#define CPU_COMING_UP 0x12
#define CPU_UP 0x13
#define CPU_GOING_DOWN 0x14
#define CLUSTER_DOWN 0x21
#define CLUSTER_UP 0x22
#define CLUSTER_GOING_DOWN 0x23
#define INBOUND_NOT_COMING_UP 0x31
#define INBOUND_COMING_UP 0x32
/*
* Offsets for the mcpm_sync_struct members, for use in asm.
* We don't want to make them global to the kernel via asm-offsets.c.
*/
#define MCPM_SYNC_CLUSTER_CPUS 0
#define MCPM_SYNC_CPU_SIZE __CACHE_WRITEBACK_GRANULE
#define MCPM_SYNC_CLUSTER_CLUSTER \
(MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
#define MCPM_SYNC_CLUSTER_INBOUND \
(MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
#define MCPM_SYNC_CLUSTER_SIZE \
(MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
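/*
 * Illustrative consistency check (hypothetical; this would live in C code,
 * not in this header): the offsets above must track struct mcpm_sync_struct,
 * which can be asserted at build time with something like:
 *
 *	BUILD_BUG_ON(offsetof(struct mcpm_sync_struct, cluster) !=
 *		     MCPM_SYNC_CLUSTER_CLUSTER);
 *	BUILD_BUG_ON(offsetof(struct mcpm_sync_struct, inbound) !=
 *		     MCPM_SYNC_CLUSTER_INBOUND);
 */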
#endif
|
/*
* Copyright (c) 2017 MediaTek Inc.
* Author: Ming Huang <[email protected]>
* Sean Wang <[email protected]>
*
* SPDX-License-Identifier: (GPL-2.0 OR MIT)
*/
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/mt7622-clk.h>
#include <dt-bindings/phy/phy.h>
#include <dt-bindings/power/mt7622-power.h>
#include <dt-bindings/reset/mt7622-reset.h>
#include <dt-bindings/thermal/thermal.h>
/ {
compatible = "mediatek,mt7622";
interrupt-parent = <&sysirq>;
#address-cells = <2>;
#size-cells = <2>;
cpu_opp_table: opp-table {
compatible = "operating-points-v2";
opp-shared;
opp-300000000 {
			opp-hz = /bits/ 64 <300000000>;
opp-microvolt = <950000>;
};
opp-437500000 {
opp-hz = /bits/ 64 <437500000>;
opp-microvolt = <1000000>;
};
opp-600000000 {
opp-hz = /bits/ 64 <600000000>;
opp-microvolt = <1050000>;
};
opp-812500000 {
opp-hz = /bits/ 64 <812500000>;
opp-microvolt = <1100000>;
};
opp-1025000000 {
opp-hz = /bits/ 64 <1025000000>;
opp-microvolt = <1150000>;
};
opp-1137500000 {
opp-hz = /bits/ 64 <1137500000>;
opp-microvolt = <1200000>;
};
opp-1262500000 {
opp-hz = /bits/ 64 <1262500000>;
opp-microvolt = <1250000>;
};
opp-1350000000 {
opp-hz = /bits/ 64 <1350000000>;
opp-microvolt = <1310000>;
};
};
cpus {
#address-cells = <2>;
#size-cells = <0>;
cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
clocks = <&infracfg CLK_INFRA_MUX1_SEL>,
<&apmixedsys CLK_APMIXED_MAIN_CORE_EN>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>;
enable-method = "psci";
clock-frequency = <1300000000>;
cci-control-port = <&cci_control2>;
next-level-cache = <&L2>;
};
cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
clocks = <&infracfg CLK_INFRA_MUX1_SEL>,
<&apmixedsys CLK_APMIXED_MAIN_CORE_EN>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>;
enable-method = "psci";
clock-frequency = <1300000000>;
cci-control-port = <&cci_control2>;
next-level-cache = <&L2>;
};
L2: l2-cache {
compatible = "cache";
cache-level = <2>;
cache-unified;
};
};
pwrap_clk: dummy40m {
compatible = "fixed-clock";
clock-frequency = <40000000>;
#clock-cells = <0>;
};
clk25m: oscillator {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <25000000>;
clock-output-names = "clkxtal";
};
psci {
compatible = "arm,psci-0.2";
method = "smc";
};
pmu {
compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 9 IRQ_TYPE_LEVEL_LOW>;
interrupt-affinity = <&cpu0>, <&cpu1>;
};
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
ranges;
/* 192 KiB reserved for ARM Trusted Firmware (BL31) */
secmon_reserved: secmon@43000000 {
reg = <0 0x43000000 0 0x30000>;
no-map;
};
};
thermal-zones {
cpu_thermal: cpu-thermal {
polling-delay-passive = <1000>;
polling-delay = <1000>;
thermal-sensors = <&thermal 0>;
trips {
cpu_passive: cpu-passive {
temperature = <47000>;
hysteresis = <2000>;
type = "passive";
};
cpu_active: cpu-active {
temperature = <67000>;
hysteresis = <2000>;
type = "active";
};
cpu_hot: cpu-hot {
temperature = <87000>;
hysteresis = <2000>;
type = "hot";
};
cpu-crit {
temperature = <107000>;
hysteresis = <2000>;
type = "critical";
};
};
cooling-maps {
map0 {
trip = <&cpu_passive>;
cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
map1 {
trip = <&cpu_active>;
cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
map2 {
trip = <&cpu_hot>;
cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
};
};
timer {
compatible = "arm,armv8-timer";
interrupt-parent = <&gic>;
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) |
IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) |
IRQ_TYPE_LEVEL_HIGH)>;
};
infracfg: infracfg@10000000 {
compatible = "mediatek,mt7622-infracfg",
"syscon";
reg = <0 0x10000000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
};
pwrap: pwrap@10001000 {
compatible = "mediatek,mt7622-pwrap";
reg = <0 0x10001000 0 0x250>;
reg-names = "pwrap";
clocks = <&infracfg CLK_INFRA_PMIC_PD>, <&pwrap_clk>;
clock-names = "spi", "wrap";
resets = <&infracfg MT7622_INFRA_PMIC_WRAP_RST>;
reset-names = "pwrap";
interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
pericfg: pericfg@10002000 {
compatible = "mediatek,mt7622-pericfg",
"syscon";
reg = <0 0x10002000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
};
scpsys: power-controller@10006000 {
compatible = "mediatek,mt7622-scpsys",
"syscon";
#power-domain-cells = <1>;
reg = <0 0x10006000 0 0x1000>;
interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 166 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 167 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 168 IRQ_TYPE_LEVEL_LOW>;
infracfg = <&infracfg>;
clocks = <&topckgen CLK_TOP_HIF_SEL>;
clock-names = "hif_sel";
};
cir: ir-receiver@10009000 {
compatible = "mediatek,mt7622-cir";
reg = <0 0x10009000 0 0x1000>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
clocks = <&infracfg CLK_INFRA_IRRX_PD>,
<&topckgen CLK_TOP_AXI_SEL>;
clock-names = "clk", "bus";
status = "disabled";
};
sysirq: interrupt-controller@10200620 {
compatible = "mediatek,mt7622-sysirq",
"mediatek,mt6577-sysirq";
interrupt-controller;
#interrupt-cells = <3>;
interrupt-parent = <&gic>;
reg = <0 0x10200620 0 0x20>;
};
efuse: efuse@10206000 {
compatible = "mediatek,mt7622-efuse",
"mediatek,efuse";
reg = <0 0x10206000 0 0x1000>;
#address-cells = <1>;
#size-cells = <1>;
thermal_calibration: calib@198 {
reg = <0x198 0xc>;
};
};
apmixedsys: clock-controller@10209000 {
compatible = "mediatek,mt7622-apmixedsys";
reg = <0 0x10209000 0 0x1000>;
#clock-cells = <1>;
};
topckgen: clock-controller@10210000 {
compatible = "mediatek,mt7622-topckgen";
reg = <0 0x10210000 0 0x1000>;
#clock-cells = <1>;
};
rng: rng@1020f000 {
compatible = "mediatek,mt7622-rng",
"mediatek,mt7623-rng";
reg = <0 0x1020f000 0 0x1000>;
clocks = <&infracfg CLK_INFRA_TRNG>;
clock-names = "rng";
};
pio: pinctrl@10211000 {
compatible = "mediatek,mt7622-pinctrl";
reg = <0 0x10211000 0 0x1000>,
<0 0x10005000 0 0x1000>;
reg-names = "base", "eint";
gpio-controller;
#gpio-cells = <2>;
gpio-ranges = <&pio 0 0 103>;
interrupt-controller;
interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
#interrupt-cells = <2>;
};
watchdog: watchdog@10212000 {
compatible = "mediatek,mt7622-wdt",
"mediatek,mt6589-wdt";
reg = <0 0x10212000 0 0x800>;
};
rtc: rtc@10212800 {
compatible = "mediatek,mt7622-rtc",
"mediatek,soc-rtc";
reg = <0 0x10212800 0 0x200>;
interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_RTC>;
clock-names = "rtc";
};
	gic: interrupt-controller@10310000 {
compatible = "arm,gic-400";
interrupt-controller;
#interrupt-cells = <3>;
interrupt-parent = <&gic>;
reg = <0 0x10310000 0 0x1000>,
<0 0x10320000 0 0x1000>,
<0 0x10340000 0 0x2000>,
<0 0x10360000 0 0x2000>;
};
cci: cci@10390000 {
compatible = "arm,cci-400";
#address-cells = <1>;
#size-cells = <1>;
reg = <0 0x10390000 0 0x1000>;
ranges = <0 0 0x10390000 0x10000>;
cci_control0: slave-if@1000 {
compatible = "arm,cci-400-ctrl-if";
interface-type = "ace-lite";
reg = <0x1000 0x1000>;
};
cci_control1: slave-if@4000 {
compatible = "arm,cci-400-ctrl-if";
interface-type = "ace";
reg = <0x4000 0x1000>;
};
cci_control2: slave-if@5000 {
compatible = "arm,cci-400-ctrl-if", "syscon";
interface-type = "ace";
reg = <0x5000 0x1000>;
};
pmu@9000 {
compatible = "arm,cci-400-pmu,r1";
reg = <0x9000 0x5000>;
interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
};
};
auxadc: adc@11001000 {
compatible = "mediatek,mt7622-auxadc";
reg = <0 0x11001000 0 0x1000>;
clocks = <&pericfg CLK_PERI_AUXADC_PD>;
clock-names = "main";
#io-channel-cells = <1>;
};
uart0: serial@11002000 {
compatible = "mediatek,mt7622-uart",
"mediatek,mt6577-uart";
reg = <0 0x11002000 0 0x400>;
interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_UART_SEL>,
<&pericfg CLK_PERI_UART0_PD>;
clock-names = "baud", "bus";
status = "disabled";
};
uart1: serial@11003000 {
compatible = "mediatek,mt7622-uart",
"mediatek,mt6577-uart";
reg = <0 0x11003000 0 0x400>;
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_UART_SEL>,
<&pericfg CLK_PERI_UART1_PD>;
clock-names = "baud", "bus";
status = "disabled";
};
uart2: serial@11004000 {
compatible = "mediatek,mt7622-uart",
"mediatek,mt6577-uart";
reg = <0 0x11004000 0 0x400>;
interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_UART_SEL>,
<&pericfg CLK_PERI_UART2_PD>;
clock-names = "baud", "bus";
status = "disabled";
};
uart3: serial@11005000 {
compatible = "mediatek,mt7622-uart",
"mediatek,mt6577-uart";
reg = <0 0x11005000 0 0x400>;
interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_UART_SEL>,
<&pericfg CLK_PERI_UART3_PD>;
clock-names = "baud", "bus";
status = "disabled";
};
pwm: pwm@11006000 {
compatible = "mediatek,mt7622-pwm";
reg = <0 0x11006000 0 0x1000>;
#pwm-cells = <2>;
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_PWM_SEL>,
<&pericfg CLK_PERI_PWM_PD>,
<&pericfg CLK_PERI_PWM1_PD>,
<&pericfg CLK_PERI_PWM2_PD>,
<&pericfg CLK_PERI_PWM3_PD>,
<&pericfg CLK_PERI_PWM4_PD>,
<&pericfg CLK_PERI_PWM5_PD>,
<&pericfg CLK_PERI_PWM6_PD>;
clock-names = "top", "main", "pwm1", "pwm2", "pwm3", "pwm4",
"pwm5", "pwm6";
status = "disabled";
};
i2c0: i2c@11007000 {
compatible = "mediatek,mt7622-i2c";
reg = <0 0x11007000 0 0x90>,
<0 0x11000100 0 0x80>;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>;
clock-div = <16>;
clocks = <&pericfg CLK_PERI_I2C0_PD>,
<&pericfg CLK_PERI_AP_DMA_PD>;
clock-names = "main", "dma";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
i2c1: i2c@11008000 {
compatible = "mediatek,mt7622-i2c";
reg = <0 0x11008000 0 0x90>,
<0 0x11000180 0 0x80>;
interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_LOW>;
clock-div = <16>;
clocks = <&pericfg CLK_PERI_I2C1_PD>,
<&pericfg CLK_PERI_AP_DMA_PD>;
clock-names = "main", "dma";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
i2c2: i2c@11009000 {
compatible = "mediatek,mt7622-i2c";
reg = <0 0x11009000 0 0x90>,
<0 0x11000200 0 0x80>;
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
clock-div = <16>;
clocks = <&pericfg CLK_PERI_I2C2_PD>,
<&pericfg CLK_PERI_AP_DMA_PD>;
clock-names = "main", "dma";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
spi0: spi@1100a000 {
compatible = "mediatek,mt7622-spi";
reg = <0 0x1100a000 0 0x100>;
interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_SYSPLL3_D2>,
<&topckgen CLK_TOP_SPI0_SEL>,
<&pericfg CLK_PERI_SPI0_PD>;
clock-names = "parent-clk", "sel-clk", "spi-clk";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
thermal: thermal@1100b000 {
#thermal-sensor-cells = <1>;
compatible = "mediatek,mt7622-thermal";
reg = <0 0x1100b000 0 0x1000>;
		interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_THERM_PD>,
<&pericfg CLK_PERI_AUXADC_PD>;
clock-names = "therm", "auxadc";
resets = <&pericfg MT7622_PERI_THERM_SW_RST>;
mediatek,auxadc = <&auxadc>;
mediatek,apmixedsys = <&apmixedsys>;
nvmem-cells = <&thermal_calibration>;
nvmem-cell-names = "calibration-data";
};
btif: serial@1100c000 {
compatible = "mediatek,mt7622-btif",
"mediatek,mtk-btif";
reg = <0 0x1100c000 0 0x1000>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_BTIF_PD>;
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
bluetooth {
compatible = "mediatek,mt7622-bluetooth";
power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
clocks = <&clk25m>;
clock-names = "ref";
};
};
nandc: nand-controller@1100d000 {
compatible = "mediatek,mt7622-nfc";
		reg = <0 0x1100d000 0 0x1000>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_NFI_PD>,
<&pericfg CLK_PERI_SNFI_PD>;
clock-names = "nfi_clk", "pad_clk";
ecc-engine = <&bch>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
snfi: spi@1100d000 {
compatible = "mediatek,mt7622-snand";
reg = <0 0x1100d000 0 0x1000>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_NFI_PD>, <&pericfg CLK_PERI_SNFI_PD>;
clock-names = "nfi_clk", "pad_clk";
nand-ecc-engine = <&bch>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
bch: ecc@1100e000 {
compatible = "mediatek,mt7622-ecc";
reg = <0 0x1100e000 0 0x1000>;
interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_NFIECC_PD>;
clock-names = "nfiecc_clk";
status = "disabled";
};
nor_flash: spi@11014000 {
compatible = "mediatek,mt7622-nor",
"mediatek,mt8173-nor";
reg = <0 0x11014000 0 0xe0>;
clocks = <&pericfg CLK_PERI_FLASH_PD>,
<&topckgen CLK_TOP_FLASH_SEL>;
clock-names = "spi", "sf";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
spi1: spi@11016000 {
compatible = "mediatek,mt7622-spi";
reg = <0 0x11016000 0 0x100>;
interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_SYSPLL3_D2>,
<&topckgen CLK_TOP_SPI1_SEL>,
<&pericfg CLK_PERI_SPI1_PD>;
clock-names = "parent-clk", "sel-clk", "spi-clk";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
uart4: serial@11019000 {
compatible = "mediatek,mt7622-uart",
"mediatek,mt6577-uart";
reg = <0 0x11019000 0 0x400>;
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_UART_SEL>,
<&pericfg CLK_PERI_UART4_PD>;
clock-names = "baud", "bus";
status = "disabled";
};
audsys: clock-controller@11220000 {
compatible = "mediatek,mt7622-audsys", "syscon";
reg = <0 0x11220000 0 0x2000>;
#clock-cells = <1>;
afe: audio-controller {
compatible = "mediatek,mt7622-audio";
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 145 IRQ_TYPE_LEVEL_LOW>;
interrupt-names = "afe", "asys";
clocks = <&infracfg CLK_INFRA_AUDIO_PD>,
<&topckgen CLK_TOP_AUD1_SEL>,
<&topckgen CLK_TOP_AUD2_SEL>,
<&topckgen CLK_TOP_A1SYS_HP_DIV_PD>,
<&topckgen CLK_TOP_A2SYS_HP_DIV_PD>,
<&topckgen CLK_TOP_I2S0_MCK_SEL>,
<&topckgen CLK_TOP_I2S1_MCK_SEL>,
<&topckgen CLK_TOP_I2S2_MCK_SEL>,
<&topckgen CLK_TOP_I2S3_MCK_SEL>,
<&topckgen CLK_TOP_I2S0_MCK_DIV>,
<&topckgen CLK_TOP_I2S1_MCK_DIV>,
<&topckgen CLK_TOP_I2S2_MCK_DIV>,
<&topckgen CLK_TOP_I2S3_MCK_DIV>,
<&topckgen CLK_TOP_I2S0_MCK_DIV_PD>,
<&topckgen CLK_TOP_I2S1_MCK_DIV_PD>,
<&topckgen CLK_TOP_I2S2_MCK_DIV_PD>,
<&topckgen CLK_TOP_I2S3_MCK_DIV_PD>,
<&audsys CLK_AUDIO_I2SO1>,
<&audsys CLK_AUDIO_I2SO2>,
<&audsys CLK_AUDIO_I2SO3>,
<&audsys CLK_AUDIO_I2SO4>,
<&audsys CLK_AUDIO_I2SIN1>,
<&audsys CLK_AUDIO_I2SIN2>,
<&audsys CLK_AUDIO_I2SIN3>,
<&audsys CLK_AUDIO_I2SIN4>,
<&audsys CLK_AUDIO_ASRCO1>,
<&audsys CLK_AUDIO_ASRCO2>,
<&audsys CLK_AUDIO_ASRCO3>,
<&audsys CLK_AUDIO_ASRCO4>,
<&audsys CLK_AUDIO_AFE>,
<&audsys CLK_AUDIO_AFE_CONN>,
<&audsys CLK_AUDIO_A1SYS>,
<&audsys CLK_AUDIO_A2SYS>;
clock-names = "infra_sys_audio_clk",
"top_audio_mux1_sel",
"top_audio_mux2_sel",
"top_audio_a1sys_hp",
"top_audio_a2sys_hp",
"i2s0_src_sel",
"i2s1_src_sel",
"i2s2_src_sel",
"i2s3_src_sel",
"i2s0_src_div",
"i2s1_src_div",
"i2s2_src_div",
"i2s3_src_div",
"i2s0_mclk_en",
"i2s1_mclk_en",
"i2s2_mclk_en",
"i2s3_mclk_en",
"i2so0_hop_ck",
"i2so1_hop_ck",
"i2so2_hop_ck",
"i2so3_hop_ck",
"i2si0_hop_ck",
"i2si1_hop_ck",
"i2si2_hop_ck",
"i2si3_hop_ck",
"asrc0_out_ck",
"asrc1_out_ck",
"asrc2_out_ck",
"asrc3_out_ck",
"audio_afe_pd",
"audio_afe_conn_pd",
"audio_a1sys_pd",
"audio_a2sys_pd";
assigned-clocks = <&topckgen CLK_TOP_A1SYS_HP_SEL>,
<&topckgen CLK_TOP_A2SYS_HP_SEL>,
<&topckgen CLK_TOP_A1SYS_HP_DIV>,
<&topckgen CLK_TOP_A2SYS_HP_DIV>;
assigned-clock-parents = <&topckgen CLK_TOP_AUD1PLL>,
<&topckgen CLK_TOP_AUD2PLL>;
assigned-clock-rates = <0>, <0>, <49152000>, <45158400>;
};
};
mmc0: mmc@11230000 {
compatible = "mediatek,mt7622-mmc";
reg = <0 0x11230000 0 0x1000>;
interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_0_PD>,
<&topckgen CLK_TOP_MSDC50_0_SEL>;
clock-names = "source", "hclk";
resets = <&pericfg MT7622_PERI_MSDC0_SW_RST>;
reset-names = "hrst";
status = "disabled";
};
mmc1: mmc@11240000 {
compatible = "mediatek,mt7622-mmc";
reg = <0 0x11240000 0 0x1000>;
interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_1_PD>,
<&topckgen CLK_TOP_AXI_SEL>;
clock-names = "source", "hclk";
resets = <&pericfg MT7622_PERI_MSDC1_SW_RST>;
reset-names = "hrst";
status = "disabled";
};
wmac: wmac@18000000 {
compatible = "mediatek,mt7622-wmac";
reg = <0 0x18000000 0 0x100000>;
interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_LOW>;
mediatek,infracfg = <&infracfg>;
status = "disabled";
power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
};
ssusbsys: clock-controller@1a000000 {
compatible = "mediatek,mt7622-ssusbsys";
reg = <0 0x1a000000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
};
ssusb: usb@1a0c0000 {
compatible = "mediatek,mt7622-xhci",
"mediatek,mtk-xhci";
reg = <0 0x1a0c0000 0 0x01000>,
<0 0x1a0c4700 0 0x0100>;
reg-names = "mac", "ippc";
interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF1>;
clocks = <&ssusbsys CLK_SSUSB_SYS_EN>,
<&ssusbsys CLK_SSUSB_REF_EN>,
<&ssusbsys CLK_SSUSB_MCU_EN>,
<&ssusbsys CLK_SSUSB_DMA_EN>;
clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck";
phys = <&u2port0 PHY_TYPE_USB2>,
<&u3port0 PHY_TYPE_USB3>,
<&u2port1 PHY_TYPE_USB2>;
status = "disabled";
};
u3phy: t-phy@1a0c4000 {
compatible = "mediatek,mt7622-tphy",
"mediatek,generic-tphy-v1";
reg = <0 0x1a0c4000 0 0x700>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
status = "disabled";
u2port0: usb-phy@1a0c4800 {
reg = <0 0x1a0c4800 0 0x0100>;
#phy-cells = <1>;
clocks = <&ssusbsys CLK_SSUSB_U2_PHY_EN>;
clock-names = "ref";
};
u3port0: usb-phy@1a0c4900 {
reg = <0 0x1a0c4900 0 0x0700>;
#phy-cells = <1>;
clocks = <&clk25m>;
clock-names = "ref";
};
u2port1: usb-phy@1a0c5000 {
reg = <0 0x1a0c5000 0 0x0100>;
#phy-cells = <1>;
clocks = <&ssusbsys CLK_SSUSB_U2_PHY_1P_EN>;
clock-names = "ref";
};
};
pciesys: clock-controller@1a100800 {
compatible = "mediatek,mt7622-pciesys";
reg = <0 0x1a100800 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
};
pciecfg: pciecfg@1a140000 {
compatible = "mediatek,generic-pciecfg", "syscon";
reg = <0 0x1a140000 0 0x1000>;
};
pcie0: pcie@1a143000 {
compatible = "mediatek,mt7622-pcie";
device_type = "pci";
reg = <0 0x1a143000 0 0x1000>;
reg-names = "port0";
linux,pci-domain = <0>;
#address-cells = <3>;
#size-cells = <2>;
interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_LOW>;
interrupt-names = "pcie_irq";
clocks = <&pciesys CLK_PCIE_P0_MAC_EN>,
<&pciesys CLK_PCIE_P0_AHB_EN>,
<&pciesys CLK_PCIE_P0_AUX_EN>,
<&pciesys CLK_PCIE_P0_AXI_EN>,
<&pciesys CLK_PCIE_P0_OBFF_EN>,
<&pciesys CLK_PCIE_P0_PIPE_EN>;
clock-names = "sys_ck0", "ahb_ck0", "aux_ck0",
"axi_ck0", "obff_ck0", "pipe_ck0";
power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF0>;
bus-range = <0x00 0xff>;
ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x8000000>;
status = "disabled";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc0 0>,
<0 0 0 2 &pcie_intc0 1>,
<0 0 0 3 &pcie_intc0 2>,
<0 0 0 4 &pcie_intc0 3>;
pcie_intc0: interrupt-controller {
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <1>;
};
};
pcie1: pcie@1a145000 {
compatible = "mediatek,mt7622-pcie";
device_type = "pci";
reg = <0 0x1a145000 0 0x1000>;
reg-names = "port1";
linux,pci-domain = <1>;
#address-cells = <3>;
#size-cells = <2>;
interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_LOW>;
interrupt-names = "pcie_irq";
clocks = <&pciesys CLK_PCIE_P1_MAC_EN>,
			 /* the designer connected RC1 to the p0_ahb clock */
<&pciesys CLK_PCIE_P0_AHB_EN>,
<&pciesys CLK_PCIE_P1_AUX_EN>,
<&pciesys CLK_PCIE_P1_AXI_EN>,
<&pciesys CLK_PCIE_P1_OBFF_EN>,
<&pciesys CLK_PCIE_P1_PIPE_EN>;
clock-names = "sys_ck1", "ahb_ck1", "aux_ck1",
"axi_ck1", "obff_ck1", "pipe_ck1";
power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF0>;
bus-range = <0x00 0xff>;
ranges = <0x82000000 0 0x28000000 0x0 0x28000000 0 0x8000000>;
status = "disabled";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc1 0>,
<0 0 0 2 &pcie_intc1 1>,
<0 0 0 3 &pcie_intc1 2>,
<0 0 0 4 &pcie_intc1 3>;
pcie_intc1: interrupt-controller {
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <1>;
};
};
sata: sata@1a200000 {
compatible = "mediatek,mt7622-ahci",
"mediatek,mtk-ahci";
reg = <0 0x1a200000 0 0x1100>;
interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "hostc";
clocks = <&pciesys CLK_SATA_AHB_EN>,
<&pciesys CLK_SATA_AXI_EN>,
<&pciesys CLK_SATA_ASIC_EN>,
<&pciesys CLK_SATA_RBC_EN>,
<&pciesys CLK_SATA_PM_EN>;
clock-names = "ahb", "axi", "asic", "rbc", "pm";
phys = <&sata_port PHY_TYPE_SATA>;
phy-names = "sata-phy";
ports-implemented = <0x1>;
power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF0>;
resets = <&pciesys MT7622_SATA_AXI_BUS_RST>,
<&pciesys MT7622_SATA_PHY_SW_RST>,
<&pciesys MT7622_SATA_PHY_REG_RST>;
reset-names = "axi", "sw", "reg";
mediatek,phy-mode = <&pciesys>;
status = "disabled";
};
sata_phy: t-phy {
compatible = "mediatek,mt7622-tphy",
"mediatek,generic-tphy-v1";
#address-cells = <2>;
#size-cells = <2>;
ranges;
status = "disabled";
sata_port: sata-phy@1a243000 {
reg = <0 0x1a243000 0 0x0100>;
clocks = <&topckgen CLK_TOP_ETH_500M>;
clock-names = "ref";
#phy-cells = <1>;
};
};
hifsys: clock-controller@1af00000 {
compatible = "mediatek,mt7622-hifsys";
reg = <0 0x1af00000 0 0x70>;
#clock-cells = <1>;
};
ethsys: clock-controller@1b000000 {
compatible = "mediatek,mt7622-ethsys",
"syscon";
reg = <0 0x1b000000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
};
hsdma: dma-controller@1b007000 {
compatible = "mediatek,mt7622-hsdma";
reg = <0 0x1b007000 0 0x1000>;
interrupts = <GIC_SPI 219 IRQ_TYPE_LEVEL_LOW>;
		clocks = <&ethsys CLK_ETH_HSDMA_EN>;
clock-names = "hsdma";
power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
#dma-cells = <1>;
dma-requests = <3>;
};
pcie_mirror: pcie-mirror@10000400 {
compatible = "mediatek,mt7622-pcie-mirror",
"syscon";
reg = <0 0x10000400 0 0x10>;
};
wed0: wed@1020a000 {
compatible = "mediatek,mt7622-wed",
"syscon";
reg = <0 0x1020a000 0 0x1000>;
interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
};
wed1: wed@1020b000 {
compatible = "mediatek,mt7622-wed",
"syscon";
reg = <0 0x1020b000 0 0x1000>;
interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>;
};
eth: ethernet@1b100000 {
compatible = "mediatek,mt7622-eth";
reg = <0 0x1b100000 0 0x20000>;
interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 225 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_ETH_SEL>,
			 <&ethsys CLK_ETH_ESW_EN>,
			 <&ethsys CLK_ETH_GP0_EN>,
			 <&ethsys CLK_ETH_GP1_EN>,
			 <&ethsys CLK_ETH_GP2_EN>,
<&sgmiisys CLK_SGMII_TX250M_EN>,
<&sgmiisys CLK_SGMII_RX250M_EN>,
<&sgmiisys CLK_SGMII_CDR_REF>,
<&sgmiisys CLK_SGMII_CDR_FB>,
<&topckgen CLK_TOP_SGMIIPLL>,
<&apmixedsys CLK_APMIXED_ETH2PLL>;
clock-names = "ethif", "esw", "gp0", "gp1", "gp2",
"sgmii_tx250m", "sgmii_rx250m",
"sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck",
"eth2pll";
power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
		mediatek,ethsys = <&ethsys>;
mediatek,sgmiisys = <&sgmiisys>;
cci-control-port = <&cci_control2>;
mediatek,wed = <&wed0>, <&wed1>;
mediatek,pcie-mirror = <&pcie_mirror>;
mediatek,hifsys = <&hifsys>;
dma-coherent;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
sgmiisys: sgmiisys@1b128000 {
compatible = "mediatek,mt7622-sgmiisys",
"syscon";
reg = <0 0x1b128000 0 0x3000>;
#clock-cells = <1>;
};
};
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
*
* @File cthw20k1.h
*
* @Brief
 * This file contains the definition of hardware access methods.
*
* @Author Liu Chun
* @Date May 13 2008
*/
#ifndef CTHW20K1_H
#define CTHW20K1_H
#include "cthardware.h"
int create_20k1_hw_obj(struct hw **rhw);
int destroy_20k1_hw_obj(struct hw *hw);
#endif /* CTHW20K1_H */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Create default crypto algorithm instances.
*
* Copyright (c) 2006 Herbert Xu <[email protected]>
*/
#include <crypto/internal/aead.h>
#include <linux/completion.h>
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"
struct cryptomgr_param {
struct rtattr *tb[CRYPTO_MAX_ATTRS + 2];
struct {
struct rtattr attr;
struct crypto_attr_type data;
} type;
struct {
struct rtattr attr;
struct crypto_attr_alg data;
} attrs[CRYPTO_MAX_ATTRS];
char template[CRYPTO_MAX_ALG_NAME];
struct crypto_larval *larval;
u32 otype;
u32 omask;
};
struct crypto_test_param {
char driver[CRYPTO_MAX_ALG_NAME];
char alg[CRYPTO_MAX_ALG_NAME];
u32 type;
};
static int cryptomgr_probe(void *data)
{
struct cryptomgr_param *param = data;
struct crypto_template *tmpl;
int err = -ENOENT;
tmpl = crypto_lookup_template(param->template);
if (!tmpl)
goto out;
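	/*
	 * tmpl->create() is retried for as long as it keeps returning
	 * -EAGAIN, i.e. while the failure is considered transient, unless
	 * this thread has been signalled.
	 */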
do {
err = tmpl->create(tmpl, param->tb);
} while (err == -EAGAIN && !signal_pending(current));
crypto_tmpl_put(tmpl);
out:
param->larval->adult = ERR_PTR(err);
param->larval->alg.cra_flags |= CRYPTO_ALG_DEAD;
	complete_all(&param->larval->completion);
	crypto_alg_put(&param->larval->alg);
kfree(param);
module_put_and_kthread_exit(0);
}
static int cryptomgr_schedule_probe(struct crypto_larval *larval)
{
struct task_struct *thread;
struct cryptomgr_param *param;
const char *name = larval->alg.cra_name;
const char *p;
unsigned int len;
int i;
if (!try_module_get(THIS_MODULE))
goto err;
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
goto err_put_module;
for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++)
;
len = p - name;
if (!len || *p != '(')
goto err_free_param;
memcpy(param->template, name, len);
i = 0;
for (;;) {
name = ++p;
for (; isalnum(*p) || *p == '-' || *p == '_'; p++)
;
if (*p == '(') {
int recursion = 0;
for (;;) {
if (!*++p)
goto err_free_param;
if (*p == '(')
recursion++;
else if (*p == ')' && !recursion--)
break;
}
p++;
}
len = p - name;
if (!len)
goto err_free_param;
param->attrs[i].attr.rta_len = sizeof(param->attrs[i]);
param->attrs[i].attr.rta_type = CRYPTOA_ALG;
memcpy(param->attrs[i].data.name, name, len);
		param->tb[i + 1] = &param->attrs[i].attr;
i++;
if (i >= CRYPTO_MAX_ATTRS)
goto err_free_param;
if (*p == ')')
break;
if (*p != ',')
goto err_free_param;
}
param->tb[i + 1] = NULL;
param->type.attr.rta_len = sizeof(param->type);
param->type.attr.rta_type = CRYPTOA_TYPE;
param->type.data.type = larval->alg.cra_flags & ~CRYPTO_ALG_TESTED;
param->type.data.mask = larval->mask & ~CRYPTO_ALG_TESTED;
	param->tb[0] = &param->type.attr;
param->otype = larval->alg.cra_flags;
param->omask = larval->mask;
crypto_alg_get(&larval->alg);
param->larval = larval;
thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe");
if (IS_ERR(thread))
goto err_put_larval;
return NOTIFY_STOP;
err_put_larval:
crypto_alg_put(&larval->alg);
err_free_param:
kfree(param);
err_put_module:
module_put(THIS_MODULE);
err:
return NOTIFY_OK;
}
static int cryptomgr_test(void *data)
{
struct crypto_test_param *param = data;
u32 type = param->type;
int err;
err = alg_test(param->driver, param->alg, type, CRYPTO_ALG_TESTED);
crypto_alg_tested(param->driver, err);
kfree(param);
module_put_and_kthread_exit(0);
}
static int cryptomgr_schedule_test(struct crypto_alg *alg)
{
struct task_struct *thread;
struct crypto_test_param *param;
if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
return NOTIFY_DONE;
if (!try_module_get(THIS_MODULE))
goto err;
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
goto err_put_module;
memcpy(param->driver, alg->cra_driver_name, sizeof(param->driver));
memcpy(param->alg, alg->cra_name, sizeof(param->alg));
param->type = alg->cra_flags;
thread = kthread_run(cryptomgr_test, param, "cryptomgr_test");
if (IS_ERR(thread))
goto err_free_param;
return NOTIFY_STOP;
err_free_param:
kfree(param);
err_put_module:
module_put(THIS_MODULE);
err:
return NOTIFY_OK;
}
static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
void *data)
{
switch (msg) {
case CRYPTO_MSG_ALG_REQUEST:
return cryptomgr_schedule_probe(data);
case CRYPTO_MSG_ALG_REGISTER:
return cryptomgr_schedule_test(data);
case CRYPTO_MSG_ALG_LOADED:
break;
}
return NOTIFY_DONE;
}
static struct notifier_block cryptomgr_notifier = {
.notifier_call = cryptomgr_notify,
};
static int __init cryptomgr_init(void)
{
return crypto_register_notifier(&cryptomgr_notifier);
}
static void __exit cryptomgr_exit(void)
{
int err = crypto_unregister_notifier(&cryptomgr_notifier);
BUG_ON(err);
}
/*
* This is arch_initcall() so that the crypto self-tests are run on algorithms
* registered early by subsys_initcall(). subsys_initcall() is needed for
* generic implementations so that they're available for comparison tests when
* other implementations are registered later by module_init().
*/
arch_initcall(cryptomgr_init);
module_exit(cryptomgr_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto Algorithm Manager");
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree file for OpenBlocks AX3-4 board
*
* Copyright (C) 2012 Marvell
*
* Thomas Petazzoni <[email protected]>
*/
/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include "armada-xp-mv78260.dtsi"
/ {
model = "PlatHome OpenBlocks AX3-4 board";
compatible = "plathome,openblocks-ax3-4", "marvell,armadaxp-mv78260", "marvell,armadaxp", "marvell,armada-370-xp";
chosen {
stdout-path = "serial0:115200n8";
};
memory@0 {
device_type = "memory";
reg = <0 0x00000000 0 0x40000000>; /* 1 GB soldered on */
};
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
MBUS_ID(0x01, 0x2f) 0 0 0xe8000000 0x8000000
MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000
MBUS_ID(0x0c, 0x04) 0 0 0xd1200000 0x100000>;
devbus-bootcs {
status = "okay";
/* Device Bus parameters are required */
/* Read parameters */
devbus,bus-width = <16>;
devbus,turn-off-ps = <60000>;
devbus,badr-skew-ps = <0>;
devbus,acc-first-ps = <124000>;
devbus,acc-next-ps = <248000>;
devbus,rd-setup-ps = <0>;
devbus,rd-hold-ps = <0>;
/* Write parameters */
devbus,sync-enable = <0>;
devbus,wr-high-ps = <60000>;
devbus,wr-low-ps = <60000>;
devbus,ale-wr-ps = <60000>;
/* NOR 128 MiB */
nor@0 {
compatible = "cfi-flash";
reg = <0 0x8000000>;
bank-width = <2>;
};
};
internal-regs {
rtc@10300 {
/* No crystal connected to the internal RTC */
status = "disabled";
};
serial@12000 {
status = "okay";
};
serial@12100 {
status = "okay";
};
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&led_pins>;
red_led {
label = "red_led";
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
default-state = "off";
};
yellow_led {
label = "yellow_led";
gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
default-state = "off";
};
green_led {
label = "green_led";
gpios = <&gpio1 21 GPIO_ACTIVE_LOW>;
default-state = "keep";
};
};
gpio-keys {
compatible = "gpio-keys";
button-init {
label = "Init Button";
linux,code = <KEY_POWER>;
gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
};
};
ethernet@70000 {
status = "okay";
phy = <&phy0>;
phy-mode = "sgmii";
buffer-manager = <&bm>;
bm,pool-long = <0>;
};
ethernet@74000 {
status = "okay";
phy = <&phy1>;
phy-mode = "sgmii";
buffer-manager = <&bm>;
bm,pool-long = <1>;
};
ethernet@30000 {
status = "okay";
phy = <&phy2>;
phy-mode = "sgmii";
buffer-manager = <&bm>;
bm,pool-long = <2>;
};
ethernet@34000 {
status = "okay";
phy = <&phy3>;
phy-mode = "sgmii";
buffer-manager = <&bm>;
bm,pool-long = <3>;
};
i2c@11000 {
status = "okay";
clock-frequency = <400000>;
};
i2c@11100 {
status = "okay";
clock-frequency = <400000>;
s35390a: s35390a@30 {
compatible = "s35390a";
reg = <0x30>;
};
};
sata@a0000 {
nr-ports = <2>;
status = "okay";
};
/* Front side USB 0 */
usb@50000 {
status = "okay";
};
/* Front side USB 1 */
usb@51000 {
status = "okay";
};
bm@c0000 {
status = "okay";
};
};
bm-bppi {
status = "okay";
};
};
};
&pciec {
status = "okay";
/* Internal mini-PCIe connector */
pcie@1,0 {
/* Port 0, Lane 0 */
status = "okay";
};
};
&mdio {
phy0: ethernet-phy@0 {
reg = <0>;
};
phy1: ethernet-phy@1 {
reg = <1>;
};
phy2: ethernet-phy@2 {
reg = <2>;
};
phy3: ethernet-phy@3 {
reg = <3>;
};
};
&pinctrl {
led_pins: led-pins-0 {
marvell,pins = "mpp49", "mpp51", "mpp53";
marvell,function = "gpio";
};
};
|
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
#ifndef HWS_BUDDY_H_
#define HWS_BUDDY_H_
struct mlx5hws_buddy_mem {
unsigned long **bitmap;
unsigned int *num_free;
u32 max_order;
};
struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order);
void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy);
int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order);
void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order);
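/*
 * Illustrative usage sketch (hypothetical caller, not part of this API):
 *
 *	struct mlx5hws_buddy_mem *buddy = mlx5hws_buddy_create(max_order);
 *	int seg;
 *
 *	if (buddy) {
 *		seg = mlx5hws_buddy_alloc_mem(buddy, order);
 *		if (seg >= 0)
 *			mlx5hws_buddy_free_mem(buddy, seg, order);
 *		mlx5hws_buddy_cleanup(buddy);
 *	}
 */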
#endif /* HWS_BUDDY_H_ */
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* offload engine driver for the Marvell XOR engine
* Copyright (C) 2007, 2008, Marvell International Ltd.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>
#include "dmaengine.h"
#include "mv_xor.h"
enum mv_xor_type {
XOR_ORION,
XOR_ARMADA_38X,
XOR_ARMADA_37XX,
};
enum mv_xor_mode {
XOR_MODE_IN_REG,
XOR_MODE_IN_DESC,
};
static void mv_xor_issue_pending(struct dma_chan *chan);
#define to_mv_xor_chan(chan) \
container_of(chan, struct mv_xor_chan, dmachan)
#define to_mv_xor_slot(tx) \
container_of(tx, struct mv_xor_desc_slot, async_tx)
#define mv_chan_to_devp(chan) \
((chan)->dmadev.dev)
static void mv_desc_init(struct mv_xor_desc_slot *desc,
dma_addr_t addr, u32 byte_count,
enum dma_ctrl_flags flags)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
hw_desc->status = XOR_DESC_DMA_OWNED;
hw_desc->phy_next_desc = 0;
/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
XOR_DESC_EOD_INT_EN : 0;
hw_desc->phy_dest_addr = addr;
hw_desc->byte_count = byte_count;
}
static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
switch (desc->type) {
case DMA_XOR:
case DMA_INTERRUPT:
hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
break;
case DMA_MEMCPY:
hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
break;
default:
BUG();
return;
}
}
static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
u32 next_desc_addr)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
BUG_ON(hw_desc->phy_next_desc);
hw_desc->phy_next_desc = next_desc_addr;
}
static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
int index, dma_addr_t addr)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
if (desc->type == DMA_XOR)
hw_desc->desc_command |= (1 << index);
}
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
return readl_relaxed(XOR_CURR_DESC(chan));
}
static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
u32 next_desc_addr)
{
writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
u32 val = readl_relaxed(XOR_INTR_MASK(chan));
val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
writel_relaxed(val, XOR_INTR_MASK(chan));
}
static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
return intr_cause;
}
static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
u32 val;
val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
val = ~(val << (chan->idx * 16));
dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
u32 val = 0xFFFF0000 >> (chan->idx * 16);
writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static void mv_chan_set_mode(struct mv_xor_chan *chan,
u32 op_mode)
{
u32 config = readl_relaxed(XOR_CONFIG(chan));
config &= ~0x7;
config |= op_mode;
#if defined(__BIG_ENDIAN)
config |= XOR_DESCRIPTOR_SWAP;
#else
config &= ~XOR_DESCRIPTOR_SWAP;
#endif
writel_relaxed(config, XOR_CONFIG(chan));
}
static void mv_chan_activate(struct mv_xor_chan *chan)
{
dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
/* writel ensures all descriptors are flushed before activation */
writel(BIT(0), XOR_ACTIVATION(chan));
}
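/*
 * The channel state is encoded in bits [5:4] of the activation register;
 * the code below treats state 1 as "busy" and everything else as idle.
 */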
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
u32 state = readl_relaxed(XOR_ACTIVATION(chan));
state = (state >> 4) & 0x3;
return (state == 1) ? 1 : 0;
}
/*
* mv_chan_start_new_chain - program the engine to operate on new
* chain headed by sw_desc
* Caller must hold &mv_chan->lock while calling this function
*/
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
struct mv_xor_desc_slot *sw_desc)
{
dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
__func__, __LINE__, sw_desc);
/* set the hardware chain */
mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
mv_chan->pending++;
mv_xor_issue_pending(&mv_chan->dmachan);
}
static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
struct mv_xor_chan *mv_chan,
dma_cookie_t cookie)
{
BUG_ON(desc->async_tx.cookie < 0);
if (desc->async_tx.cookie > 0) {
cookie = desc->async_tx.cookie;
dma_descriptor_unmap(&desc->async_tx);
/* call the callback (must not sleep or submit new
* operations to this channel)
*/
dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
}
/* run dependent operations */
dma_run_dependencies(&desc->async_tx);
return cookie;
}
static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
struct mv_xor_desc_slot *iter, *_iter;
dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
node) {
if (async_tx_test_ack(&iter->async_tx)) {
list_move_tail(&iter->node, &mv_chan->free_slots);
if (!list_empty(&iter->sg_tx_list)) {
list_splice_tail_init(&iter->sg_tx_list,
&mv_chan->free_slots);
}
}
}
return 0;
}
static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
struct mv_xor_chan *mv_chan)
{
dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
__func__, __LINE__, desc, desc->async_tx.flags);
/* the client is allowed to attach dependent operations
* until 'ack' is set
*/
if (!async_tx_test_ack(&desc->async_tx)) {
/* move this slot to the completed_slots */
list_move_tail(&desc->node, &mv_chan->completed_slots);
if (!list_empty(&desc->sg_tx_list)) {
list_splice_tail_init(&desc->sg_tx_list,
&mv_chan->completed_slots);
}
} else {
list_move_tail(&desc->node, &mv_chan->free_slots);
if (!list_empty(&desc->sg_tx_list)) {
list_splice_tail_init(&desc->sg_tx_list,
&mv_chan->free_slots);
}
}
return 0;
}
/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
struct mv_xor_desc_slot *iter, *_iter;
dma_cookie_t cookie = 0;
int busy = mv_chan_is_busy(mv_chan);
u32 current_desc = mv_chan_get_current_desc(mv_chan);
int current_cleaned = 0;
struct mv_xor_desc *hw_desc;
dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
mv_chan_clean_completed_slots(mv_chan);
/* free completed slots from the chain starting with
* the oldest descriptor
*/
list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
node) {
/* clean finished descriptors */
hw_desc = iter->hw_desc;
if (hw_desc->status & XOR_DESC_SUCCESS) {
cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
cookie);
/* done processing desc, clean slot */
mv_desc_clean_slot(iter, mv_chan);
			/* break if we just cleaned the current descriptor */
if (iter->async_tx.phys == current_desc) {
current_cleaned = 1;
break;
}
} else {
if (iter->async_tx.phys == current_desc) {
current_cleaned = 0;
break;
}
}
}
if ((busy == 0) && !list_empty(&mv_chan->chain)) {
if (current_cleaned) {
/*
* current descriptor cleaned and removed, run
* from list head
*/
iter = list_entry(mv_chan->chain.next,
struct mv_xor_desc_slot,
node);
mv_chan_start_new_chain(mv_chan, iter);
} else {
if (!list_is_last(&iter->node, &mv_chan->chain)) {
/*
* descriptors are still waiting after
* current, trigger them
*/
iter = list_entry(iter->node.next,
struct mv_xor_desc_slot,
node);
mv_chan_start_new_chain(mv_chan, iter);
} else {
/*
* some descriptors are still waiting
* to be cleaned
*/
tasklet_schedule(&mv_chan->irq_tasklet);
}
}
}
if (cookie > 0)
mv_chan->dmachan.completed_cookie = cookie;
}
static void mv_xor_tasklet(struct tasklet_struct *t)
{
struct mv_xor_chan *chan = from_tasklet(chan, t, irq_tasklet);
spin_lock(&chan->lock);
mv_chan_slot_cleanup(chan);
spin_unlock(&chan->lock);
}
static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
struct mv_xor_desc_slot *iter;
spin_lock_bh(&mv_chan->lock);
if (!list_empty(&mv_chan->free_slots)) {
iter = list_first_entry(&mv_chan->free_slots,
struct mv_xor_desc_slot,
node);
list_move_tail(&iter->node, &mv_chan->allocated_slots);
spin_unlock_bh(&mv_chan->lock);
/* pre-ack descriptor */
async_tx_ack(&iter->async_tx);
iter->async_tx.cookie = -EBUSY;
return iter;
}
spin_unlock_bh(&mv_chan->lock);
/* try to free some slots if the allocation fails */
tasklet_schedule(&mv_chan->irq_tasklet);
return NULL;
}
/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
struct mv_xor_desc_slot *old_chain_tail;
dma_cookie_t cookie;
int new_hw_chain = 1;
dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p: async_tx %p\n",
__func__, sw_desc, &sw_desc->async_tx);
spin_lock_bh(&mv_chan->lock);
cookie = dma_cookie_assign(tx);
if (list_empty(&mv_chan->chain))
list_move_tail(&sw_desc->node, &mv_chan->chain);
else {
new_hw_chain = 0;
old_chain_tail = list_entry(mv_chan->chain.prev,
struct mv_xor_desc_slot,
node);
list_move_tail(&sw_desc->node, &mv_chan->chain);
dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
&old_chain_tail->async_tx.phys);
/* fix up the hardware chain */
mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);
/* if the channel is not busy */
if (!mv_chan_is_busy(mv_chan)) {
u32 current_desc = mv_chan_get_current_desc(mv_chan);
/*
* and the current desc is the end of the chain before
* the append, then we need to start the channel
*/
if (current_desc == old_chain_tail->async_tx.phys)
new_hw_chain = 1;
}
}
if (new_hw_chain)
mv_chan_start_new_chain(mv_chan, sw_desc);
spin_unlock_bh(&mv_chan->lock);
return cookie;
}
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
void *virt_desc;
dma_addr_t dma_desc;
int idx;
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE / MV_XOR_SLOT_SIZE;
/* Allocate descriptor slots */
idx = mv_chan->slots_allocated;
while (idx < num_descs_in_pool) {
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots\n",
				 idx);
break;
}
virt_desc = mv_chan->dma_desc_pool_virt;
slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->node);
INIT_LIST_HEAD(&slot->sg_tx_list);
dma_desc = mv_chan->dma_desc_pool;
slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
slot->idx = idx++;
spin_lock_bh(&mv_chan->lock);
mv_chan->slots_allocated = idx;
list_add_tail(&slot->node, &mv_chan->free_slots);
spin_unlock_bh(&mv_chan->lock);
}
dev_dbg(mv_chan_to_devp(mv_chan),
"allocated %d descriptor slots\n",
mv_chan->slots_allocated);
return mv_chan->slots_allocated ? : -ENOMEM;
}
/*
 * Check if the source or destination is a PCIe/IO address (non-SDRAM) and
 * add a new MBus window if necessary. Use a cache for these checks so that
 * the MMIO-mapped registers don't have to be accessed every time, which
 * speeds up the process.
*/
static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
{
struct mv_xor_device *xordev = mv_chan->xordev;
void __iomem *base = mv_chan->mmr_high_base;
u32 win_enable;
u32 size;
u8 target, attr;
int ret;
int i;
/* Nothing needs to get done for the Armada 3700 */
if (xordev->xor_type == XOR_ARMADA_37XX)
return 0;
/*
	 * Loop over the cached windows to check if the requested area is
	 * already mapped. If this is the case, nothing needs to be done
	 * and we can return.
*/
for (i = 0; i < WINDOW_COUNT; i++) {
if (addr >= xordev->win_start[i] &&
addr <= xordev->win_end[i]) {
/* Window is already mapped */
return 0;
}
}
/*
* The window is not mapped, so we need to create the new mapping
*/
	/* If no IO window is found, the addr has to be located in SDRAM */
ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
if (ret < 0)
return 0;
/*
* Mask the base addr 'addr' according to 'size' read back from the
* MBus window. Otherwise we might end up with an address located
	 * somewhere in the middle of this area.
*/
size -= 1;
addr &= ~size;
/*
	 * Reading either of the two enable registers is enough, as they are
	 * always programmed with identical values
*/
win_enable = readl(base + WINDOW_BAR_ENABLE(0));
/* Set 'i' to the first free window to write the new values to */
i = ffs(~win_enable) - 1;
if (i >= WINDOW_COUNT)
return -ENOMEM;
writel((addr & 0xffff0000) | (attr << 8) | target,
base + WINDOW_BASE(i));
writel(size & 0xffff0000, base + WINDOW_SIZE(i));
/* Fill the caching variables for later use */
xordev->win_start[i] = addr;
xordev->win_end[i] = addr + size;
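	/*
	 * Bit 'i' enables window 'i'; the 2-bit field at 16 + 2*i looks
	 * like a per-window access-control field, with 3 presumably
	 * granting full read/write access (an assumption -- the driver
	 * simply programs every window the same way, here and in
	 * mv_xor_conf_mbus_windows() below).
	 */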
win_enable |= (1 << i);
win_enable |= 3 << (16 + (2 * i));
writel(win_enable, base + WINDOW_BAR_ENABLE(0));
writel(win_enable, base + WINDOW_BAR_ENABLE(1));
return 0;
}
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *sw_desc;
int ret;
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
dev_dbg(mv_chan_to_devp(mv_chan),
"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
__func__, src_cnt, len, &dest, flags);
/* Check if a new window needs to get added for 'dest' */
ret = mv_xor_add_io_win(mv_chan, dest);
if (ret)
return NULL;
sw_desc = mv_chan_alloc_slot(mv_chan);
if (sw_desc) {
sw_desc->type = DMA_XOR;
sw_desc->async_tx.flags = flags;
mv_desc_init(sw_desc, dest, len, flags);
if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
mv_desc_set_mode(sw_desc);
while (src_cnt--) {
/* Check if a new window needs to get added for 'src' */
ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
if (ret)
return NULL;
mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
}
}
dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p \n",
__func__, sw_desc, &sw_desc->async_tx);
return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{
/*
* A MEMCPY operation is identical to an XOR operation with only
* a single source address.
*/
return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
dma_addr_t src, dest;
size_t len;
src = mv_chan->dummy_src_addr;
dest = mv_chan->dummy_dst_addr;
len = MV_XOR_MIN_BYTE_COUNT;
/*
* We implement the DMA_INTERRUPT operation as a minimum sized
* XOR operation with a single dummy source address.
*/
return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *iter, *_iter;
int in_use_descs = 0;
spin_lock_bh(&mv_chan->lock);
mv_chan_slot_cleanup(mv_chan);
list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
node) {
in_use_descs++;
list_move_tail(&iter->node, &mv_chan->free_slots);
}
list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
node) {
in_use_descs++;
list_move_tail(&iter->node, &mv_chan->free_slots);
}
list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
node) {
in_use_descs++;
list_move_tail(&iter->node, &mv_chan->free_slots);
}
list_for_each_entry_safe_reverse(
iter, _iter, &mv_chan->free_slots, node) {
list_del(&iter->node);
kfree(iter);
mv_chan->slots_allocated--;
}
dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
__func__, mv_chan->slots_allocated);
spin_unlock_bh(&mv_chan->lock);
if (in_use_descs)
dev_err(mv_chan_to_devp(mv_chan),
"freeing %d in use descriptors!\n", in_use_descs);
}
/**
* mv_xor_status - poll the status of an XOR transaction
* @chan: XOR channel handle
* @cookie: XOR transaction identifier
 * @txstate: XOR transaction state holder (or NULL)
*/
static enum dma_status mv_xor_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_COMPLETE)
return ret;
spin_lock_bh(&mv_chan->lock);
mv_chan_slot_cleanup(mv_chan);
spin_unlock_bh(&mv_chan->lock);
return dma_cookie_status(chan, cookie, txstate);
}
static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
u32 val;
val = readl_relaxed(XOR_CONFIG(chan));
dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
val = readl_relaxed(XOR_ACTIVATION(chan));
dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
val = readl_relaxed(XOR_INTR_CAUSE(chan));
dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
val = readl_relaxed(XOR_INTR_MASK(chan));
dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
val = readl_relaxed(XOR_ERROR_CAUSE(chan));
dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
val = readl_relaxed(XOR_ERROR_ADDR(chan));
dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}
static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
u32 intr_cause)
{
if (intr_cause & XOR_INT_ERR_DECODE) {
dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
return;
}
dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
chan->idx, intr_cause);
mv_chan_dump_regs(chan);
WARN_ON(1);
}
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
struct mv_xor_chan *chan = data;
u32 intr_cause = mv_chan_get_intr_cause(chan);
dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
if (intr_cause & XOR_INTR_ERRORS)
mv_chan_err_interrupt_handler(chan, intr_cause);
tasklet_schedule(&chan->irq_tasklet);
mv_chan_clear_eoc_cause(chan);
return IRQ_HANDLED;
}
static void mv_xor_issue_pending(struct dma_chan *chan)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
if (mv_chan->pending >= MV_XOR_THRESHOLD) {
mv_chan->pending = 0;
mv_chan_activate(mv_chan);
}
}
/*
* Perform a transaction to verify the HW works.
*/
static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
int i, ret;
void *src, *dest;
dma_addr_t src_dma, dest_dma;
struct dma_chan *dma_chan;
dma_cookie_t cookie;
struct dma_async_tx_descriptor *tx;
struct dmaengine_unmap_data *unmap;
int err = 0;
src = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!src)
return -ENOMEM;
dest = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!dest) {
kfree(src);
return -ENOMEM;
}
/* Fill in src buffer */
for (i = 0; i < PAGE_SIZE; i++)
((u8 *) src)[i] = (u8)i;
dma_chan = &mv_chan->dmachan;
if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
}
unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
if (!unmap) {
err = -ENOMEM;
goto free_resources;
}
src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
offset_in_page(src), PAGE_SIZE,
DMA_TO_DEVICE);
unmap->addr[0] = src_dma;
ret = dma_mapping_error(dma_chan->device->dev, src_dma);
if (ret) {
err = -ENOMEM;
goto free_resources;
}
unmap->to_cnt = 1;
dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
offset_in_page(dest), PAGE_SIZE,
DMA_FROM_DEVICE);
unmap->addr[1] = dest_dma;
ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
if (ret) {
err = -ENOMEM;
goto free_resources;
}
unmap->from_cnt = 1;
unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
PAGE_SIZE, 0);
if (!tx) {
dev_err(dma_chan->device->dev,
"Self-test cannot prepare operation, disabling\n");
err = -ENODEV;
goto free_resources;
}
cookie = mv_xor_tx_submit(tx);
if (dma_submit_error(cookie)) {
dev_err(dma_chan->device->dev,
"Self-test submit error, disabling\n");
err = -ENODEV;
goto free_resources;
}
mv_xor_issue_pending(dma_chan);
async_tx_ack(tx);
msleep(1);
if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
PAGE_SIZE, DMA_FROM_DEVICE);
if (memcmp(src, dest, PAGE_SIZE)) {
dev_err(dma_chan->device->dev,
"Self-test copy failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
}
free_resources:
dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
kfree(src);
kfree(dest);
return err;
}
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
int i, src_idx, ret;
struct page *dest;
struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
dma_addr_t dest_dma;
struct dma_async_tx_descriptor *tx;
struct dmaengine_unmap_data *unmap;
struct dma_chan *dma_chan;
dma_cookie_t cookie;
u8 cmp_byte = 0;
u32 cmp_word;
int err = 0;
int src_count = MV_XOR_NUM_SRC_TEST;
for (src_idx = 0; src_idx < src_count; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
if (!xor_srcs[src_idx]) {
while (src_idx--)
__free_page(xor_srcs[src_idx]);
return -ENOMEM;
}
}
dest = alloc_page(GFP_KERNEL);
if (!dest) {
while (src_idx--)
__free_page(xor_srcs[src_idx]);
return -ENOMEM;
}
/* Fill in src buffers */
for (src_idx = 0; src_idx < src_count; src_idx++) {
u8 *ptr = page_address(xor_srcs[src_idx]);
for (i = 0; i < PAGE_SIZE; i++)
ptr[i] = (1 << src_idx);
}
for (src_idx = 0; src_idx < src_count; src_idx++)
cmp_byte ^= (u8) (1 << src_idx);
cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
(cmp_byte << 8) | cmp_byte;
memset(page_address(dest), 0, PAGE_SIZE);
dma_chan = &mv_chan->dmachan;
if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
}
unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
GFP_KERNEL);
if (!unmap) {
err = -ENOMEM;
goto free_resources;
}
/* test xor */
for (i = 0; i < src_count; i++) {
unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
0, PAGE_SIZE, DMA_TO_DEVICE);
dma_srcs[i] = unmap->addr[i];
ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
if (ret) {
err = -ENOMEM;
goto free_resources;
}
unmap->to_cnt++;
}
unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
DMA_FROM_DEVICE);
dest_dma = unmap->addr[src_count];
ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
if (ret) {
err = -ENOMEM;
goto free_resources;
}
unmap->from_cnt = 1;
unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
src_count, PAGE_SIZE, 0);
if (!tx) {
dev_err(dma_chan->device->dev,
"Self-test cannot prepare operation, disabling\n");
err = -ENODEV;
goto free_resources;
}
cookie = mv_xor_tx_submit(tx);
if (dma_submit_error(cookie)) {
dev_err(dma_chan->device->dev,
"Self-test submit error, disabling\n");
err = -ENODEV;
goto free_resources;
}
mv_xor_issue_pending(dma_chan);
async_tx_ack(tx);
msleep(8);
if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
u32 *ptr = page_address(dest);
if (ptr[i] != cmp_word) {
dev_err(dma_chan->device->dev,
"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
i, ptr[i], cmp_word);
err = -ENODEV;
goto free_resources;
}
}
free_resources:
dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
src_idx = src_count;
while (src_idx--)
__free_page(xor_srcs[src_idx]);
__free_page(dest);
return err;
}
static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
struct dma_chan *chan, *_chan;
struct device *dev = mv_chan->dmadev.dev;
dma_async_device_unregister(&mv_chan->dmadev);
dma_free_coherent(dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
dma_unmap_single(dev, mv_chan->dummy_src_addr,
MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
dma_unmap_single(dev, mv_chan->dummy_dst_addr,
MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
device_node) {
list_del(&chan->device_node);
}
free_irq(mv_chan->irq, mv_chan);
return 0;
}
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
struct platform_device *pdev,
int idx, dma_cap_mask_t cap_mask, int irq)
{
int ret = 0;
struct mv_xor_chan *mv_chan;
struct dma_device *dma_dev;
mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
if (!mv_chan)
return ERR_PTR(-ENOMEM);
mv_chan->idx = idx;
mv_chan->irq = irq;
if (xordev->xor_type == XOR_ORION)
mv_chan->op_in_desc = XOR_MODE_IN_REG;
else
mv_chan->op_in_desc = XOR_MODE_IN_DESC;
dma_dev = &mv_chan->dmadev;
dma_dev->dev = &pdev->dev;
mv_chan->xordev = xordev;
/*
* These source and destination dummy buffers are used to implement
* a DMA_INTERRUPT operation as a minimum-sized XOR operation.
* Hence, we only need to map the buffers at initialization-time.
*/
mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
/* allocate coherent memory for hardware descriptors
* note: writecombine gives slightly better performance, but
* requires that we explicitly flush the writes
*/
mv_chan->dma_desc_pool_virt =
dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
GFP_KERNEL);
if (!mv_chan->dma_desc_pool_virt)
return ERR_PTR(-ENOMEM);
/* discover transaction capabilities from the platform data */
dma_dev->cap_mask = cap_mask;
INIT_LIST_HEAD(&dma_dev->channels);
/* set base routines */
dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
dma_dev->device_tx_status = mv_xor_status;
dma_dev->device_issue_pending = mv_xor_issue_pending;
/* set prep routines based on capability */
if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
dma_dev->max_xor = 8;
dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
}
mv_chan->mmr_base = xordev->xor_base;
mv_chan->mmr_high_base = xordev->xor_high_base;
tasklet_setup(&mv_chan->irq_tasklet, mv_xor_tasklet);
/* clear errors before enabling interrupts */
mv_chan_clear_err_status(mv_chan);
ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
0, dev_name(&pdev->dev), mv_chan);
if (ret)
goto err_free_dma;
mv_chan_unmask_interrupts(mv_chan);
if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
else
mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);
spin_lock_init(&mv_chan->lock);
INIT_LIST_HEAD(&mv_chan->chain);
INIT_LIST_HEAD(&mv_chan->completed_slots);
INIT_LIST_HEAD(&mv_chan->free_slots);
INIT_LIST_HEAD(&mv_chan->allocated_slots);
mv_chan->dmachan.device = dma_dev;
dma_cookie_init(&mv_chan->dmachan);
list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
ret = mv_chan_memcpy_self_test(mv_chan);
dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
if (ret)
goto err_free_irq;
}
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
ret = mv_chan_xor_self_test(mv_chan);
dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
if (ret)
goto err_free_irq;
}
dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_free_irq;
return mv_chan;
err_free_irq:
free_irq(mv_chan->irq, mv_chan);
err_free_dma:
dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
return ERR_PTR(ret);
}
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
const struct mbus_dram_target_info *dram)
{
void __iomem *base = xordev->xor_high_base;
u32 win_enable = 0;
int i;
for (i = 0; i < 8; i++) {
writel(0, base + WINDOW_BASE(i));
writel(0, base + WINDOW_SIZE(i));
if (i < 4)
writel(0, base + WINDOW_REMAP_HIGH(i));
}
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
writel((cs->base & 0xffff0000) |
(cs->mbus_attr << 8) |
dram->mbus_dram_target_id, base + WINDOW_BASE(i));
writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
/* Fill the caching variables for later use */
xordev->win_start[i] = cs->base;
xordev->win_end[i] = cs->base + cs->size - 1;
win_enable |= (1 << i);
win_enable |= 3 << (16 + (2 * i));
}
writel(win_enable, base + WINDOW_BAR_ENABLE(0));
writel(win_enable, base + WINDOW_BAR_ENABLE(1));
writel(0, base + WINDOW_OVERRIDE_CTRL(0));
writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
static void
mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
{
void __iomem *base = xordev->xor_high_base;
u32 win_enable = 0;
int i;
for (i = 0; i < 8; i++) {
writel(0, base + WINDOW_BASE(i));
writel(0, base + WINDOW_SIZE(i));
if (i < 4)
writel(0, base + WINDOW_REMAP_HIGH(i));
}
	/*
	 * For the Armada 3700, open the default 4GB MBus window; the
	 * DRAM-related configuration is done at the AXI level.
	 */
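	/*
	 * 0xffff0000 is (4GiB - 1) & 0xffff0000, the same (size - 1)
	 * encoding used for the per-CS windows in
	 * mv_xor_conf_mbus_windows() above.
	 */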
writel(0xffff0000, base + WINDOW_SIZE(0));
win_enable |= 1;
win_enable |= 3 << 16;
writel(win_enable, base + WINDOW_BAR_ENABLE(0));
writel(win_enable, base + WINDOW_BAR_ENABLE(1));
writel(0, base + WINDOW_OVERRIDE_CTRL(0));
writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
/*
* Since this XOR driver is basically used only for RAID5, we don't
* need to care about synchronizing ->suspend with DMA activity,
* because the DMA engine will naturally be quiet due to the block
* devices being suspended.
*/
static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
{
struct mv_xor_device *xordev = platform_get_drvdata(pdev);
int i;
for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
struct mv_xor_chan *mv_chan = xordev->channels[i];
if (!mv_chan)
continue;
mv_chan->saved_config_reg =
readl_relaxed(XOR_CONFIG(mv_chan));
mv_chan->saved_int_mask_reg =
readl_relaxed(XOR_INTR_MASK(mv_chan));
}
return 0;
}
static int mv_xor_resume(struct platform_device *dev)
{
struct mv_xor_device *xordev = platform_get_drvdata(dev);
const struct mbus_dram_target_info *dram;
int i;
for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
struct mv_xor_chan *mv_chan = xordev->channels[i];
if (!mv_chan)
continue;
writel_relaxed(mv_chan->saved_config_reg,
XOR_CONFIG(mv_chan));
writel_relaxed(mv_chan->saved_int_mask_reg,
XOR_INTR_MASK(mv_chan));
}
if (xordev->xor_type == XOR_ARMADA_37XX) {
mv_xor_conf_mbus_windows_a3700(xordev);
return 0;
}
dram = mv_mbus_dram_info();
if (dram)
mv_xor_conf_mbus_windows(xordev, dram);
return 0;
}
static const struct of_device_id mv_xor_dt_ids[] = {
{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
{},
};
static unsigned int mv_xor_engine_count;
static int mv_xor_probe(struct platform_device *pdev)
{
const struct mbus_dram_target_info *dram;
struct mv_xor_device *xordev;
struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *res;
unsigned int max_engines, max_channels;
int i, ret;
dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
if (!xordev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!xordev->xor_base)
return -EBUSY;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return -ENODEV;
xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!xordev->xor_high_base)
return -EBUSY;
platform_set_drvdata(pdev, xordev);
	/*
	 * We need to know which type of XOR device we use before setting
	 * it up. In the non-DT case it can only be the legacy one.
	 */
xordev->xor_type = XOR_ORION;
if (pdev->dev.of_node)
xordev->xor_type = (uintptr_t)device_get_match_data(&pdev->dev);
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
if (xordev->xor_type == XOR_ARMADA_37XX) {
mv_xor_conf_mbus_windows_a3700(xordev);
} else {
dram = mv_mbus_dram_info();
if (dram)
mv_xor_conf_mbus_windows(xordev, dram);
}
	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
xordev->clk = clk_get(&pdev->dev, NULL);
if (!IS_ERR(xordev->clk))
clk_prepare_enable(xordev->clk);
	/*
	 * We don't want more than one channel per CPU, so that async_tx
	 * performs well; limit the number of engines and channels to
	 * respect this constraint. Note that we also want to use channels
	 * from separate engines when possible. On the dual-CPU Armada 3700
	 * SoC, which has a single XOR engine, allow using both of its
	 * channels.
	 */
max_engines = num_present_cpus();
if (xordev->xor_type == XOR_ARMADA_37XX)
max_channels = num_present_cpus();
else
max_channels = min_t(unsigned int,
MV_XOR_MAX_CHANNELS,
DIV_ROUND_UP(num_present_cpus(), 2));
if (mv_xor_engine_count >= max_engines)
return 0;
if (pdev->dev.of_node) {
struct device_node *np;
int i = 0;
for_each_child_of_node(pdev->dev.of_node, np) {
struct mv_xor_chan *chan;
dma_cap_mask_t cap_mask;
int irq;
if (i >= max_channels)
continue;
dma_cap_zero(cap_mask);
dma_cap_set(DMA_MEMCPY, cap_mask);
dma_cap_set(DMA_XOR, cap_mask);
dma_cap_set(DMA_INTERRUPT, cap_mask);
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
ret = -ENODEV;
goto err_channel_add;
}
chan = mv_xor_channel_add(xordev, pdev, i,
cap_mask, irq);
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
irq_dispose_mapping(irq);
goto err_channel_add;
}
xordev->channels[i] = chan;
i++;
}
} else if (pdata && pdata->channels) {
for (i = 0; i < max_channels; i++) {
struct mv_xor_channel_data *cd;
struct mv_xor_chan *chan;
int irq;
cd = &pdata->channels[i];
irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = irq;
goto err_channel_add;
}
chan = mv_xor_channel_add(xordev, pdev, i,
cd->cap_mask, irq);
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
goto err_channel_add;
}
xordev->channels[i] = chan;
}
}
return 0;
err_channel_add:
for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
if (xordev->channels[i]) {
mv_xor_channel_remove(xordev->channels[i]);
if (pdev->dev.of_node)
irq_dispose_mapping(xordev->channels[i]->irq);
}
if (!IS_ERR(xordev->clk)) {
clk_disable_unprepare(xordev->clk);
clk_put(xordev->clk);
}
return ret;
}
static struct platform_driver mv_xor_driver = {
.probe = mv_xor_probe,
.suspend = mv_xor_suspend,
.resume = mv_xor_resume,
.driver = {
.name = MV_XOR_NAME,
.of_match_table = mv_xor_dt_ids,
},
};
builtin_platform_driver(mv_xor_driver);
/*
MODULE_AUTHOR("Saeed Bishara <[email protected]>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
*/
// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS cell and server record management
*
* Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <keys/rxrpc-type.h>
#include "internal.h"
static unsigned __read_mostly afs_cell_gc_delay = 10;
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
static atomic_t cell_debug_id;
static void afs_queue_cell_manager(struct afs_net *);
static void afs_manage_cell_work(struct work_struct *);
static void afs_dec_cells_outstanding(struct afs_net *net)
{
if (atomic_dec_and_test(&net->cells_outstanding))
wake_up_var(&net->cells_outstanding);
}
/*
* Set the cell timer to fire after a given delay, assuming it's not already
* set for an earlier time.
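 *
 * timer_reduce() returns nonzero if the timer was already pending; in that
 * case the increment on cells_outstanding taken below is dropped again,
 * since (presumably) the earlier arming of the timer already accounts for
 * one.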
*/
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
if (net->live) {
atomic_inc(&net->cells_outstanding);
if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
afs_dec_cells_outstanding(net);
} else {
afs_queue_cell_manager(net);
}
}
/*
* Look up and get an activation reference on a cell record. The caller must
* hold net->cells_lock at least read-locked.
*/
static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
const char *name, unsigned int namesz,
enum afs_cell_trace reason)
{
struct afs_cell *cell = NULL;
struct rb_node *p;
int n;
_enter("%*.*s", namesz, namesz, name);
if (name && namesz == 0)
return ERR_PTR(-EINVAL);
if (namesz > AFS_MAXCELLNAME)
return ERR_PTR(-ENAMETOOLONG);
if (!name) {
cell = net->ws_cell;
if (!cell)
return ERR_PTR(-EDESTADDRREQ);
goto found;
}
p = net->cells.rb_node;
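	/*
	 * Cell names are compared case-insensitively; when one name is a
	 * prefix of the other, the length difference breaks the tie so that
	 * distinct names always order deterministically in the tree.
	 */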
while (p) {
cell = rb_entry(p, struct afs_cell, net_node);
n = strncasecmp(cell->name, name,
min_t(size_t, cell->name_len, namesz));
if (n == 0)
n = cell->name_len - namesz;
if (n < 0)
p = p->rb_left;
else if (n > 0)
p = p->rb_right;
else
goto found;
}
return ERR_PTR(-ENOENT);
found:
return afs_use_cell(cell, reason);
}
/*
* Look up and get an activation reference on a cell record.
*/
struct afs_cell *afs_find_cell(struct afs_net *net,
const char *name, unsigned int namesz,
enum afs_cell_trace reason)
{
struct afs_cell *cell;
down_read(&net->cells_lock);
cell = afs_find_cell_locked(net, name, namesz, reason);
up_read(&net->cells_lock);
return cell;
}
/*
* Set up a cell record and fill in its name, VL server address list and
* allocate an anonymous key
*/
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
const char *name, unsigned int namelen,
const char *addresses)
{
struct afs_vlserver_list *vllist;
struct afs_cell *cell;
int i, ret;
ASSERT(name);
if (namelen == 0)
return ERR_PTR(-EINVAL);
if (namelen > AFS_MAXCELLNAME) {
_leave(" = -ENAMETOOLONG");
return ERR_PTR(-ENAMETOOLONG);
}
/* Prohibit cell names that contain unprintable chars, '/' and '@' or
* that begin with a dot. This also precludes "@cell".
*/
if (name[0] == '.')
return ERR_PTR(-EINVAL);
for (i = 0; i < namelen; i++) {
char ch = name[i];
if (!isprint(ch) || ch == '/' || ch == '@')
return ERR_PTR(-EINVAL);
}
_enter("%*.*s,%s", namelen, namelen, name, addresses);
cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
if (!cell) {
_leave(" = -ENOMEM");
return ERR_PTR(-ENOMEM);
}
cell->name = kmalloc(namelen + 1, GFP_KERNEL);
if (!cell->name) {
kfree(cell);
return ERR_PTR(-ENOMEM);
}
cell->net = net;
cell->name_len = namelen;
for (i = 0; i < namelen; i++)
cell->name[i] = tolower(name[i]);
cell->name[i] = 0;
refcount_set(&cell->ref, 1);
atomic_set(&cell->active, 0);
INIT_WORK(&cell->manager, afs_manage_cell_work);
init_rwsem(&cell->vs_lock);
cell->volumes = RB_ROOT;
INIT_HLIST_HEAD(&cell->proc_volumes);
seqlock_init(&cell->volume_lock);
cell->fs_servers = RB_ROOT;
seqlock_init(&cell->fs_lock);
rwlock_init(&cell->vl_servers_lock);
cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
/* Provide a VL server list, filling it in if we were given a list of
* addresses to use.
*/
if (addresses) {
vllist = afs_parse_text_addrs(net,
addresses, strlen(addresses), ':',
VL_SERVICE, AFS_VL_PORT);
if (IS_ERR(vllist)) {
ret = PTR_ERR(vllist);
goto parse_failed;
}
vllist->source = DNS_RECORD_FROM_CONFIG;
vllist->status = DNS_LOOKUP_NOT_DONE;
cell->dns_expiry = TIME64_MAX;
} else {
ret = -ENOMEM;
vllist = afs_alloc_vlserver_list(0);
if (!vllist)
goto error;
vllist->source = DNS_RECORD_UNAVAILABLE;
vllist->status = DNS_LOOKUP_NOT_DONE;
cell->dns_expiry = ktime_get_real_seconds();
}
rcu_assign_pointer(cell->vl_servers, vllist);
cell->dns_source = vllist->source;
cell->dns_status = vllist->status;
smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
atomic_inc(&net->cells_outstanding);
cell->debug_id = atomic_inc_return(&cell_debug_id);
trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc);
_leave(" = %p", cell);
return cell;
parse_failed:
if (ret == -EINVAL)
printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
kfree(cell->name);
kfree(cell);
_leave(" = %d", ret);
return ERR_PTR(ret);
}
/*
* afs_lookup_cell - Look up or create a cell record.
* @net: The network namespace
* @name: The name of the cell.
* @namesz: The strlen of the cell name.
* @vllist: A colon/comma separated list of numeric IP addresses or NULL.
* @excl: T if an error should be given if the cell name already exists.
*
* Look up a cell record by name and query the DNS for VL server addresses if
 * needed. Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted, whilst allowing
* cell records to be shared even if not yet fully constructed.
*/
struct afs_cell *afs_lookup_cell(struct afs_net *net,
const char *name, unsigned int namesz,
const char *vllist, bool excl)
{
struct afs_cell *cell, *candidate, *cursor;
struct rb_node *parent, **pp;
enum afs_cell_state state;
int ret, n;
_enter("%s,%s", name, vllist);
if (!excl) {
cell = afs_find_cell(net, name, namesz, afs_cell_trace_use_lookup);
if (!IS_ERR(cell))
goto wait_for_cell;
}
/* Assume we're probably going to create a cell and preallocate and
* mostly set up a candidate record. We can then use this to stash the
* name, the net namespace and VL server addresses.
*
* We also want to do this before we hold any locks as it may involve
* upcalling to userspace to make DNS queries.
*/
candidate = afs_alloc_cell(net, name, namesz, vllist);
if (IS_ERR(candidate)) {
_leave(" = %ld", PTR_ERR(candidate));
return candidate;
}
/* Find the insertion point and check to see if someone else added a
* cell whilst we were allocating.
*/
down_write(&net->cells_lock);
pp = &net->cells.rb_node;
parent = NULL;
while (*pp) {
parent = *pp;
cursor = rb_entry(parent, struct afs_cell, net_node);
n = strncasecmp(cursor->name, name,
min_t(size_t, cursor->name_len, namesz));
if (n == 0)
n = cursor->name_len - namesz;
if (n < 0)
pp = &(*pp)->rb_left;
else if (n > 0)
pp = &(*pp)->rb_right;
else
goto cell_already_exists;
}
cell = candidate;
candidate = NULL;
atomic_set(&cell->active, 2);
trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 2, afs_cell_trace_insert);
rb_link_node_rcu(&cell->net_node, parent, pp);
rb_insert_color(&cell->net_node, &net->cells);
up_write(&net->cells_lock);
afs_queue_cell(cell, afs_cell_trace_get_queue_new);
wait_for_cell:
trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), atomic_read(&cell->active),
afs_cell_trace_wait);
_debug("wait_for_cell");
wait_var_event(&cell->state,
({
state = smp_load_acquire(&cell->state); /* vs error */
state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
}));
/* Check the state obtained from the wait check. */
if (state == AFS_CELL_REMOVED) {
ret = cell->error;
goto error;
}
_leave(" = %p [cell]", cell);
return cell;
cell_already_exists:
_debug("cell exists");
cell = cursor;
if (excl) {
ret = -EEXIST;
} else {
afs_use_cell(cursor, afs_cell_trace_use_lookup);
ret = 0;
}
up_write(&net->cells_lock);
if (candidate)
afs_put_cell(candidate, afs_cell_trace_put_candidate);
if (ret == 0)
goto wait_for_cell;
goto error_noput;
error:
afs_unuse_cell(net, cell, afs_cell_trace_unuse_lookup);
error_noput:
_leave(" = %d [error]", ret);
return ERR_PTR(ret);
}
/*
* set the root cell information
* - can be called with a module parameter string
* - can be called from a write to /proc/fs/afs/rootcell
*/
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
struct afs_cell *old_root, *new_root;
const char *cp, *vllist;
size_t len;
_enter("");
if (!rootcell) {
/* module is loaded with no parameters, or built statically.
* - in the future we might initialize cell DB here.
*/
_leave(" = 0 [no root]");
return 0;
}
cp = strchr(rootcell, ':');
if (!cp) {
_debug("kAFS: no VL server IP addresses specified");
vllist = NULL;
len = strlen(rootcell);
} else {
vllist = cp + 1;
len = cp - rootcell;
}
/* allocate a cell record for the root cell */
new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
if (IS_ERR(new_root)) {
_leave(" = %ld", PTR_ERR(new_root));
return PTR_ERR(new_root);
}
if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
afs_use_cell(new_root, afs_cell_trace_use_pin);
/* install the new cell */
down_write(&net->cells_lock);
afs_see_cell(new_root, afs_cell_trace_see_ws);
old_root = net->ws_cell;
net->ws_cell = new_root;
up_write(&net->cells_lock);
afs_unuse_cell(net, old_root, afs_cell_trace_unuse_ws);
_leave(" = 0");
return 0;
}
/*
* Update a cell's VL server address list from the DNS.
*/
static int afs_update_cell(struct afs_cell *cell)
{
struct afs_vlserver_list *vllist, *old = NULL, *p;
unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
time64_t now, expiry = 0;
int ret = 0;
_enter("%s", cell->name);
vllist = afs_dns_query(cell, &expiry);
if (IS_ERR(vllist)) {
ret = PTR_ERR(vllist);
_debug("%s: fail %d", cell->name, ret);
if (ret == -ENOMEM)
goto out_wake;
vllist = afs_alloc_vlserver_list(0);
if (!vllist) {
if (ret >= 0)
ret = -ENOMEM;
goto out_wake;
}
switch (ret) {
case -ENODATA:
case -EDESTADDRREQ:
vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
break;
case -EAGAIN:
case -ECONNREFUSED:
vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
break;
default:
vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
break;
}
}
_debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
cell->dns_status = vllist->status;
now = ktime_get_real_seconds();
if (min_ttl > max_ttl)
max_ttl = min_ttl;
if (expiry < now + min_ttl)
expiry = now + min_ttl;
else if (expiry > now + max_ttl)
expiry = now + max_ttl;
_debug("%s: status %d", cell->name, vllist->status);
if (vllist->source == DNS_RECORD_UNAVAILABLE) {
switch (vllist->status) {
case DNS_LOOKUP_GOT_NOT_FOUND:
/* The DNS said that the cell does not exist or there
* weren't any addresses to be had.
*/
cell->dns_expiry = expiry;
break;
case DNS_LOOKUP_BAD:
case DNS_LOOKUP_GOT_LOCAL_FAILURE:
case DNS_LOOKUP_GOT_TEMP_FAILURE:
case DNS_LOOKUP_GOT_NS_FAILURE:
default:
cell->dns_expiry = now + 10;
break;
}
} else {
cell->dns_expiry = expiry;
}
/* Replace the VL server list if the new record has servers or the old
* record doesn't.
*/
write_lock(&cell->vl_servers_lock);
p = rcu_dereference_protected(cell->vl_servers, true);
if (vllist->nr_servers > 0 || p->nr_servers == 0) {
rcu_assign_pointer(cell->vl_servers, vllist);
cell->dns_source = vllist->source;
old = p;
}
write_unlock(&cell->vl_servers_lock);
afs_put_vlserverlist(cell->net, old);
out_wake:
smp_store_release(&cell->dns_lookup_count,
cell->dns_lookup_count + 1); /* vs source/status */
wake_up_var(&cell->dns_lookup_count);
_leave(" = %d", ret);
return ret;
}
/*
* Destroy a cell record
*/
static void afs_cell_destroy(struct rcu_head *rcu)
{
struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
struct afs_net *net = cell->net;
int r;
_enter("%p{%s}", cell, cell->name);
r = refcount_read(&cell->ref);
ASSERTCMP(r, ==, 0);
trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free);
afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias);
key_put(cell->anonymous_key);
kfree(cell->name);
kfree(cell);
afs_dec_cells_outstanding(net);
_leave(" [destroyed]");
}
/*
* Queue the cell manager.
*/
static void afs_queue_cell_manager(struct afs_net *net)
{
int outstanding = atomic_inc_return(&net->cells_outstanding);
_enter("%d", outstanding);
if (!queue_work(afs_wq, &net->cells_manager))
afs_dec_cells_outstanding(net);
}
/*
* Cell management timer. We have an increment on cells_outstanding that we
* need to pass along to the work item.
*/
void afs_cells_timer(struct timer_list *timer)
{
struct afs_net *net = container_of(timer, struct afs_net, cells_timer);
_enter("");
if (!queue_work(afs_wq, &net->cells_manager))
afs_dec_cells_outstanding(net);
}
/*
* Get a reference on a cell record.
*/
struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
int r;
__refcount_inc(&cell->ref, &r);
trace_afs_cell(cell->debug_id, r + 1, atomic_read(&cell->active), reason);
return cell;
}
/*
* Drop a reference on a cell record.
*/
void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
if (cell) {
unsigned int debug_id = cell->debug_id;
unsigned int a;
bool zero;
int r;
a = atomic_read(&cell->active);
zero = __refcount_dec_and_test(&cell->ref, &r);
trace_afs_cell(debug_id, r - 1, a, reason);
if (zero) {
a = atomic_read(&cell->active);
WARN(a != 0, "Cell active count %u > 0\n", a);
call_rcu(&cell->rcu, afs_cell_destroy);
}
}
}
/*
* Note a cell becoming more active.
*/
struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
int r, a;
r = refcount_read(&cell->ref);
WARN_ON(r == 0);
a = atomic_inc_return(&cell->active);
trace_afs_cell(cell->debug_id, r, a, reason);
return cell;
}
/*
* Record a cell becoming less active. When the active counter reaches 1, it
* is scheduled for destruction, but may get reactivated.
*/
void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_trace reason)
{
unsigned int debug_id;
time64_t now, expire_delay;
int r, a;
if (!cell)
return;
_enter("%s", cell->name);
now = ktime_get_real_seconds();
cell->last_inactive = now;
expire_delay = 0;
if (cell->vl_servers->nr_servers)
expire_delay = afs_cell_gc_delay;
debug_id = cell->debug_id;
r = refcount_read(&cell->ref);
a = atomic_dec_return(&cell->active);
trace_afs_cell(debug_id, r, a, reason);
WARN_ON(a == 0);
if (a == 1)
/* 'cell' may now be garbage collected. */
afs_set_cell_timer(net, expire_delay);
}
/*
* Note that a cell has been seen.
*/
void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
int r, a;
r = refcount_read(&cell->ref);
a = atomic_read(&cell->active);
trace_afs_cell(cell->debug_id, r, a, reason);
}
/*
* Queue a cell for management, giving the workqueue a ref to hold.
*/
void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
afs_get_cell(cell, reason);
if (!queue_work(afs_wq, &cell->manager))
afs_put_cell(cell, afs_cell_trace_put_queue_fail);
}
/*
* Allocate a key to use as a placeholder for anonymous user security.
*/
static int afs_alloc_anon_key(struct afs_cell *cell)
{
struct key *key;
char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;
/* Create a key to represent an anonymous user. */
memcpy(keyname, "afs@", 4);
dp = keyname + 4;
cp = cell->name;
do {
*dp++ = tolower(*cp);
} while (*cp++);
key = rxrpc_get_null_key(keyname);
if (IS_ERR(key))
return PTR_ERR(key);
cell->anonymous_key = key;
_debug("anon key %p{%x}",
cell->anonymous_key, key_serial(cell->anonymous_key));
return 0;
}
/*
* Activate a cell.
*/
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
struct hlist_node **p;
struct afs_cell *pcell;
int ret;
if (!cell->anonymous_key) {
ret = afs_alloc_anon_key(cell);
if (ret < 0)
return ret;
}
ret = afs_proc_cell_setup(cell);
if (ret < 0)
return ret;
mutex_lock(&net->proc_cells_lock);
for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
pcell = hlist_entry(*p, struct afs_cell, proc_link);
if (strcmp(cell->name, pcell->name) < 0)
break;
}
cell->proc_link.pprev = p;
cell->proc_link.next = *p;
rcu_assign_pointer(*p, &cell->proc_link.next);
if (cell->proc_link.next)
cell->proc_link.next->pprev = &cell->proc_link.next;
afs_dynroot_mkdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
return 0;
}
/*
* Deactivate a cell.
*/
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
_enter("%s", cell->name);
afs_proc_cell_remove(cell);
mutex_lock(&net->proc_cells_lock);
hlist_del_rcu(&cell->proc_link);
afs_dynroot_rmdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
_leave("");
}
/*
* Manage a cell record, initialising and destroying it, maintaining its DNS
* records.
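 *
 * As read from the switch statement below, the state machine progresses:
 *
 *   UNSET -> ACTIVATING -> ACTIVE -> DEACTIVATING -> INACTIVE
 *
 * with INACTIVE/FAILED either removing the cell (REMOVED) or resetting it
 * to UNSET for reuse, ACTIVATING dropping to FAILED on error, and
 * DEACTIVATING reverting to ACTIVE if the cell became active again.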
*/
static void afs_manage_cell(struct afs_cell *cell)
{
struct afs_net *net = cell->net;
int ret, active;
_enter("%s", cell->name);
again:
_debug("state %u", cell->state);
switch (cell->state) {
case AFS_CELL_INACTIVE:
case AFS_CELL_FAILED:
down_write(&net->cells_lock);
active = 1;
if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
rb_erase(&cell->net_node, &net->cells);
trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 0,
afs_cell_trace_unuse_delete);
smp_store_release(&cell->state, AFS_CELL_REMOVED);
}
up_write(&net->cells_lock);
if (cell->state == AFS_CELL_REMOVED) {
wake_up_var(&cell->state);
goto final_destruction;
}
if (cell->state == AFS_CELL_FAILED)
goto done;
smp_store_release(&cell->state, AFS_CELL_UNSET);
wake_up_var(&cell->state);
goto again;
case AFS_CELL_UNSET:
smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
wake_up_var(&cell->state);
goto again;
case AFS_CELL_ACTIVATING:
ret = afs_activate_cell(net, cell);
if (ret < 0)
goto activation_failed;
smp_store_release(&cell->state, AFS_CELL_ACTIVE);
wake_up_var(&cell->state);
goto again;
case AFS_CELL_ACTIVE:
if (atomic_read(&cell->active) > 1) {
if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
ret = afs_update_cell(cell);
if (ret < 0)
cell->error = ret;
}
goto done;
}
smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
wake_up_var(&cell->state);
goto again;
case AFS_CELL_DEACTIVATING:
if (atomic_read(&cell->active) > 1)
goto reverse_deactivation;
afs_deactivate_cell(net, cell);
smp_store_release(&cell->state, AFS_CELL_INACTIVE);
wake_up_var(&cell->state);
goto again;
case AFS_CELL_REMOVED:
goto done;
default:
break;
}
_debug("bad state %u", cell->state);
BUG(); /* Unhandled state */
activation_failed:
cell->error = ret;
afs_deactivate_cell(net, cell);
smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
wake_up_var(&cell->state);
goto again;
reverse_deactivation:
smp_store_release(&cell->state, AFS_CELL_ACTIVE);
wake_up_var(&cell->state);
_leave(" [deact->act]");
return;
done:
_leave(" [done %u]", cell->state);
return;
final_destruction:
/* The root volume is pinning the cell */
afs_put_volume(cell->root_volume, afs_volume_trace_put_cell_root);
cell->root_volume = NULL;
afs_put_cell(cell, afs_cell_trace_put_destroy);
}
static void afs_manage_cell_work(struct work_struct *work)
{
struct afs_cell *cell = container_of(work, struct afs_cell, manager);
afs_manage_cell(cell);
afs_put_cell(cell, afs_cell_trace_put_queue_work);
}
/*
* Manage the records of cells known to a network namespace. This includes
* updating the DNS records and garbage collecting unused cells that were
* automatically added.
*
* Note that constructed cell records may only be removed from net->cells by
* this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to the caller (provided it skips cells that are
* still under construction).
*
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us, which we need to deal with before returning.
*/
void afs_manage_cells(struct work_struct *work)
{
struct afs_net *net = container_of(work, struct afs_net, cells_manager);
struct rb_node *cursor;
time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
bool purging = !net->live;
_enter("");
/* Trawl the cell database looking for cells that have expired from
* lack of use and cells whose DNS results have expired and dispatch
* their managers.
*/
down_read(&net->cells_lock);
for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
struct afs_cell *cell =
rb_entry(cursor, struct afs_cell, net_node);
unsigned active;
bool sched_cell = false;
active = atomic_read(&cell->active);
trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
active, afs_cell_trace_manage);
ASSERTCMP(active, >=, 1);
if (purging) {
if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) {
active = atomic_dec_return(&cell->active);
trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
active, afs_cell_trace_unuse_pin);
}
}
if (active == 1) {
struct afs_vlserver_list *vllist;
time64_t expire_at = cell->last_inactive;
read_lock(&cell->vl_servers_lock);
vllist = rcu_dereference_protected(
cell->vl_servers,
lockdep_is_held(&cell->vl_servers_lock));
if (vllist->nr_servers > 0)
expire_at += afs_cell_gc_delay;
read_unlock(&cell->vl_servers_lock);
if (purging || expire_at <= now)
sched_cell = true;
else if (expire_at < next_manage)
next_manage = expire_at;
}
if (!purging) {
if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
sched_cell = true;
}
if (sched_cell)
afs_queue_cell(cell, afs_cell_trace_get_queue_manage);
}
up_read(&net->cells_lock);
	/* Update the timer on the way out. We have to pass our increment on
	 * cells_outstanding in this namespace along to either the timer or
	 * the work scheduler.
	 */
if (!purging && next_manage < TIME64_MAX) {
now = ktime_get_real_seconds();
if (next_manage - now <= 0) {
if (queue_work(afs_wq, &net->cells_manager))
atomic_inc(&net->cells_outstanding);
} else {
afs_set_cell_timer(net, next_manage - now);
}
}
afs_dec_cells_outstanding(net);
_leave(" [%d]", atomic_read(&net->cells_outstanding));
}
/*
* Purge in-memory cell database.
*/
void afs_cell_purge(struct afs_net *net)
{
struct afs_cell *ws;
_enter("");
down_write(&net->cells_lock);
ws = net->ws_cell;
net->ws_cell = NULL;
up_write(&net->cells_lock);
afs_unuse_cell(net, ws, afs_cell_trace_unuse_ws);
_debug("del timer");
if (del_timer_sync(&net->cells_timer))
atomic_dec(&net->cells_outstanding);
_debug("kick mgr");
afs_queue_cell_manager(net);
_debug("wait");
wait_var_event(&net->cells_outstanding,
!atomic_read(&net->cells_outstanding));
_leave("");
}
// SPDX-License-Identifier: GPL-2.0
/*
* Implementation of the SID table type.
*
* Original author: Stephen Smalley, <[email protected]>
* Author: Ondrej Mosnacek, <[email protected]>
*
* Copyright (C) 2018 Red Hat, Inc.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
#include "services.h"
struct sidtab_str_cache {
struct rcu_head rcu_member;
struct list_head lru_member;
struct sidtab_entry *parent;
u32 len;
char str[] __counted_by(len);
};
#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
#define sid_to_index(sid) ((sid) - (SECINITSID_NUM + 1))
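/*
 * SIDs 1..SECINITSID_NUM are the fixed initial SIDs kept in s->isids[];
 * dynamically allocated entries start at SECINITSID_NUM + 1, so e.g. table
 * index 0 corresponds to SID SECINITSID_NUM + 1.
 */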
int sidtab_init(struct sidtab *s)
{
u32 i;
memset(s->roots, 0, sizeof(s->roots));
for (i = 0; i < SECINITSID_NUM; i++)
s->isids[i].set = 0;
s->frozen = false;
s->count = 0;
s->convert = NULL;
hash_init(s->context_to_sid);
spin_lock_init(&s->lock);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
INIT_LIST_HEAD(&s->cache_lru_list);
spin_lock_init(&s->cache_lock);
#endif
return 0;
}
static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
{
struct sidtab_entry *entry;
u32 sid = 0;
rcu_read_lock();
hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
if (entry->hash != hash)
continue;
if (context_cmp(&entry->context, context)) {
sid = entry->sid;
break;
}
}
rcu_read_unlock();
return sid;
}
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
struct sidtab_isid_entry *isid;
u32 hash;
int rc;
if (sid == 0 || sid > SECINITSID_NUM)
return -EINVAL;
isid = &s->isids[sid - 1];
rc = context_cpy(&isid->entry.context, context);
if (rc)
return rc;
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
isid->entry.cache = NULL;
#endif
isid->set = 1;
hash = context_compute_hash(context);
/*
* Multiple initial sids may map to the same context. Check that this
* context is not already represented in the context_to_sid hashtable
* to avoid duplicate entries and long linked lists upon hash
* collision.
*/
if (!context_to_sid(s, context, hash)) {
isid->entry.sid = sid;
isid->entry.hash = hash;
hash_add(s->context_to_sid, &isid->entry.list, hash);
}
return 0;
}
int sidtab_hash_stats(struct sidtab *sidtab, char *page)
{
int i;
int chain_len = 0;
int slots_used = 0;
int entries = 0;
int max_chain_len = 0;
int cur_bucket = 0;
struct sidtab_entry *entry;
rcu_read_lock();
hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
entries++;
if (i == cur_bucket) {
chain_len++;
if (chain_len == 1)
slots_used++;
} else {
cur_bucket = i;
if (chain_len > max_chain_len)
max_chain_len = chain_len;
chain_len = 0;
}
}
rcu_read_unlock();
if (chain_len > max_chain_len)
max_chain_len = chain_len;
return scnprintf(page, PAGE_SIZE,
"entries: %d\nbuckets used: %d/%d\n"
"longest chain: %d\n",
entries, slots_used, SIDTAB_HASH_BUCKETS,
max_chain_len);
}
static u32 sidtab_level_from_count(u32 count)
{
u32 capacity = SIDTAB_LEAF_ENTRIES;
u32 level = 0;
while (count > capacity) {
capacity <<= SIDTAB_INNER_SHIFT;
++level;
}
return level;
}
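
/*
 * Capacity math for the loop above: each inner level contributes
 * SIDTAB_INNER_SHIFT index bits, i.e. a factor of 1 << SIDTAB_INNER_SHIFT,
 * so a subtree of height 'level' holds
 * SIDTAB_LEAF_ENTRIES << (level * SIDTAB_INNER_SHIFT) entries; level 0 is
 * a single leaf node.
 */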
static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
u32 l;
if (!s->roots[0].ptr_leaf) {
s->roots[0].ptr_leaf =
kzalloc(SIDTAB_NODE_ALLOC_SIZE, GFP_ATOMIC);
if (!s->roots[0].ptr_leaf)
return -ENOMEM;
}
for (l = 1; l <= level; ++l)
if (!s->roots[l].ptr_inner) {
s->roots[l].ptr_inner =
kzalloc(SIDTAB_NODE_ALLOC_SIZE, GFP_ATOMIC);
if (!s->roots[l].ptr_inner)
return -ENOMEM;
s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
}
return 0;
}
static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
int alloc)
{
union sidtab_entry_inner *entry;
u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;
/* find the level of the subtree we need */
level = sidtab_level_from_count(index + 1);
capacity_shift = level * SIDTAB_INNER_SHIFT;
/* allocate roots if needed */
if (alloc && sidtab_alloc_roots(s, level) != 0)
return NULL;
/* lookup inside the subtree */
entry = &s->roots[level];
while (level != 0) {
capacity_shift -= SIDTAB_INNER_SHIFT;
--level;
entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
leaf_index &= ((u32)1 << capacity_shift) - 1;
if (!entry->ptr_inner) {
if (alloc)
entry->ptr_inner = kzalloc(
SIDTAB_NODE_ALLOC_SIZE, GFP_ATOMIC);
if (!entry->ptr_inner)
return NULL;
}
}
if (!entry->ptr_leaf) {
if (alloc)
entry->ptr_leaf =
kzalloc(SIDTAB_NODE_ALLOC_SIZE, GFP_ATOMIC);
if (!entry->ptr_leaf)
return NULL;
}
return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}
static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
{
/* read entries only after reading count */
u32 count = smp_load_acquire(&s->count);
if (index >= count)
return NULL;
return sidtab_do_lookup(s, index, 0);
}
static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
}
static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
int force)
{
if (sid != 0) {
struct sidtab_entry *entry;
if (sid > SECINITSID_NUM)
entry = sidtab_lookup(s, sid_to_index(sid));
else
entry = sidtab_lookup_initial(s, sid);
if (entry && (!entry->context.len || force))
return entry;
}
return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}
struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
return sidtab_search_core(s, sid, 0);
}
struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
return sidtab_search_core(s, sid, 1);
}
int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid)
{
unsigned long flags;
u32 count, hash = context_compute_hash(context);
struct sidtab_convert_params *convert;
struct sidtab_entry *dst, *dst_convert;
int rc;
*sid = context_to_sid(s, context, hash);
if (*sid)
return 0;
/* lock-free search failed: lock, re-search, and insert if not found */
spin_lock_irqsave(&s->lock, flags);
rc = 0;
*sid = context_to_sid(s, context, hash);
if (*sid)
goto out_unlock;
if (unlikely(s->frozen)) {
/*
* This sidtab is now frozen - tell the caller to abort and
* get the new one.
*/
rc = -ESTALE;
goto out_unlock;
}
count = s->count;
/* bail out if we already reached max entries */
rc = -EOVERFLOW;
if (count >= SIDTAB_MAX)
goto out_unlock;
/* insert context into new entry */
rc = -ENOMEM;
dst = sidtab_do_lookup(s, count, 1);
if (!dst)
goto out_unlock;
dst->sid = index_to_sid(count);
dst->hash = hash;
rc = context_cpy(&dst->context, context);
if (rc)
goto out_unlock;
/*
* if we are building a new sidtab, we need to convert the context
* and insert it there as well
*/
convert = s->convert;
if (convert) {
struct sidtab *target = convert->target;
rc = -ENOMEM;
dst_convert = sidtab_do_lookup(target, count, 1);
if (!dst_convert) {
context_destroy(&dst->context);
goto out_unlock;
}
rc = services_convert_context(convert->args, context,
&dst_convert->context,
GFP_ATOMIC);
if (rc) {
context_destroy(&dst->context);
goto out_unlock;
}
dst_convert->sid = index_to_sid(count);
dst_convert->hash = context_compute_hash(&dst_convert->context);
target->count = count + 1;
hash_add_rcu(target->context_to_sid, &dst_convert->list,
dst_convert->hash);
}
if (context->len)
pr_info("SELinux: Context %s is not valid (left unmapped).\n",
context->str);
*sid = index_to_sid(count);
/* write entries before updating count */
smp_store_release(&s->count, count + 1);
hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);
rc = 0;
out_unlock:
spin_unlock_irqrestore(&s->lock, flags);
return rc;
}
static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
struct sidtab_entry *entry;
u32 i;
for (i = 0; i < count; i++) {
entry = sidtab_do_lookup(s, i, 0);
entry->sid = index_to_sid(i);
entry->hash = context_compute_hash(&entry->context);
hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
}
}
static int sidtab_convert_tree(union sidtab_entry_inner *edst,
union sidtab_entry_inner *esrc, u32 *pos,
u32 count, u32 level,
struct sidtab_convert_params *convert)
{
int rc;
u32 i;
if (level != 0) {
if (!edst->ptr_inner) {
edst->ptr_inner =
kzalloc(SIDTAB_NODE_ALLOC_SIZE, GFP_KERNEL);
if (!edst->ptr_inner)
return -ENOMEM;
}
i = 0;
while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
&esrc->ptr_inner->entries[i],
pos, count, level - 1,
convert);
if (rc)
return rc;
i++;
}
} else {
if (!edst->ptr_leaf) {
edst->ptr_leaf =
kzalloc(SIDTAB_NODE_ALLOC_SIZE, GFP_KERNEL);
if (!edst->ptr_leaf)
return -ENOMEM;
}
i = 0;
while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
rc = services_convert_context(
convert->args,
&esrc->ptr_leaf->entries[i].context,
&edst->ptr_leaf->entries[i].context,
GFP_KERNEL);
if (rc)
return rc;
(*pos)++;
i++;
}
cond_resched();
}
return 0;
}
int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
unsigned long flags;
u32 count, level, pos;
int rc;
spin_lock_irqsave(&s->lock, flags);
/* concurrent policy loads are not allowed */
if (s->convert) {
spin_unlock_irqrestore(&s->lock, flags);
return -EBUSY;
}
count = s->count;
level = sidtab_level_from_count(count);
/* allocate last leaf in the new sidtab (to avoid race with
* live convert)
*/
rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
if (rc) {
spin_unlock_irqrestore(&s->lock, flags);
return rc;
}
/* set count in case no new entries are added during conversion */
params->target->count = count;
/* enable live convert of new entries */
s->convert = params;
/* we can safely convert the tree outside the lock */
spin_unlock_irqrestore(&s->lock, flags);
pr_info("SELinux: Converting %u SID table entries...\n", count);
/* convert all entries not covered by live convert */
pos = 0;
rc = sidtab_convert_tree(¶ms->target->roots[level],
&s->roots[level], &pos, count, level, params);
if (rc) {
/* we need to keep the old table - disable live convert */
spin_lock_irqsave(&s->lock, flags);
s->convert = NULL;
spin_unlock_irqrestore(&s->lock, flags);
return rc;
}
/*
* The hashtable can also be modified in sidtab_context_to_sid()
* so we must re-acquire the lock here.
*/
spin_lock_irqsave(&s->lock, flags);
sidtab_convert_hashtable(params->target, count);
spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
void sidtab_cancel_convert(struct sidtab *s)
{
unsigned long flags;
/* cancelling policy load - disable live convert of sidtab */
spin_lock_irqsave(&s->lock, flags);
s->convert = NULL;
spin_unlock_irqrestore(&s->lock, flags);
}
void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags)
__acquires(&s->lock)
{
spin_lock_irqsave(&s->lock, *flags);
s->frozen = true;
s->convert = NULL;
}
void sidtab_freeze_end(struct sidtab *s, unsigned long *flags)
__releases(&s->lock)
{
spin_unlock_irqrestore(&s->lock, *flags);
}
static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
context_destroy(&entry->context);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
kfree(rcu_dereference_raw(entry->cache));
#endif
}
static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
u32 i;
if (level != 0) {
struct sidtab_node_inner *node = entry.ptr_inner;
if (!node)
return;
for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
sidtab_destroy_tree(node->entries[i], level - 1);
kfree(node);
} else {
struct sidtab_node_leaf *node = entry.ptr_leaf;
if (!node)
return;
for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
sidtab_destroy_entry(&node->entries[i]);
kfree(node);
}
}
void sidtab_destroy(struct sidtab *s)
{
u32 i, level;
for (i = 0; i < SECINITSID_NUM; i++)
if (s->isids[i].set)
sidtab_destroy_entry(&s->isids[i].entry);
level = SIDTAB_MAX_LEVEL;
while (level && !s->roots[level].ptr_inner)
--level;
sidtab_destroy_tree(s->roots[level], level);
/*
* The context_to_sid hashtable's objects are all shared
* with the isids array and context tree, and so don't need
* to be cleaned up here.
*/
}
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
const char *str, u32 str_len)
{
struct sidtab_str_cache *cache, *victim = NULL;
unsigned long flags;
/* do not cache invalid contexts */
if (entry->context.len)
return;
spin_lock_irqsave(&s->cache_lock, flags);
cache = rcu_dereference_protected(entry->cache,
lockdep_is_held(&s->cache_lock));
if (cache) {
/* entry in cache - just bump to the head of LRU list */
list_move(&cache->lru_member, &s->cache_lru_list);
goto out_unlock;
}
cache = kmalloc(struct_size(cache, str, str_len), GFP_ATOMIC);
if (!cache)
goto out_unlock;
if (s->cache_free_slots == 0) {
/* pop a cache entry from the tail and free it */
victim = container_of(s->cache_lru_list.prev,
struct sidtab_str_cache, lru_member);
list_del(&victim->lru_member);
rcu_assign_pointer(victim->parent->cache, NULL);
} else {
s->cache_free_slots--;
}
cache->parent = entry;
cache->len = str_len;
memcpy(cache->str, str, str_len);
list_add(&cache->lru_member, &s->cache_lru_list);
rcu_assign_pointer(entry->cache, cache);
out_unlock:
spin_unlock_irqrestore(&s->cache_lock, flags);
kfree_rcu(victim, rcu_member);
}
int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry, char **out,
u32 *out_len)
{
struct sidtab_str_cache *cache;
int rc = 0;
if (entry->context.len)
return -ENOENT; /* do not cache invalid contexts */
rcu_read_lock();
cache = rcu_dereference(entry->cache);
if (!cache) {
rc = -ENOENT;
} else {
*out_len = cache->len;
if (out) {
*out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
if (!*out)
rc = -ENOMEM;
}
}
rcu_read_unlock();
if (!rc && out)
sidtab_sid2str_put(s, entry, *out, *out_len);
return rc;
}
#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/lib/crc-ccitt.c
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-ccitt.h>
/*
* This mysterious table is just the CRC of each possible byte. It can be
* computed using the standard bit-at-a-time methods. The polynomial can
* be seen in entry 128, 0x8408. This corresponds to x^0 + x^5 + x^12.
* Add the implicit x^16, and you have the standard CRC-CCITT.
*/
u16 const crc_ccitt_table[256] = {
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
EXPORT_SYMBOL(crc_ccitt_table);
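/*
* Illustrative sketch (an assumption, not part of the original file):
* one way an entry of the table above could be regenerated with the
* standard bit-at-a-time method, using the reflected polynomial 0x8408.
*/
static u16 __maybe_unused crc_ccitt_gen_entry(u8 byte)
{
	u16 crc = byte;
	int bit;
	/* shift out each bit, folding in the polynomial when the LSB is set */
	for (bit = 0; bit < 8; bit++)
		crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	return crc; /* e.g. crc_ccitt_gen_entry(1) == 0x1189, as in the table */
}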
/**
* crc_ccitt - recompute the CRC (CRC-CCITT variant) for the data
* buffer
* @crc: previous CRC value
* @buffer: data pointer
* @len: number of bytes in the buffer
*/
u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
{
while (len--)
crc = crc_ccitt_byte(crc, *buffer++);
return crc;
}
EXPORT_SYMBOL(crc_ccitt);
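/*
* Illustrative usage sketch (an assumption, not from this file): callers
* conventionally seed the CRC with 0xffff, as in the PPP FCS-16:
*
*	u16 fcs = crc_ccitt(0xffff, data, len);
*/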
MODULE_DESCRIPTION("CRC-CCITT calculations");
MODULE_LICENSE("GPL");
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BTRFS_DELALLOC_SPACE_H
#define BTRFS_DELALLOC_SPACE_H
#include <linux/types.h>
struct extent_changeset;
struct btrfs_inode;
struct btrfs_fs_info;
int btrfs_alloc_data_chunk_ondemand(const struct btrfs_inode *inode, u64 bytes);
int btrfs_check_data_free_space(struct btrfs_inode *inode,
struct extent_changeset **reserved, u64 start, u64 len,
bool noflush);
void btrfs_free_reserved_data_space(struct btrfs_inode *inode,
struct extent_changeset *reserved, u64 start, u64 len);
void btrfs_delalloc_release_space(struct btrfs_inode *inode,
struct extent_changeset *reserved,
u64 start, u64 len, bool qgroup_free);
void btrfs_free_reserved_data_space_noquota(struct btrfs_fs_info *fs_info,
u64 len);
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
bool qgroup_free);
int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
struct extent_changeset **reserved, u64 start, u64 len);
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
u64 disk_num_bytes, bool noflush);
void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
#endif /* BTRFS_DELALLOC_SPACE_H */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Namjae Jeon <[email protected]>
* Copyright (C) 2019 Samsung Electronics Co., Ltd.
*/
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "glob.h"
#include "vfs_cache.h"
#include "oplock.h"
#include "vfs.h"
#include "connection.h"
#include "mgmt/tree_connect.h"
#include "mgmt/user_session.h"
#include "smb_common.h"
#include "server.h"
#define S_DEL_PENDING 1
#define S_DEL_ON_CLS 2
#define S_DEL_ON_CLS_STREAM 8
static unsigned int inode_hash_mask __read_mostly;
static unsigned int inode_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static DEFINE_RWLOCK(inode_hash_lock);
static struct ksmbd_file_table global_ft;
static atomic_long_t fd_limit;
static struct kmem_cache *filp_cache;
static bool durable_scavenger_running;
static DEFINE_MUTEX(durable_scavenger_lock);
static wait_queue_head_t dh_wq;
void ksmbd_set_fd_limit(unsigned long limit)
{
limit = min(limit, get_max_files());
atomic_long_set(&fd_limit, limit);
}
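/* Charge one fd against the limit; undo the charge and report depletion */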
static bool fd_limit_depleted(void)
{
long v = atomic_long_dec_return(&fd_limit);
if (v >= 0)
return false;
atomic_long_inc(&fd_limit);
return true;
}
static void fd_limit_close(void)
{
atomic_long_inc(&fd_limit);
}
/*
* INODE hash
*/
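/* Mix the dentry pointer (hashval) and superblock into a bucket index */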
static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
{
unsigned long tmp;
tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
L1_CACHE_BYTES;
tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
return tmp & inode_hash_mask;
}
static struct ksmbd_inode *__ksmbd_inode_lookup(struct dentry *de)
{
struct hlist_head *head = inode_hashtable +
inode_hash(d_inode(de)->i_sb, (unsigned long)de);
struct ksmbd_inode *ci = NULL, *ret_ci = NULL;
hlist_for_each_entry(ci, head, m_hash) {
if (ci->m_de == de) {
if (atomic_inc_not_zero(&ci->m_count))
ret_ci = ci;
break;
}
}
return ret_ci;
}
static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
{
return __ksmbd_inode_lookup(fp->filp->f_path.dentry);
}
struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d)
{
struct ksmbd_inode *ci;
read_lock(&inode_hash_lock);
ci = __ksmbd_inode_lookup(d);
read_unlock(&inode_hash_lock);
return ci;
}
int ksmbd_query_inode_status(struct dentry *dentry)
{
struct ksmbd_inode *ci;
int ret = KSMBD_INODE_STATUS_UNKNOWN;
read_lock(&inode_hash_lock);
ci = __ksmbd_inode_lookup(dentry);
if (ci) {
ret = KSMBD_INODE_STATUS_OK;
if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
ret = KSMBD_INODE_STATUS_PENDING_DELETE;
atomic_dec(&ci->m_count);
}
read_unlock(&inode_hash_lock);
return ret;
}
bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
{
return (fp->f_ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS));
}
void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
{
fp->f_ci->m_flags |= S_DEL_PENDING;
}
void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
{
fp->f_ci->m_flags &= ~S_DEL_PENDING;
}
void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
int file_info)
{
if (ksmbd_stream_fd(fp)) {
fp->f_ci->m_flags |= S_DEL_ON_CLS_STREAM;
return;
}
fp->f_ci->m_flags |= S_DEL_ON_CLS;
}
static void ksmbd_inode_hash(struct ksmbd_inode *ci)
{
struct hlist_head *b = inode_hashtable +
inode_hash(d_inode(ci->m_de)->i_sb, (unsigned long)ci->m_de);
hlist_add_head(&ci->m_hash, b);
}
static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
{
write_lock(&inode_hash_lock);
hlist_del_init(&ci->m_hash);
write_unlock(&inode_hash_lock);
}
static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
{
atomic_set(&ci->m_count, 1);
atomic_set(&ci->op_count, 0);
atomic_set(&ci->sop_count, 0);
ci->m_flags = 0;
ci->m_fattr = 0;
INIT_LIST_HEAD(&ci->m_fp_list);
INIT_LIST_HEAD(&ci->m_op_list);
init_rwsem(&ci->m_lock);
ci->m_de = fp->filp->f_path.dentry;
return 0;
}
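/*
* Look up the ksmbd_inode for @fp's dentry, or allocate and hash a new
* one. A racing inserter is handled by re-checking under the write lock
* and dropping our allocation if another thread won.
*/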
static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
{
struct ksmbd_inode *ci, *tmpci;
int rc;
read_lock(&inode_hash_lock);
ci = ksmbd_inode_lookup(fp);
read_unlock(&inode_hash_lock);
if (ci)
return ci;
ci = kmalloc(sizeof(struct ksmbd_inode), KSMBD_DEFAULT_GFP);
if (!ci)
return NULL;
rc = ksmbd_inode_init(ci, fp);
if (rc) {
pr_err("inode initialized failed\n");
kfree(ci);
return NULL;
}
write_lock(&inode_hash_lock);
tmpci = ksmbd_inode_lookup(fp);
if (!tmpci) {
ksmbd_inode_hash(ci);
} else {
kfree(ci);
ci = tmpci;
}
write_unlock(&inode_hash_lock);
return ci;
}
static void ksmbd_inode_free(struct ksmbd_inode *ci)
{
ksmbd_inode_unhash(ci);
kfree(ci);
}
void ksmbd_inode_put(struct ksmbd_inode *ci)
{
if (atomic_dec_and_test(&ci->m_count))
ksmbd_inode_free(ci);
}
int __init ksmbd_inode_hash_init(void)
{
unsigned int loop;
unsigned long numentries = 16384;
unsigned long bucketsize = sizeof(struct hlist_head);
unsigned long size;
inode_hash_shift = ilog2(numentries);
inode_hash_mask = (1 << inode_hash_shift) - 1;
size = bucketsize << inode_hash_shift;
/* init master fp hash table */
inode_hashtable = vmalloc(size);
if (!inode_hashtable)
return -ENOMEM;
for (loop = 0; loop < (1U << inode_hash_shift); loop++)
INIT_HLIST_HEAD(&inode_hashtable[loop]);
return 0;
}
void ksmbd_release_inode_hash(void)
{
vfree(inode_hashtable);
}
static void __ksmbd_inode_close(struct ksmbd_file *fp)
{
struct ksmbd_inode *ci = fp->f_ci;
int err;
struct file *filp;
filp = fp->filp;
if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) {
ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
&filp->f_path,
fp->stream.name,
true);
if (err)
pr_err("remove xattr failed : %s\n",
fp->stream.name);
}
if (atomic_dec_and_test(&ci->m_count)) {
down_write(&ci->m_lock);
if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
up_write(&ci->m_lock);
ksmbd_vfs_unlink(filp);
down_write(&ci->m_lock);
}
up_write(&ci->m_lock);
ksmbd_inode_free(ci);
}
}
static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
if (!has_file_id(fp->persistent_id))
return;
idr_remove(global_ft.idr, fp->persistent_id);
}
static void ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
write_lock(&global_ft.lock);
__ksmbd_remove_durable_fd(fp);
write_unlock(&global_ft.lock);
if (waitqueue_active(&dh_wq))
wake_up(&dh_wq);
}
static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
if (!has_file_id(fp->volatile_id))
return;
down_write(&fp->f_ci->m_lock);
list_del_init(&fp->node);
up_write(&fp->f_ci->m_lock);
write_lock(&ft->lock);
idr_remove(ft->idr, fp->volatile_id);
write_unlock(&ft->lock);
}
static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
struct file *filp;
struct ksmbd_lock *smb_lock, *tmp_lock;
fd_limit_close();
ksmbd_remove_durable_fd(fp);
if (ft)
__ksmbd_remove_fd(ft, fp);
close_id_del_oplock(fp);
filp = fp->filp;
__ksmbd_inode_close(fp);
if (!IS_ERR_OR_NULL(filp))
fput(filp);
/* Because the reference count of fp is 0, it is guaranteed that
* there are no accesses to fp->lock_list.
*/
list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
spin_lock(&fp->conn->llist_lock);
list_del(&smb_lock->clist);
spin_unlock(&fp->conn->llist_lock);
list_del(&smb_lock->flist);
locks_free_lock(smb_lock->fl);
kfree(smb_lock);
}
if (ksmbd_stream_fd(fp))
kfree(fp->stream.name);
kmem_cache_free(filp_cache, fp);
}
static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
{
if (fp->f_state != FP_INITED)
return NULL;
if (!atomic_inc_not_zero(&fp->refcount))
return NULL;
return fp;
}
static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
u64 id)
{
struct ksmbd_file *fp;
if (!has_file_id(id))
return NULL;
read_lock(&ft->lock);
fp = idr_find(ft->idr, id);
if (fp)
fp = ksmbd_fp_get(fp);
read_unlock(&ft->lock);
return fp;
}
static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
{
__ksmbd_close_fd(&work->sess->file_table, fp);
atomic_dec(&work->conn->stats.open_files_count);
}
static void set_close_state_blocked_works(struct ksmbd_file *fp)
{
struct ksmbd_work *cancel_work;
spin_lock(&fp->f_lock);
list_for_each_entry(cancel_work, &fp->blocked_works,
fp_entry) {
cancel_work->state = KSMBD_WORK_CLOSED;
cancel_work->cancel_fn(cancel_work->cancel_argv);
}
spin_unlock(&fp->f_lock);
}
int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
{
struct ksmbd_file *fp;
struct ksmbd_file_table *ft;
if (!has_file_id(id))
return 0;
ft = &work->sess->file_table;
write_lock(&ft->lock);
fp = idr_find(ft->idr, id);
if (fp) {
set_close_state_blocked_works(fp);
if (fp->f_state != FP_INITED)
fp = NULL;
else {
fp->f_state = FP_CLOSED;
if (!atomic_dec_and_test(&fp->refcount))
fp = NULL;
}
}
write_unlock(&ft->lock);
if (!fp)
return -EINVAL;
__put_fd_final(work, fp);
return 0;
}
void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
{
if (!fp)
return;
if (!atomic_dec_and_test(&fp->refcount))
return;
__put_fd_final(work, fp);
}
static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)
{
if (!fp)
return false;
if (fp->tcon != tcon)
return false;
return true;
}
struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
{
return __ksmbd_lookup_fd(&work->sess->file_table, id);
}
struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
{
struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
if (__sanity_check(work->tcon, fp))
return fp;
ksmbd_fd_put(work, fp);
return NULL;
}
struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
u64 pid)
{
struct ksmbd_file *fp;
if (!has_file_id(id)) {
id = work->compound_fid;
pid = work->compound_pfid;
}
fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
if (!__sanity_check(work->tcon, fp)) {
ksmbd_fd_put(work, fp);
return NULL;
}
if (fp->persistent_id != pid) {
ksmbd_fd_put(work, fp);
return NULL;
}
return fp;
}
struct ksmbd_file *ksmbd_lookup_global_fd(unsigned long long id)
{
return __ksmbd_lookup_fd(&global_ft, id);
}
struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
{
struct ksmbd_file *fp;
fp = __ksmbd_lookup_fd(&global_ft, id);
if (fp && (fp->conn ||
(fp->durable_scavenger_timeout &&
(fp->durable_scavenger_timeout <
jiffies_to_msecs(jiffies))))) {
ksmbd_put_durable_fd(fp);
fp = NULL;
}
return fp;
}
void ksmbd_put_durable_fd(struct ksmbd_file *fp)
{
if (!atomic_dec_and_test(&fp->refcount))
return;
__ksmbd_close_fd(NULL, fp);
}
struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
{
struct ksmbd_file *fp = NULL;
unsigned int id;
read_lock(&global_ft.lock);
idr_for_each_entry(global_ft.idr, fp, id) {
if (!memcmp(fp->create_guid,
cguid,
SMB2_CREATE_GUID_SIZE)) {
fp = ksmbd_fp_get(fp);
break;
}
}
read_unlock(&global_ft.lock);
return fp;
}
struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
{
struct ksmbd_file *lfp;
struct ksmbd_inode *ci;
struct inode *inode = d_inode(dentry);
read_lock(&inode_hash_lock);
ci = __ksmbd_inode_lookup(dentry);
read_unlock(&inode_hash_lock);
if (!ci)
return NULL;
down_read(&ci->m_lock);
list_for_each_entry(lfp, &ci->m_fp_list, node) {
if (inode == file_inode(lfp->filp)) {
atomic_dec(&ci->m_count);
lfp = ksmbd_fp_get(lfp);
up_read(&ci->m_lock);
return lfp;
}
}
atomic_dec(&ci->m_count);
up_read(&ci->m_lock);
return NULL;
}
#define OPEN_ID_TYPE_VOLATILE_ID (0)
#define OPEN_ID_TYPE_PERSISTENT_ID (1)
static void __open_id_set(struct ksmbd_file *fp, u64 id, int type)
{
if (type == OPEN_ID_TYPE_VOLATILE_ID)
fp->volatile_id = id;
if (type == OPEN_ID_TYPE_PERSISTENT_ID)
fp->persistent_id = id;
}
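/*
* Allocate a volatile or persistent id for @fp in @ft via cyclic IDR
* allocation. The fd-limit charge is returned on allocation failure.
*/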
static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
int type)
{
u64 id = 0;
int ret;
if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
__open_id_set(fp, KSMBD_NO_FID, type);
return -EMFILE;
}
idr_preload(KSMBD_DEFAULT_GFP);
write_lock(&ft->lock);
ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
if (ret >= 0) {
id = ret;
ret = 0;
} else {
id = KSMBD_NO_FID;
fd_limit_close();
}
__open_id_set(fp, id, type);
write_unlock(&ft->lock);
idr_preload_end();
return ret;
}
unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
{
__open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
return fp->persistent_id;
}
struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
{
struct ksmbd_file *fp;
int ret;
fp = kmem_cache_zalloc(filp_cache, KSMBD_DEFAULT_GFP);
if (!fp) {
pr_err("Failed to allocate memory\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&fp->blocked_works);
INIT_LIST_HEAD(&fp->node);
INIT_LIST_HEAD(&fp->lock_list);
spin_lock_init(&fp->f_lock);
atomic_set(&fp->refcount, 1);
fp->filp = filp;
fp->conn = work->conn;
fp->tcon = work->tcon;
fp->volatile_id = KSMBD_NO_FID;
fp->persistent_id = KSMBD_NO_FID;
fp->f_state = FP_NEW;
fp->f_ci = ksmbd_inode_get(fp);
if (!fp->f_ci) {
ret = -ENOMEM;
goto err_out;
}
ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
if (ret) {
ksmbd_inode_put(fp->f_ci);
goto err_out;
}
atomic_inc(&work->conn->stats.open_files_count);
return fp;
err_out:
kmem_cache_free(filp_cache, fp);
return ERR_PTR(ret);
}
void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
unsigned int state)
{
if (!fp)
return;
write_lock(&ft->lock);
fp->f_state = state;
write_unlock(&ft->lock);
}
static int
__close_file_table_ids(struct ksmbd_file_table *ft,
struct ksmbd_tree_connect *tcon,
bool (*skip)(struct ksmbd_tree_connect *tcon,
struct ksmbd_file *fp))
{
unsigned int id;
struct ksmbd_file *fp;
int num = 0;
idr_for_each_entry(ft->idr, fp, id) {
if (skip(tcon, fp))
continue;
set_close_state_blocked_works(fp);
if (!atomic_dec_and_test(&fp->refcount))
continue;
__ksmbd_close_fd(ft, fp);
num++;
}
return num;
}
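/*
* A handle can be reconnected if it is resilient or persistent, or if it
* is durable and holds a handle-caching lease or a batch oplock, and no
* oplock break is currently in progress.
*/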
static inline bool is_reconnectable(struct ksmbd_file *fp)
{
struct oplock_info *opinfo = opinfo_get(fp);
bool reconn = false;
if (!opinfo)
return false;
if (opinfo->op_state != OPLOCK_STATE_NONE) {
opinfo_put(opinfo);
return false;
}
if (fp->is_resilient || fp->is_persistent)
reconn = true;
else if (fp->is_durable && opinfo->is_lease &&
opinfo->o_lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
reconn = true;
else if (fp->is_durable && opinfo->level == SMB2_OPLOCK_LEVEL_BATCH)
reconn = true;
opinfo_put(opinfo);
return reconn;
}
static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
struct ksmbd_file *fp)
{
return fp->tcon != tcon;
}
static bool ksmbd_durable_scavenger_alive(void)
{
mutex_lock(&durable_scavenger_lock);
if (!durable_scavenger_running) {
mutex_unlock(&durable_scavenger_lock);
return false;
}
mutex_unlock(&durable_scavenger_lock);
if (kthread_should_stop())
return false;
if (idr_is_empty(global_ft.idr))
return false;
return true;
}
static void ksmbd_scavenger_dispose_dh(struct list_head *head)
{
while (!list_empty(head)) {
struct ksmbd_file *fp;
fp = list_first_entry(head, struct ksmbd_file, node);
list_del_init(&fp->node);
__ksmbd_close_fd(NULL, fp);
}
}
static int ksmbd_durable_scavenger(void *dummy)
{
struct ksmbd_file *fp = NULL;
unsigned int id;
unsigned int min_timeout = 1;
bool found_fp_timeout;
LIST_HEAD(scavenger_list);
unsigned long remaining_jiffies;
__module_get(THIS_MODULE);
set_freezable();
while (ksmbd_durable_scavenger_alive()) {
if (try_to_freeze())
continue;
found_fp_timeout = false;
remaining_jiffies = wait_event_timeout(dh_wq,
ksmbd_durable_scavenger_alive() == false,
__msecs_to_jiffies(min_timeout));
if (remaining_jiffies)
min_timeout = jiffies_to_msecs(remaining_jiffies);
else
min_timeout = DURABLE_HANDLE_MAX_TIMEOUT;
write_lock(&global_ft.lock);
idr_for_each_entry(global_ft.idr, fp, id) {
if (!fp->durable_timeout)
continue;
if (atomic_read(&fp->refcount) > 1 ||
fp->conn)
continue;
found_fp_timeout = true;
if (fp->durable_scavenger_timeout <=
jiffies_to_msecs(jiffies)) {
__ksmbd_remove_durable_fd(fp);
list_add(&fp->node, &scavenger_list);
} else {
unsigned long durable_timeout;
durable_timeout =
fp->durable_scavenger_timeout -
jiffies_to_msecs(jiffies);
if (min_timeout > durable_timeout)
min_timeout = durable_timeout;
}
}
write_unlock(&global_ft.lock);
ksmbd_scavenger_dispose_dh(&scavenger_list);
if (found_fp_timeout == false)
break;
}
mutex_lock(&durable_scavenger_lock);
durable_scavenger_running = false;
mutex_unlock(&durable_scavenger_lock);
module_put(THIS_MODULE);
return 0;
}
void ksmbd_launch_ksmbd_durable_scavenger(void)
{
if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE))
return;
mutex_lock(&durable_scavenger_lock);
if (durable_scavenger_running == true) {
mutex_unlock(&durable_scavenger_lock);
return;
}
durable_scavenger_running = true;
server_conf.dh_task = kthread_run(ksmbd_durable_scavenger,
(void *)NULL, "ksmbd-durable-scavenger");
if (IS_ERR(server_conf.dh_task))
pr_err("cannot start conn thread, err : %ld\n",
PTR_ERR(server_conf.dh_task));
mutex_unlock(&durable_scavenger_lock);
}
void ksmbd_stop_durable_scavenger(void)
{
if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE))
return;
mutex_lock(&durable_scavenger_lock);
if (!durable_scavenger_running) {
mutex_unlock(&durable_scavenger_lock);
return;
}
durable_scavenger_running = false;
if (waitqueue_active(&dh_wq))
wake_up(&dh_wq);
mutex_unlock(&durable_scavenger_lock);
kthread_stop(server_conf.dh_task);
}
static bool session_fd_check(struct ksmbd_tree_connect *tcon,
struct ksmbd_file *fp)
{
struct ksmbd_inode *ci;
struct oplock_info *op;
struct ksmbd_conn *conn;
if (!is_reconnectable(fp))
return false;
conn = fp->conn;
ci = fp->f_ci;
down_write(&ci->m_lock);
list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
if (op->conn != conn)
continue;
if (op->conn && atomic_dec_and_test(&op->conn->refcnt))
kfree(op->conn);
op->conn = NULL;
}
up_write(&ci->m_lock);
fp->conn = NULL;
fp->tcon = NULL;
fp->volatile_id = KSMBD_NO_FID;
if (fp->durable_timeout)
fp->durable_scavenger_timeout =
jiffies_to_msecs(jiffies) + fp->durable_timeout;
return true;
}
void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
{
int num = __close_file_table_ids(&work->sess->file_table,
work->tcon,
tree_conn_fd_check);
atomic_sub(num, &work->conn->stats.open_files_count);
}
void ksmbd_close_session_fds(struct ksmbd_work *work)
{
int num = __close_file_table_ids(&work->sess->file_table,
work->tcon,
session_fd_check);
atomic_sub(num, &work->conn->stats.open_files_count);
}
int ksmbd_init_global_file_table(void)
{
return ksmbd_init_file_table(&global_ft);
}
void ksmbd_free_global_file_table(void)
{
struct ksmbd_file *fp = NULL;
unsigned int id;
idr_for_each_entry(global_ft.idr, fp, id) {
ksmbd_remove_durable_fd(fp);
__ksmbd_close_fd(NULL, fp);
}
idr_destroy(global_ft.idr);
kfree(global_ft.idr);
}
int ksmbd_validate_name_reconnect(struct ksmbd_share_config *share,
struct ksmbd_file *fp, char *name)
{
char *pathname, *ab_pathname;
int ret = 0;
pathname = kmalloc(PATH_MAX, KSMBD_DEFAULT_GFP);
if (!pathname)
return -EACCES;
ab_pathname = d_path(&fp->filp->f_path, pathname, PATH_MAX);
if (IS_ERR(ab_pathname)) {
kfree(pathname);
return -EACCES;
}
if (name && strcmp(&ab_pathname[share->path_sz + 1], name)) {
ksmbd_debug(SMB, "invalid name reconnect %s\n", name);
ret = -EINVAL;
}
kfree(pathname);
return ret;
}
int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
{
struct ksmbd_inode *ci;
struct oplock_info *op;
if (!fp->is_durable || fp->conn || fp->tcon) {
pr_err("Invalid durable fd [%p:%p]\n", fp->conn, fp->tcon);
return -EBADF;
}
if (has_file_id(fp->volatile_id)) {
pr_err("Still in use durable fd: %llu\n", fp->volatile_id);
return -EBADF;
}
fp->conn = work->conn;
fp->tcon = work->tcon;
ci = fp->f_ci;
down_write(&ci->m_lock);
list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
if (op->conn)
continue;
op->conn = fp->conn;
atomic_inc(&op->conn->refcnt);
}
up_write(&ci->m_lock);
fp->f_state = FP_NEW;
__open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
if (!has_file_id(fp->volatile_id)) {
fp->conn = NULL;
fp->tcon = NULL;
return -EBADF;
}
return 0;
}
int ksmbd_init_file_table(struct ksmbd_file_table *ft)
{
ft->idr = kzalloc(sizeof(struct idr), KSMBD_DEFAULT_GFP);
if (!ft->idr)
return -ENOMEM;
idr_init(ft->idr);
rwlock_init(&ft->lock);
return 0;
}
void ksmbd_destroy_file_table(struct ksmbd_file_table *ft)
{
if (!ft->idr)
return;
__close_file_table_ids(ft, NULL, session_fd_check);
idr_destroy(ft->idr);
kfree(ft->idr);
ft->idr = NULL;
}
int ksmbd_init_file_cache(void)
{
filp_cache = kmem_cache_create("ksmbd_file_cache",
sizeof(struct ksmbd_file), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!filp_cache)
goto out;
init_waitqueue_head(&dh_wq);
return 0;
out:
pr_err("failed to allocate file cache\n");
return -ENOMEM;
}
void ksmbd_exit_file_cache(void)
{
kmem_cache_destroy(filp_cache);
}
|
/* Set tz value
* by: John Stultz <[email protected]>
* (C) Copyright Linaro 2016
* Licensed under the GPLv2
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include "../kselftest.h"
int set_tz(int min, int dst)
{
struct timezone tz;
tz.tz_minuteswest = min;
tz.tz_dsttime = dst;
return settimeofday(0, &tz);
}
int get_tz_min(void)
{
struct timezone tz;
struct timeval tv;
memset(&tz, 0, sizeof(tz));
gettimeofday(&tv, &tz);
return tz.tz_minuteswest;
}
int get_tz_dst(void)
{
struct timezone tz;
struct timeval tv;
memset(&tz, 0, sizeof(tz));
gettimeofday(&tv, &tz);
return tz.tz_dsttime;
}
int main(int argc, char **argv)
{
int i, ret;
int min, dst;
min = get_tz_min();
dst = get_tz_dst();
printf("tz_minuteswest started at %i, dst at %i\n", min, dst);
printf("Checking tz_minuteswest can be properly set: ");
fflush(stdout);
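/* the kernel accepts tz_minuteswest only in the range [-15*60, 15*60] */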
for (i = -15*60; i < 15*60; i += 30) {
ret = set_tz(i, dst);
ret = get_tz_min();
if (ret != i) {
printf("[FAILED] expected: %i got %i\n", i, ret);
goto err;
}
}
printf("[OK]\n");
printf("Checking invalid tz_minuteswest values are caught: ");
fflush(stdout);
if (!set_tz(-15*60-1, dst)) {
printf("[FAILED] %i didn't return failure!\n", -15*60-1);
goto err;
}
if (!set_tz(15*60+1, dst)) {
printf("[FAILED] %i didn't return failure!\n", 15*60+1);
goto err;
}
if (!set_tz(-24*60, dst)) {
printf("[FAILED] %i didn't return failure!\n", -24*60);
goto err;
}
if (!set_tz(24*60, dst)) {
printf("[FAILED] %i didn't return failure!\n", 24*60);
goto err;
}
printf("[OK]\n");
set_tz(min, dst);
ksft_exit_pass();
err:
set_tz(min, dst);
ksft_exit_fail();
}
|
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_DCORE0_MME_WB0_MSTR_IF_AXUSER_REGS_H_
#define ASIC_REG_DCORE0_MME_WB0_MSTR_IF_AXUSER_REGS_H_
/*
*****************************************
* DCORE0_MME_WB0_MSTR_IF_AXUSER
* (Prototype: AXUSER)
*****************************************
*/
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_ASID 0x40F9A80
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_MMU_BP 0x40F9A84
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_STRONG_ORDER 0x40F9A88
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_NO_SNOOP 0x40F9A8C
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_WR_REDUCTION 0x40F9A90
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_RD_ATOMIC 0x40F9A94
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_QOS 0x40F9A98
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_RSVD 0x40F9A9C
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_EMEM_CPAGE 0x40F9AA0
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_CORE 0x40F9AA4
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_E2E_COORD 0x40F9AA8
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_WR_OVRD_LO 0x40F9AB0
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_WR_OVRD_HI 0x40F9AB4
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_RD_OVRD_LO 0x40F9AB8
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_RD_OVRD_HI 0x40F9ABC
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_LB_COORD 0x40F9AC0
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_LB_LOCK 0x40F9AC4
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_LB_RSVD 0x40F9AC8
#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_LB_OVRD 0x40F9ACC
#endif /* ASIC_REG_DCORE0_MME_WB0_MSTR_IF_AXUSER_REGS_H_ */
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/cpu.h>
#include <linux/init.h>
#include <asm/fpu.h>
#include <asm/smp.h>
static unsigned int euen_mask = CSR_EUEN_FPEN;
/*
* The critical section between kernel_fpu_begin() and kernel_fpu_end()
* is non-reentrant. It is the caller's responsibility to avoid reentrance.
* See drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c as an example.
*/
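/*
* Illustrative usage sketch (not from this file):
*
*	kernel_fpu_begin();
*	... use FP/SIMD registers; no sleeping, no reentry ...
*	kernel_fpu_end();
*/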
static DEFINE_PER_CPU(bool, in_kernel_fpu);
static DEFINE_PER_CPU(unsigned int, euen_current);
void kernel_fpu_begin(void)
{
unsigned int *euen_curr;
preempt_disable();
WARN_ON(this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, true);
euen_curr = this_cpu_ptr(&euen_current);
*euen_curr = csr_xchg32(euen_mask, euen_mask, LOONGARCH_CSR_EUEN);
#ifdef CONFIG_CPU_HAS_LASX
if (*euen_curr & CSR_EUEN_LASXEN)
_save_lasx(&current->thread.fpu);
else
#endif
#ifdef CONFIG_CPU_HAS_LSX
if (*euen_curr & CSR_EUEN_LSXEN)
_save_lsx(&current->thread.fpu);
else
#endif
if (*euen_curr & CSR_EUEN_FPEN)
_save_fp(&current->thread.fpu);
write_fcsr(LOONGARCH_FCSR0, 0);
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);
void kernel_fpu_end(void)
{
unsigned int *euen_curr;
WARN_ON(!this_cpu_read(in_kernel_fpu));
euen_curr = this_cpu_ptr(&euen_current);
#ifdef CONFIG_CPU_HAS_LASX
if (*euen_curr & CSR_EUEN_LASXEN)
_restore_lasx(&current->thread.fpu);
else
#endif
#ifdef CONFIG_CPU_HAS_LSX
if (*euen_curr & CSR_EUEN_LSXEN)
_restore_lsx(&current->thread.fpu);
else
#endif
if (*euen_curr & CSR_EUEN_FPEN)
_restore_fp(&current->thread.fpu);
*euen_curr = csr_xchg32(*euen_curr, euen_mask, LOONGARCH_CSR_EUEN);
this_cpu_write(in_kernel_fpu, false);
preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
static int __init init_euen_mask(void)
{
if (cpu_has_lsx)
euen_mask |= CSR_EUEN_LSXEN;
if (cpu_has_lasx)
euen_mask |= CSR_EUEN_LASXEN;
return 0;
}
arch_initcall(init_euen_mask);
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_DRIVER_H
#define _SCSI_SCSI_DRIVER_H
#include <linux/blk_types.h>
#include <linux/device.h>
#include <scsi/scsi_cmnd.h>
struct module;
struct request;
struct scsi_driver {
struct device_driver gendrv;
int (*resume)(struct device *);
void (*rescan)(struct device *);
blk_status_t (*init_command)(struct scsi_cmnd *);
void (*uninit_command)(struct scsi_cmnd *);
int (*done)(struct scsi_cmnd *);
int (*eh_action)(struct scsi_cmnd *, int);
void (*eh_reset)(struct scsi_cmnd *);
};
#define to_scsi_driver(drv) \
container_of((drv), struct scsi_driver, gendrv)
#define scsi_register_driver(drv) \
__scsi_register_driver(drv, THIS_MODULE)
int __scsi_register_driver(struct device_driver *, struct module *);
#define scsi_unregister_driver(drv) \
driver_unregister(drv);
extern int scsi_register_interface(struct class_interface *);
#define scsi_unregister_interface(intf) \
class_interface_unregister(intf)
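/*
* Illustrative usage sketch (an assumption, not from this header): an
* upper-level driver embeds struct scsi_driver and registers its gendrv
* member, e.g.
*
*	static struct scsi_driver my_template = {
*		.gendrv = { .name = "my_uld" },
*		.init_command = my_init_command,
*	};
*	...
*	scsi_register_driver(&my_template.gendrv);
*/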
/* make sure not to use it with passthrough commands */
static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
{
return to_scsi_driver(cmd->device->sdev_gendev.driver);
}
#endif /* _SCSI_SCSI_DRIVER_H */
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_X86_64
/*
* In case of a 32-bit VDSO for a 64-bit kernel, fake a 32-bit kernel
* configuration.
*/
#undef CONFIG_64BIT
#undef CONFIG_X86_64
#undef CONFIG_COMPAT
#undef CONFIG_PGTABLE_LEVELS
#undef CONFIG_ILLEGAL_POINTER_VALUE
#undef CONFIG_SPARSEMEM_VMEMMAP
#undef CONFIG_NR_CPUS
#undef CONFIG_PARAVIRT_XXL
#define CONFIG_X86_32 1
#define CONFIG_PGTABLE_LEVELS 2
#define CONFIG_PAGE_OFFSET 0
#define CONFIG_ILLEGAL_POINTER_VALUE 0
#define CONFIG_NR_CPUS 1
#define BUILD_VDSO32_64
#endif
|
#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
#define __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define GMMU_FMT_MAX_LEVELS 6U
#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
/*!
* [in] GPU sub-device handle - this API only supports unicast.
* Pass 0 to use subDeviceId instead.
*/
NvHandle hSubDevice;
/*!
* [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
*/
NvU32 subDeviceId;
/*!
* [in] Page size (VA coverage) of the level to reserve.
* This need not be a leaf (page table) page size - it can be
* the coverage of an arbitrary level (including root page directory).
*/
NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
/*!
* [in] First GPU virtual address of the range to reserve.
* This must be aligned to pageSize.
*/
NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
/*!
* [in] Last GPU virtual address of the range to reserve.
* This (+1) must be aligned to pageSize.
*/
NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
/*!
* [in] Number of PDE levels to copy.
*/
NvU32 numLevelsToCopy;
/*!
* [in] Per-level information.
*/
struct {
/*!
* Physical address of this page level instance.
*/
NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
/*!
* Size in bytes allocated for this level instance.
*/
NV_DECLARE_ALIGNED(NvU64 size, 8);
/*!
* Aperture in which this page level instance resides.
*/
NvU32 aperture;
/*!
* Page shift corresponding to the level
*/
NvU8 pageShift;
} levels[GMMU_FMT_MAX_LEVELS];
} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
#endif
|
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel module to match MAC address parameters. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/xt_mac.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <[email protected]>");
MODULE_DESCRIPTION("Xtables: MAC address match");
MODULE_ALIAS("ipt_mac");
MODULE_ALIAS("ip6t_mac");
static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_mac_info *info = par->matchinfo;
bool ret;
if (skb->dev == NULL || skb->dev->type != ARPHRD_ETHER)
return false;
if (skb_mac_header(skb) < skb->head)
return false;
if (skb_mac_header(skb) + ETH_HLEN > skb->data)
return false;
ret = ether_addr_equal(eth_hdr(skb)->h_source, info->srcaddr);
ret ^= info->invert;
return ret;
}
static struct xt_match mac_mt_reg __read_mostly = {
.name = "mac",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = mac_mt,
.matchsize = sizeof(struct xt_mac_info),
.hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD),
.me = THIS_MODULE,
};
static int __init mac_mt_init(void)
{
return xt_register_match(&mac_mt_reg);
}
static void __exit mac_mt_exit(void)
{
xt_unregister_match(&mac_mt_reg);
}
module_init(mac_mt_init);
module_exit(mac_mt_exit);
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2024 Amlogic, Inc. All rights reserved.
*/
#include "amlogic-a4-common.dtsi"
#include <dt-bindings/power/amlogic,a4-pwrc.h>
/ {
cpus {
#address-cells = <2>;
#size-cells = <0>;
cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
enable-method = "psci";
};
cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "psci";
};
cpu2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
enable-method = "psci";
};
cpu3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
enable-method = "psci";
};
};
sm: secure-monitor {
compatible = "amlogic,meson-gxbb-sm";
pwrc: power-controller {
compatible = "amlogic,a4-pwrc";
#power-domain-cells = <1>;
};
};
};
|
// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <[email protected]>
* this version considerably modified by David Borowski, [email protected]
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* specifically written as a driver for the speakup screenreview
* package; it is not a general device driver.
*/
#include "speakup.h"
#include "spk_priv.h"
#include "speakup_dtlk.h" /* local header file for LiteTalk values */
#define DRV_VERSION "2.11"
#define PROCSPEECH 0x0d
static int synth_probe(struct spk_synth *synth);
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID,
VOL_ID, TONE_ID, PUNCT_ID,
VOICE_ID, FREQUENCY_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID
};
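/*
* Each string below is a synthesizer command: a CTRL-A (0x01) prefix,
* the value, then a selector letter (per this table: s=rate, p=pitch,
* v=volume, x=tone, b=punct, o=voice, f=frequency).
*/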
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"\x01+35p" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x01-35p" } },
[RATE_ID] = { RATE, .u.n = {"\x01%ds", 8, 0, 9, 0, 0, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"\x01%dp", 50, 0, 99, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } },
[TONE_ID] = { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } },
[PUNCT_ID] = { PUNCT, .u.n = {"\x01%db", 7, 0, 15, 0, 0, NULL } },
[VOICE_ID] = { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } },
[FREQUENCY_ID] = { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/ltlk.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute freq_attribute =
__ATTR(freq, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
__ATTR(tone, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
__ATTR(voice, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&freq_attribute.attr,
&pitch_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&voice_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_ltlk = {
.name = "ltlk",
.version = DRV_VERSION,
.long_name = "LiteTalk",
.init = "\01@\x01\x31y\n\0",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 40000,
.dev_name = SYNTH_DEFAULT_DEV,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_ttyio_ops,
.probe = synth_probe,
.release = spk_ttyio_release,
.synth_immediate = spk_ttyio_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = spk_synth_get_index,
.indexing = {
.command = "\x01%di",
.lowindex = 1,
.highindex = 5,
.currindex = 1,
},
.attributes = {
.attrs = synth_attrs,
.name = "ltlk",
},
};
/* interrogate the LiteTalk and print its settings */
static void synth_interrogate(struct spk_synth *synth)
{
unsigned char *t, i;
unsigned char buf[50], rom_v[20];
synth->synth_immediate(synth, "\x18\x01?");
for (i = 0; i < 50; i++) {
buf[i] = synth->io_ops->synth_in(synth);
if (i > 2 && buf[i] == 0x7f)
break;
}
t = buf + 2;
for (i = 0; *t != '\r'; t++) {
rom_v[i] = *t;
if (++i >= 19)
break;
}
rom_v[i] = 0;
pr_info("%s: ROM version: %s\n", synth->long_name, rom_v);
}
static int synth_probe(struct spk_synth *synth)
{
int failed = 0;
failed = spk_ttyio_synth_probe(synth);
if (failed == 0)
synth_interrogate(synth);
synth->alive = !failed;
return failed;
}
module_param_named(ser, synth_ltlk.ser, int, 0444);
module_param_named(dev, synth_ltlk.dev_name, charp, 0444);
module_param_named(start, synth_ltlk.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444);
module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444);
module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444);
module_param_named(frequency, vars[FREQUENCY_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer.");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(tone, "Set the tone variable on load.");
MODULE_PARM_DESC(punct, "Set the punct variable on load.");
MODULE_PARM_DESC(voice, "Set the voice variable on load.");
MODULE_PARM_DESC(frequency, "Set the frequency variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_ltlk);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for DoubleTalk LT/LiteTalk synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
|
/*
* Copyright 2015 Hans de Goede <[email protected]>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
* licensing only applies to this file, and not this project as a
* whole.
*
* a) This file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This file is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Or, alternatively,
*
* b) Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
#include "sun5i-a10s.dtsi"
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
/ {
model = "Auxtek t003 A10s hdmi tv-stick";
compatible = "allwinner,auxtek-t003", "allwinner,sun5i-a10s";
aliases {
serial0 = &uart0;
};
chosen {
stdout-path = "serial0:115200n8";
};
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&led_pins_t003>;
led {
label = "t003-tv-dongle:red:usr";
gpios = <&pio 1 2 GPIO_ACTIVE_HIGH>; /* PB2 */
default-state = "on";
};
};
};
&ehci0 {
status = "okay";
};
&i2c0 {
status = "okay";
axp152: pmic@30 {
compatible = "x-powers,axp152";
reg = <0x30>;
interrupts = <0>;
interrupt-controller;
#interrupt-cells = <1>;
};
};
&mmc0 {
vmmc-supply = <&reg_vcc3v3>;
bus-width = <4>;
cd-gpios = <&pio 6 1 GPIO_ACTIVE_LOW>; /* PG1 */
status = "okay";
};
&ohci0 {
status = "okay";
};
&otg_sram {
status = "okay";
};
&pio {
led_pins_t003: led-pin {
pins = "PB2";
function = "gpio_out";
drive-strength = <20>;
};
};
&reg_usb0_vbus {
gpio = <&pio 6 13 GPIO_ACTIVE_HIGH>; /* PG13 */
status = "okay";
};
&reg_usb1_vbus {
gpio = <&pio 1 10 GPIO_ACTIVE_HIGH>; /* PB10 */
status = "okay";
};
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pb_pins>;
status = "okay";
};
&usb_otg {
dr_mode = "host";
status = "okay";
};
&usbphy {
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
status = "okay";
};
|
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <net/hotdata.h>
#include <net/proto_memory.h>
struct net_hotdata net_hotdata __cacheline_aligned = {
.offload_base = LIST_HEAD_INIT(net_hotdata.offload_base),
.ptype_all = LIST_HEAD_INIT(net_hotdata.ptype_all),
.gro_normal_batch = 8,
.netdev_budget = 300,
/* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
.netdev_budget_usecs = 2 * USEC_PER_SEC / HZ,
.tstamp_prequeue = 1,
.max_backlog = 1000,
.dev_tx_weight = 64,
.dev_rx_weight = 64,
.sysctl_max_skb_frags = MAX_SKB_FRAGS,
.sysctl_skb_defer_max = 64,
.sysctl_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE
};
EXPORT_SYMBOL(net_hotdata);
|
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Device Tree Source for AM625 SoC Family MCU Domain peripherals
*
* Copyright (C) 2020-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
&cbass_mcu {
mcu_pmx0: pinctrl@4084000 {
compatible = "pinctrl-single";
reg = <0x00 0x04084000 0x00 0x88>;
#pinctrl-cells = <1>;
pinctrl-single,register-width = <32>;
pinctrl-single,function-mask = <0xffffffff>;
status = "disabled";
};
mcu_esm: esm@4100000 {
compatible = "ti,j721e-esm";
reg = <0x0 0x4100000 0x0 0x1000>;
bootph-pre-ram;
/* Interrupt sources: esm0_cfg, esm0_hi, esm0_low, mrti0 */
ti,esm-pins = <0>, <1>, <2>, <85>;
};
/*
* The MCU domain timer interrupts are routed only to the ESM module,
* and not currently available for Linux. The MCU domain timers are
* of limited use without interrupts, and likely reserved by the ESM.
*/
mcu_timer0: timer@4800000 {
compatible = "ti,am654-timer";
reg = <0x00 0x4800000 0x00 0x400>;
clocks = <&k3_clks 35 2>;
clock-names = "fck";
power-domains = <&k3_pds 35 TI_SCI_PD_EXCLUSIVE>;
ti,timer-pwm;
status = "reserved";
};
mcu_timer1: timer@4810000 {
compatible = "ti,am654-timer";
reg = <0x00 0x4810000 0x00 0x400>;
clocks = <&k3_clks 48 2>;
clock-names = "fck";
power-domains = <&k3_pds 48 TI_SCI_PD_EXCLUSIVE>;
ti,timer-pwm;
status = "reserved";
};
mcu_timer2: timer@4820000 {
compatible = "ti,am654-timer";
reg = <0x00 0x4820000 0x00 0x400>;
clocks = <&k3_clks 49 2>;
clock-names = "fck";
power-domains = <&k3_pds 49 TI_SCI_PD_EXCLUSIVE>;
ti,timer-pwm;
status = "reserved";
};
mcu_timer3: timer@4830000 {
compatible = "ti,am654-timer";
reg = <0x00 0x4830000 0x00 0x400>;
clocks = <&k3_clks 50 2>;
clock-names = "fck";
power-domains = <&k3_pds 50 TI_SCI_PD_EXCLUSIVE>;
ti,timer-pwm;
status = "reserved";
};
mcu_uart0: serial@4a00000 {
compatible = "ti,am64-uart", "ti,am654-uart";
reg = <0x00 0x04a00000 0x00 0x100>;
interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&k3_pds 149 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 149 0>;
clock-names = "fclk";
status = "disabled";
};
mcu_i2c0: i2c@4900000 {
compatible = "ti,am64-i2c", "ti,omap4-i2c";
reg = <0x00 0x04900000 0x00 0x100>;
interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
power-domains = <&k3_pds 106 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 106 2>;
clock-names = "fck";
status = "disabled";
};
mcu_spi0: spi@4b00000 {
compatible = "ti,am654-mcspi", "ti,omap4-mcspi";
reg = <0x00 0x04b00000 0x00 0x400>;
interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
power-domains = <&k3_pds 147 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 147 0>;
status = "disabled";
};
mcu_spi1: spi@4b10000 {
compatible = "ti,am654-mcspi","ti,omap4-mcspi";
reg = <0x00 0x04b10000 0x00 0x400>;
interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
power-domains = <&k3_pds 148 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 148 0>;
status = "disabled";
};
mcu_gpio_intr: interrupt-controller@4210000 {
compatible = "ti,sci-intr";
reg = <0x00 0x04210000 0x00 0x200>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
#interrupt-cells = <1>;
ti,sci = <&dmsc>;
ti,sci-dev-id = <5>;
ti,interrupt-ranges = <0 104 4>;
};
mcu_gpio0: gpio@4201000 {
compatible = "ti,am64-gpio", "ti,keystone-gpio";
reg = <0x00 0x04201000 0x00 0x100>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&mcu_gpio_intr>;
interrupts = <30>, <31>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <24>;
ti,davinci-gpio-unbanked = <0>;
power-domains = <&k3_pds 79 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 79 0>;
clock-names = "gpio";
status = "disabled";
};
mcu_rti0: watchdog@4880000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x04880000 0x00 0x100>;
clocks = <&k3_clks 131 0>;
power-domains = <&k3_pds 131 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 131 0>;
assigned-clock-parents = <&k3_clks 131 2>;
/* Tightly coupled to M4F */
status = "reserved";
};
mcu_mcan0: can@4e08000 {
compatible = "bosch,m_can";
reg = <0x00 0x4e08000 0x00 0x200>,
<0x00 0x4e00000 0x00 0x8000>;
reg-names = "m_can", "message_ram";
power-domains = <&k3_pds 188 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 188 6>, <&k3_clks 188 1>;
clock-names = "hclk", "cclk";
bosch,mram-cfg = <0x0 128 64 64 64 64 32 32>;
status = "disabled";
};
mcu_mcan1: can@4e18000 {
compatible = "bosch,m_can";
reg = <0x00 0x4e18000 0x00 0x200>,
<0x00 0x4e10000 0x00 0x8000>;
reg-names = "m_can", "message_ram";
power-domains = <&k3_pds 189 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 189 6>, <&k3_clks 189 1>;
clock-names = "hclk", "cclk";
bosch,mram-cfg = <0x0 128 64 64 64 64 32 32>;
status = "disabled";
};
};
|
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef PP_THERMAL_H
#define PP_THERMAL_H
#include "power_state.h"
static const struct PP_TemperatureRange __maybe_unused SMU7ThermalWithDelayPolicy[] = {
{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
static const struct PP_TemperatureRange __maybe_unused SMU7ThermalPolicy[] = {
{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
#define CTF_OFFSET_EDGE 5
#define CTF_OFFSET_HOTSPOT 5
#define CTF_OFFSET_HBM 5
#endif
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* w1_ds2431.c - w1 family 2d (DS2431) driver
*
* Copyright (c) 2008 Bernhard Weirich <[email protected]>
*
* Heavily inspired by w1_DS2433 driver from Ben Gardner <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/w1.h>
#define W1_EEPROM_DS2431 0x2D
#define W1_F2D_EEPROM_SIZE 128
#define W1_F2D_PAGE_COUNT 4
#define W1_F2D_PAGE_BITS 5
#define W1_F2D_PAGE_SIZE (1<<W1_F2D_PAGE_BITS)
#define W1_F2D_PAGE_MASK 0x1F
#define W1_F2D_SCRATCH_BITS 3
#define W1_F2D_SCRATCH_SIZE (1<<W1_F2D_SCRATCH_BITS)
#define W1_F2D_SCRATCH_MASK (W1_F2D_SCRATCH_SIZE-1)
#define W1_F2D_READ_EEPROM 0xF0
#define W1_F2D_WRITE_SCRATCH 0x0F
#define W1_F2D_READ_SCRATCH 0xAA
#define W1_F2D_COPY_SCRATCH 0x55
#define W1_F2D_TPROG_MS 11
#define W1_F2D_READ_RETRIES 10
#define W1_F2D_READ_MAXLEN 8
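/*
 * Derived geometry: W1_F2D_PAGE_COUNT (4) pages of W1_F2D_PAGE_SIZE (32)
 * bytes give the 128-byte EEPROM; all writes go through the 8-byte
 * scratchpad (W1_F2D_SCRATCH_SIZE).
 */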
/*
 * Checks the file size bounds and adjusts count as needed.
* This would not be needed if the file size didn't reset to 0 after a write.
*/
static inline size_t w1_f2d_fix_count(loff_t off, size_t count, size_t size)
{
if (off > size)
return 0;
if ((off + count) > size)
return size - off;
return count;
}
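/*
 * Example: for off=120, count=16, size=128 the access is clamped to the
 * last 8 bytes; for off >= 128 it returns 0.
 */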
/*
 * Reads a block from the W1 ROM twice and compares the results.
 * If the two reads match, the data is returned; otherwise the
 * read is retried, up to W1_F2D_READ_RETRIES times in total.
*
* count must not exceed W1_F2D_READ_MAXLEN.
*/
static int w1_f2d_readblock(struct w1_slave *sl, int off, int count, char *buf)
{
u8 wrbuf[3];
u8 cmp[W1_F2D_READ_MAXLEN];
int tries = W1_F2D_READ_RETRIES;
do {
wrbuf[0] = W1_F2D_READ_EEPROM;
wrbuf[1] = off & 0xff;
wrbuf[2] = off >> 8;
if (w1_reset_select_slave(sl))
return -1;
w1_write_block(sl->master, wrbuf, 3);
w1_read_block(sl->master, buf, count);
if (w1_reset_select_slave(sl))
return -1;
w1_write_block(sl->master, wrbuf, 3);
w1_read_block(sl->master, cmp, count);
if (!memcmp(cmp, buf, count))
return 0;
} while (--tries);
dev_err(&sl->dev, "proof reading failed %d times\n",
W1_F2D_READ_RETRIES);
return -1;
}
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int todo = count;
count = w1_f2d_fix_count(off, count, W1_F2D_EEPROM_SIZE);
if (count == 0)
return 0;
mutex_lock(&sl->master->bus_mutex);
/* read directly from the EEPROM in chunks of W1_F2D_READ_MAXLEN */
while (todo > 0) {
int block_read;
if (todo >= W1_F2D_READ_MAXLEN)
block_read = W1_F2D_READ_MAXLEN;
else
block_read = todo;
if (w1_f2d_readblock(sl, off, block_read, buf) < 0)
count = -EIO;
todo -= W1_F2D_READ_MAXLEN;
buf += W1_F2D_READ_MAXLEN;
off += W1_F2D_READ_MAXLEN;
}
mutex_unlock(&sl->master->bus_mutex);
return count;
}
/*
* Writes to the scratchpad and reads it back for verification.
* Then copies the scratchpad to EEPROM.
* The data must be aligned at W1_F2D_SCRATCH_SIZE bytes and
* must be W1_F2D_SCRATCH_SIZE bytes long.
* The master must be locked.
*
* @param sl The slave structure
* @param addr Address for the write
* @param len length must be <= (W1_F2D_PAGE_SIZE - (addr & W1_F2D_PAGE_MASK))
* @param data The data to write
* @return 0=Success -1=failure
*/
static int w1_f2d_write(struct w1_slave *sl, int addr, int len, const u8 *data)
{
int tries = W1_F2D_READ_RETRIES;
u8 wrbuf[4];
u8 rdbuf[W1_F2D_SCRATCH_SIZE + 3];
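	/*
	 * es is the device's E/S byte: the ending offset of the data within
	 * the scratchpad.  It is checked against rdbuf[2] during verification
	 * and sent back as the authorization byte (wrbuf[3]) for the copy
	 * command below.
	 */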
u8 es = (addr + len - 1) % W1_F2D_SCRATCH_SIZE;
retry:
/* Write the data to the scratchpad */
if (w1_reset_select_slave(sl))
return -1;
wrbuf[0] = W1_F2D_WRITE_SCRATCH;
wrbuf[1] = addr & 0xff;
wrbuf[2] = addr >> 8;
w1_write_block(sl->master, wrbuf, 3);
w1_write_block(sl->master, data, len);
/* Read the scratchpad and verify */
if (w1_reset_select_slave(sl))
return -1;
w1_write_8(sl->master, W1_F2D_READ_SCRATCH);
w1_read_block(sl->master, rdbuf, len + 3);
/* Compare what was read against the data written */
if ((rdbuf[0] != wrbuf[1]) || (rdbuf[1] != wrbuf[2]) ||
(rdbuf[2] != es) || (memcmp(data, &rdbuf[3], len) != 0)) {
if (--tries)
goto retry;
dev_err(&sl->dev,
"could not write to eeprom, scratchpad compare failed %d times\n",
W1_F2D_READ_RETRIES);
return -1;
}
/* Copy the scratchpad to EEPROM */
if (w1_reset_select_slave(sl))
return -1;
wrbuf[0] = W1_F2D_COPY_SCRATCH;
wrbuf[3] = es;
w1_write_block(sl->master, wrbuf, 4);
/* Sleep for tprog ms to wait for the write to complete */
msleep(W1_F2D_TPROG_MS);
/* Reset the bus to wake up the EEPROM */
w1_reset_bus(sl->master);
return 0;
}
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int addr, len;
int copy;
count = w1_f2d_fix_count(off, count, W1_F2D_EEPROM_SIZE);
if (count == 0)
return 0;
mutex_lock(&sl->master->bus_mutex);
/* Can only write data in blocks of the size of the scratchpad */
addr = off;
len = count;
while (len > 0) {
/* if len too short or addr not aligned */
if (len < W1_F2D_SCRATCH_SIZE || addr & W1_F2D_SCRATCH_MASK) {
char tmp[W1_F2D_SCRATCH_SIZE];
/* read the block and update the parts to be written */
if (w1_f2d_readblock(sl, addr & ~W1_F2D_SCRATCH_MASK,
W1_F2D_SCRATCH_SIZE, tmp)) {
count = -EIO;
goto out_up;
}
			/* copy at most up to the scratchpad boundary or len */
copy = W1_F2D_SCRATCH_SIZE -
(addr & W1_F2D_SCRATCH_MASK);
if (copy > len)
copy = len;
memcpy(&tmp[addr & W1_F2D_SCRATCH_MASK], buf, copy);
if (w1_f2d_write(sl, addr & ~W1_F2D_SCRATCH_MASK,
W1_F2D_SCRATCH_SIZE, tmp) < 0) {
count = -EIO;
goto out_up;
}
} else {
copy = W1_F2D_SCRATCH_SIZE;
if (w1_f2d_write(sl, addr, copy, buf) < 0) {
count = -EIO;
goto out_up;
}
}
buf += copy;
addr += copy;
len -= copy;
}
out_up:
mutex_unlock(&sl->master->bus_mutex);
return count;
}
static BIN_ATTR_RW(eeprom, W1_F2D_EEPROM_SIZE);
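/*
 * Userspace access sketch (the device directory is named after the slave's
 * family id and ROM id; "2d-0000012345678" below is a made-up example):
 *
 *   hexdump -C /sys/bus/w1/devices/2d-0000012345678/eeprom
 *   echo -n "data" | dd of=/sys/bus/w1/devices/2d-0000012345678/eeprom bs=1 seek=8
 */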
static struct bin_attribute *w1_f2d_bin_attrs[] = {
&bin_attr_eeprom,
NULL,
};
static const struct attribute_group w1_f2d_group = {
.bin_attrs = w1_f2d_bin_attrs,
};
static const struct attribute_group *w1_f2d_groups[] = {
&w1_f2d_group,
NULL,
};
static const struct w1_family_ops w1_f2d_fops = {
.groups = w1_f2d_groups,
};
static struct w1_family w1_family_2d = {
.fid = W1_EEPROM_DS2431,
.fops = &w1_f2d_fops,
};
module_w1_family(w1_family_2d);
MODULE_AUTHOR("Bernhard Weirich <[email protected]>");
MODULE_DESCRIPTION("w1 family 2d driver for DS2431, 1kb EEPROM");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2431));
|
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef TEST_KPROBES_H
#define TEST_KPROBES_H
extern unsigned long kprobes_target_odd_offs;
extern unsigned long kprobes_target_in_insn4_offs;
extern unsigned long kprobes_target_in_insn6_lo_offs;
extern unsigned long kprobes_target_in_insn6_hi_offs;
#endif
|
/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL divider clock driver
//
// Copyright (c) 2014 Actions Semi Inc.
// Author: David Liu <[email protected]>
//
// Copyright (c) 2018 Linaro Ltd.
// Author: Manivannan Sadhasivam <[email protected]>
#ifndef _OWL_DIVIDER_H_
#define _OWL_DIVIDER_H_
#include "owl-common.h"
struct owl_divider_hw {
u32 reg;
u8 shift;
u8 width;
u8 div_flags;
struct clk_div_table *table;
};
struct owl_divider {
struct owl_divider_hw div_hw;
struct owl_clk_common common;
};
#define OWL_DIVIDER_HW(_reg, _shift, _width, _div_flags, _table) \
{ \
.reg = _reg, \
.shift = _shift, \
.width = _width, \
.div_flags = _div_flags, \
.table = _table, \
}
#define OWL_DIVIDER(_struct, _name, _parent, _reg, \
_shift, _width, _table, _div_flags, _flags) \
struct owl_divider _struct = { \
.div_hw = OWL_DIVIDER_HW(_reg, _shift, _width, \
_div_flags, _table), \
.common = { \
.regmap = NULL, \
.hw.init = CLK_HW_INIT(_name, \
_parent, \
&owl_divider_ops, \
_flags), \
}, \
}
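/*
 * Usage sketch with a hypothetical clock: a 4-bit divider at bit 0 of a
 * register CMU_FOOCLK, with no divider table and no extra flags:
 *
 *	static OWL_DIVIDER(foo_div, "foo_div", "foo_parent",
 *			   CMU_FOOCLK, 0, 4, NULL, 0, 0);
 */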
static inline struct owl_divider *hw_to_owl_divider(const struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
return container_of(common, struct owl_divider, common);
}
long owl_divider_helper_round_rate(struct owl_clk_common *common,
const struct owl_divider_hw *div_hw,
unsigned long rate,
unsigned long *parent_rate);
unsigned long owl_divider_helper_recalc_rate(struct owl_clk_common *common,
const struct owl_divider_hw *div_hw,
unsigned long parent_rate);
int owl_divider_helper_set_rate(const struct owl_clk_common *common,
const struct owl_divider_hw *div_hw,
unsigned long rate,
unsigned long parent_rate);
extern const struct clk_ops owl_divider_ops;
#endif /* _OWL_DIVIDER_H_ */
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Serial Device Initialisation for Lasi/Asp/Wax/Dino
*
* (c) Copyright Matthew Wilcox <[email protected]> 2001-2002
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/serial_core.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <asm/hardware.h>
#include <asm/parisc-device.h>
#include <asm/io.h>
#include "8250.h"
static int __init serial_init_chip(struct parisc_device *dev)
{
struct uart_8250_port uart;
unsigned long address;
int err;
#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC)
if (!dev->irq && (dev->id.sversion == 0xad))
dev->irq = iosapic_serial_irq(dev);
#endif
if (!dev->irq) {
/* We find some unattached serial ports by walking native
* busses. These should be silently ignored. Otherwise,
* what we have here is a missing parent device, so tell
* the user what they're missing.
*/
if (parisc_parent(dev)->id.hw_type != HPHW_IOA)
dev_info(&dev->dev,
"Serial: device 0x%llx not configured.\n"
"Enable support for Wax, Lasi, Asp or Dino.\n",
(unsigned long long)dev->hpa.start);
return -ENODEV;
}
address = dev->hpa.start;
if (dev->id.sversion != 0x8d)
address += 0x800;
memset(&uart, 0, sizeof(uart));
uart.port.iotype = UPIO_MEM;
/* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */
uart.port.uartclk = (dev->id.sversion != 0xad) ?
7272727 : 1843200;
uart.port.mapbase = address;
uart.port.membase = ioremap(address, 16);
if (!uart.port.membase) {
dev_warn(&dev->dev, "Failed to map memory\n");
return -ENOMEM;
}
uart.port.irq = dev->irq;
uart.port.flags = UPF_BOOT_AUTOCONF;
uart.port.dev = &dev->dev;
err = serial8250_register_8250_port(&uart);
if (err < 0) {
dev_warn(&dev->dev,
"serial8250_register_8250_port returned error %d\n",
err);
iounmap(uart.port.membase);
return err;
}
return 0;
}
static const struct parisc_device_id serial_tbl[] __initconst = {
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 },
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c },
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d },
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x000ad },
{ 0 }
};
/* Hack. Some machines have SERIAL_0 attached to Lasi and SERIAL_1
* attached to Dino. Unfortunately, Dino appears before Lasi in the device
* tree. To ensure that ttyS0 == SERIAL_0, we register two drivers; one
* which only knows about Lasi and then a second which will find all the
* other serial ports. HPUX ignores this problem.
*/
static const struct parisc_device_id lasi_tbl[] __initconst = {
{ HPHW_FIO, HVERSION_REV_ANY_ID, 0x03B, 0x0008C }, /* C1xx/C1xxL */
{ HPHW_FIO, HVERSION_REV_ANY_ID, 0x03C, 0x0008C }, /* B132L */
{ HPHW_FIO, HVERSION_REV_ANY_ID, 0x03D, 0x0008C }, /* B160L */
{ HPHW_FIO, HVERSION_REV_ANY_ID, 0x03E, 0x0008C }, /* B132L+ */
{ HPHW_FIO, HVERSION_REV_ANY_ID, 0x03F, 0x0008C }, /* B180L+ */
{ HPHW_FIO, HVERSION_REV_ANY_ID, 0x046, 0x0008C }, /* Rocky2 120 */
{ HPHW_FIO, HVERSION_REV_ANY_ID, 0x047, 0x0008C }, /* Rocky2 150 */
{ HPHW_FIO, HVERSION_REV_ANY_ID, 0x04E, 0x0008C }, /* Kiji L2 132 */
{ HPHW_FIO, HVERSION_REV_ANY_ID, 0x056, 0x0008C }, /* Raven+ */
{ 0 }
};
MODULE_DEVICE_TABLE(parisc, serial_tbl);
static struct parisc_driver lasi_driver __refdata = {
.name = "serial_1",
.id_table = lasi_tbl,
.probe = serial_init_chip,
};
static struct parisc_driver serial_driver __refdata = {
.name = "serial",
.id_table = serial_tbl,
.probe = serial_init_chip,
};
static int __init probe_serial_gsc(void)
{
register_parisc_driver(&lasi_driver);
register_parisc_driver(&serial_driver);
return 0;
}
module_init(probe_serial_gsc);
MODULE_DESCRIPTION("Serial Device Initialisation for Lasi/Asp/Wax/Dino");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
/* For debugging crashes, userspace can:
*
* tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
*
* to log the cmdstream in a format that is understood by freedreno/cffdump
* utility. By comparing the last successfully completed fence #, to the
* cmdstream for the next fence, you can narrow down which process and submit
* caused the gpu crash/lockup.
*
* Additionally:
*
* tail -f /sys/kernel/debug/dri/<minor>/hangrd > logfile.rd
*
* will capture just the cmdstream from submits which triggered a GPU hang.
*
* This bypasses drm_debugfs_create_files() mainly because we need to use
* our own fops for a bit more control. In particular, we don't want to
* do anything if userspace doesn't have the debugfs file open.
*
* The module-param "rd_full", which defaults to false, enables snapshotting
* all (non-written) buffers in the submit, rather than just cmdstream bo's.
* This is useful to capture the contents of (for example) vbo's or textures,
* or shader programs (if not emitted inline in cmdstream).
*/
#include <linux/circ_buf.h>
#include <linux/debugfs.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <drm/drm_file.h>
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);
#ifdef CONFIG_DEBUG_FS
enum rd_sect_type {
RD_NONE,
RD_TEST, /* ascii text */
RD_CMD, /* ascii text */
RD_GPUADDR, /* u32 gpuaddr, u32 size */
RD_CONTEXT, /* raw dump */
RD_CMDSTREAM, /* raw dump */
RD_CMDSTREAM_ADDR, /* gpu addr of cmdstream */
RD_PARAM, /* u32 param_type, u32 param_val, u32 bitlen */
RD_FLUSH, /* empty, clear previous params */
RD_PROGRAM, /* shader program, raw dump */
RD_VERT_SHADER,
RD_FRAG_SHADER,
RD_BUFFER_CONTENTS,
RD_GPU_ID,
RD_CHIP_ID,
};
#define BUF_SZ 512 /* should be power of 2 */
/* space used: */
#define circ_count(circ) \
(CIRC_CNT((circ)->head, (circ)->tail, BUF_SZ))
#define circ_count_to_end(circ) \
(CIRC_CNT_TO_END((circ)->head, (circ)->tail, BUF_SZ))
/* space available: */
#define circ_space(circ) \
(CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
#define circ_space_to_end(circ) \
(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, BUF_SZ))
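/*
 * rd_write() and rd_read() wrap head/tail with '& (BUF_SZ - 1)', which is
 * only correct because BUF_SZ is a power of 2.
 */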
struct msm_rd_state {
struct drm_device *dev;
bool open;
	/* FIFO access is synchronized by write_lock on the producer
	 * side and by read_lock on the consumer side
*/
struct mutex read_lock, write_lock;
wait_queue_head_t fifo_event;
struct circ_buf fifo;
char buf[BUF_SZ];
};
static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
{
struct circ_buf *fifo = &rd->fifo;
const char *ptr = buf;
while (sz > 0) {
char *fptr = &fifo->buf[fifo->head];
int n;
wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
if (!rd->open)
return;
/* Note that smp_load_acquire() is not strictly required
* as CIRC_SPACE_TO_END() does not access the tail more
* than once.
*/
n = min(sz, circ_space_to_end(&rd->fifo));
memcpy(fptr, ptr, n);
smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
sz -= n;
ptr += n;
wake_up_all(&rd->fifo_event);
}
}
static void rd_write_section(struct msm_rd_state *rd,
enum rd_sect_type type, const void *buf, int sz)
{
rd_write(rd, &type, 4);
rd_write(rd, &sz, 4);
rd_write(rd, buf, sz);
}
static ssize_t rd_read(struct file *file, char __user *buf,
size_t sz, loff_t *ppos)
{
struct msm_rd_state *rd = file->private_data;
struct circ_buf *fifo = &rd->fifo;
const char *fptr = &fifo->buf[fifo->tail];
int n = 0, ret = 0;
mutex_lock(&rd->read_lock);
ret = wait_event_interruptible(rd->fifo_event,
circ_count(&rd->fifo) > 0);
if (ret)
goto out;
/* Note that smp_load_acquire() is not strictly required
* as CIRC_CNT_TO_END() does not access the head more than
* once.
*/
n = min_t(int, sz, circ_count_to_end(&rd->fifo));
if (copy_to_user(buf, fptr, n)) {
ret = -EFAULT;
goto out;
}
smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
*ppos += n;
wake_up_all(&rd->fifo_event);
out:
mutex_unlock(&rd->read_lock);
if (ret)
return ret;
return n;
}
static int rd_open(struct inode *inode, struct file *file)
{
struct msm_rd_state *rd = inode->i_private;
struct drm_device *dev = rd->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
uint64_t val;
uint32_t gpu_id;
uint32_t zero = 0;
int ret = 0;
if (!gpu)
return -ENODEV;
mutex_lock(&gpu->lock);
if (rd->open) {
ret = -EBUSY;
goto out;
}
file->private_data = rd;
rd->open = true;
/* Reset fifo to clear any previously unread data: */
rd->fifo.head = rd->fifo.tail = 0;
/* the parsing tools need to know gpu-id to know which
* register database to load.
*
* Note: These particular params do not require a context
*/
gpu->funcs->get_param(gpu, NULL, MSM_PARAM_GPU_ID, &val, &zero);
gpu_id = val;
rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));
gpu->funcs->get_param(gpu, NULL, MSM_PARAM_CHIP_ID, &val, &zero);
rd_write_section(rd, RD_CHIP_ID, &val, sizeof(val));
out:
mutex_unlock(&gpu->lock);
return ret;
}
static int rd_release(struct inode *inode, struct file *file)
{
struct msm_rd_state *rd = inode->i_private;
rd->open = false;
wake_up_all(&rd->fifo_event);
return 0;
}
static const struct file_operations rd_debugfs_fops = {
.owner = THIS_MODULE,
.open = rd_open,
.read = rd_read,
.release = rd_release,
};
static void rd_cleanup(struct msm_rd_state *rd)
{
if (!rd)
return;
mutex_destroy(&rd->read_lock);
mutex_destroy(&rd->write_lock);
kfree(rd);
}
static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
{
struct msm_rd_state *rd;
rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return ERR_PTR(-ENOMEM);
rd->dev = minor->dev;
rd->fifo.buf = rd->buf;
mutex_init(&rd->read_lock);
mutex_init(&rd->write_lock);
init_waitqueue_head(&rd->fifo_event);
debugfs_create_file(name, S_IFREG | S_IRUGO, minor->debugfs_root, rd,
&rd_debugfs_fops);
return rd;
}
int msm_rd_debugfs_init(struct drm_minor *minor)
{
struct msm_drm_private *priv = minor->dev->dev_private;
struct msm_rd_state *rd;
int ret;
if (!priv->gpu_pdev)
return 0;
/* only create on first minor: */
if (priv->rd)
return 0;
rd = rd_init(minor, "rd");
if (IS_ERR(rd)) {
ret = PTR_ERR(rd);
goto fail;
}
priv->rd = rd;
rd = rd_init(minor, "hangrd");
if (IS_ERR(rd)) {
ret = PTR_ERR(rd);
goto fail;
}
priv->hangrd = rd;
return 0;
fail:
msm_rd_debugfs_cleanup(priv);
return ret;
}
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
{
rd_cleanup(priv->rd);
priv->rd = NULL;
rd_cleanup(priv->hangrd);
priv->hangrd = NULL;
}
static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_submit *submit, int idx,
uint64_t iova, uint32_t size, bool full)
{
struct drm_gem_object *obj = submit->bos[idx].obj;
unsigned offset = 0;
const char *buf;
if (iova) {
offset = iova - submit->bos[idx].iova;
} else {
iova = submit->bos[idx].iova;
size = obj->size;
}
/*
	 * Always write the GPUADDR header so we can get a complete list of
	 * all the buffers in the submit
*/
rd_write_section(rd, RD_GPUADDR,
(uint32_t[3]){ iova, size, iova >> 32 }, 12);
if (!full)
return;
/* But only dump the contents of buffers marked READ */
if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
return;
buf = msm_gem_get_vaddr_active(obj);
if (IS_ERR(buf))
return;
buf += offset;
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr_locked(obj);
}
/* called under gpu->lock */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
{
struct task_struct *task;
char msg[256];
int i, n;
if (!rd->open)
return;
mutex_lock(&rd->write_lock);
if (fmt) {
va_list args;
va_start(args, fmt);
n = vscnprintf(msg, sizeof(msg), fmt, args);
va_end(args);
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
}
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
n = scnprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
TASK_COMM_LEN, task->comm,
pid_nr(submit->pid), submit->seqno);
} else {
n = scnprintf(msg, sizeof(msg), "???/%d: fence=%u",
pid_nr(submit->pid), submit->seqno);
}
rcu_read_unlock();
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
for (i = 0; i < submit->nr_bos; i++)
snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i));
for (i = 0; i < submit->nr_cmds; i++) {
uint32_t szd = submit->cmd[i].size; /* in dwords */
/* snapshot cmdstream bo's (if we haven't already): */
if (!should_dump(submit, i)) {
snapshot_buf(rd, submit, submit->cmd[i].idx,
submit->cmd[i].iova, szd * 4, true);
}
}
for (i = 0; i < submit->nr_cmds; i++) {
uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
/* ignore IB-targets, we've logged the buffer, the
* parser tool will follow the IB based on the logged
* buffer/gpuaddr, so nothing more to do.
*/
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
case MSM_SUBMIT_CMD_BUF:
rd_write_section(rd, RD_CMDSTREAM_ADDR,
(uint32_t[3]){ iova, szd, iova >> 32 }, 12);
break;
}
}
mutex_unlock(&rd->write_lock);
}
#endif
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* bvec iterator
*
* Copyright (C) 2001 Ming Lei <[email protected]>
*/
#ifndef __LINUX_BVEC_H
#define __LINUX_BVEC_H
#include <linux/highmem.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/minmax.h>
#include <linux/types.h>
struct page;
/**
* struct bio_vec - a contiguous range of physical memory addresses
* @bv_page: First page associated with the address range.
* @bv_len: Number of bytes in the address range.
* @bv_offset: Start of the address range relative to the start of @bv_page.
*
* The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len:
*
* nth_page(@bv_page, n) == @bv_page + n
*
* This holds because page_is_mergeable() checks the above property.
*/
struct bio_vec {
struct page *bv_page;
unsigned int bv_len;
unsigned int bv_offset;
};
/**
* bvec_set_page - initialize a bvec based off a struct page
* @bv: bvec to initialize
* @page: page the bvec should point to
* @len: length of the bvec
* @offset: offset into the page
*/
static inline void bvec_set_page(struct bio_vec *bv, struct page *page,
unsigned int len, unsigned int offset)
{
bv->bv_page = page;
bv->bv_len = len;
bv->bv_offset = offset;
}
/**
* bvec_set_folio - initialize a bvec based off a struct folio
* @bv: bvec to initialize
* @folio: folio the bvec should point to
* @len: length of the bvec
* @offset: offset into the folio
*/
static inline void bvec_set_folio(struct bio_vec *bv, struct folio *folio,
unsigned int len, unsigned int offset)
{
bvec_set_page(bv, &folio->page, len, offset);
}
/**
* bvec_set_virt - initialize a bvec based on a virtual address
* @bv: bvec to initialize
* @vaddr: virtual address to set the bvec to
* @len: length of the bvec
*/
static inline void bvec_set_virt(struct bio_vec *bv, void *vaddr,
unsigned int len)
{
bvec_set_page(bv, virt_to_page(vaddr), len, offset_in_page(vaddr));
}
struct bvec_iter {
sector_t bi_sector; /* device address in 512 byte
sectors */
unsigned int bi_size; /* residual I/O count */
unsigned int bi_idx; /* current index into bvl_vec */
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
} __packed __aligned(4);
struct bvec_iter_all {
struct bio_vec bv;
int idx;
unsigned done;
};
/*
* various member access, note that bio_data should of course not be used
* on highmem page vectors
*/
#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx])
/* multi-page (mp_bvec) helpers */
#define mp_bvec_iter_page(bvec, iter) \
(__bvec_iter_bvec((bvec), (iter))->bv_page)
#define mp_bvec_iter_len(bvec, iter) \
min((iter).bi_size, \
__bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
#define mp_bvec_iter_offset(bvec, iter) \
(__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
#define mp_bvec_iter_page_idx(bvec, iter) \
(mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE)
#define mp_bvec_iter_bvec(bvec, iter) \
((struct bio_vec) { \
.bv_page = mp_bvec_iter_page((bvec), (iter)), \
.bv_len = mp_bvec_iter_len((bvec), (iter)), \
.bv_offset = mp_bvec_iter_offset((bvec), (iter)), \
})
/* For building single-page bvec in flight */
#define bvec_iter_offset(bvec, iter) \
(mp_bvec_iter_offset((bvec), (iter)) % PAGE_SIZE)
#define bvec_iter_len(bvec, iter) \
min_t(unsigned, mp_bvec_iter_len((bvec), (iter)), \
PAGE_SIZE - bvec_iter_offset((bvec), (iter)))
#define bvec_iter_page(bvec, iter) \
(mp_bvec_iter_page((bvec), (iter)) + \
mp_bvec_iter_page_idx((bvec), (iter)))
#define bvec_iter_bvec(bvec, iter) \
((struct bio_vec) { \
.bv_page = bvec_iter_page((bvec), (iter)), \
.bv_len = bvec_iter_len((bvec), (iter)), \
.bv_offset = bvec_iter_offset((bvec), (iter)), \
})
static inline bool bvec_iter_advance(const struct bio_vec *bv,
struct bvec_iter *iter, unsigned bytes)
{
unsigned int idx = iter->bi_idx;
if (WARN_ONCE(bytes > iter->bi_size,
"Attempted to advance past end of bvec iter\n")) {
iter->bi_size = 0;
return false;
}
iter->bi_size -= bytes;
bytes += iter->bi_bvec_done;
while (bytes && bytes >= bv[idx].bv_len) {
bytes -= bv[idx].bv_len;
idx++;
}
iter->bi_idx = idx;
iter->bi_bvec_done = bytes;
return true;
}
/*
* A simpler version of bvec_iter_advance(), @bytes should not span
* across multiple bvec entries, i.e. bytes <= bv[i->bi_idx].bv_len
*/
static inline void bvec_iter_advance_single(const struct bio_vec *bv,
struct bvec_iter *iter, unsigned int bytes)
{
unsigned int done = iter->bi_bvec_done + bytes;
if (done == bv[iter->bi_idx].bv_len) {
done = 0;
iter->bi_idx++;
}
iter->bi_bvec_done = done;
iter->bi_size -= bytes;
}
#define for_each_bvec(bvl, bio_vec, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
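/*
 * Usage sketch (process_range() is a hypothetical consumer): walk a bvec
 * array 'vecs' covering 'total' bytes:
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *	struct bvec_iter start = { .bi_size = total };
 *
 *	for_each_bvec(bv, vecs, iter, start)
 *		process_range(bv.bv_page, bv.bv_offset, bv.bv_len);
 */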
/* for iterating one bio from start to end */
#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
{ \
.bi_sector = 0, \
.bi_size = UINT_MAX, \
.bi_idx = 0, \
.bi_bvec_done = 0, \
}
static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
{
iter_all->done = 0;
iter_all->idx = 0;
return &iter_all->bv;
}
static inline void bvec_advance(const struct bio_vec *bvec,
struct bvec_iter_all *iter_all)
{
struct bio_vec *bv = &iter_all->bv;
if (iter_all->done) {
bv->bv_page++;
bv->bv_offset = 0;
} else {
bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT);
bv->bv_offset = bvec->bv_offset & ~PAGE_MASK;
}
bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
bvec->bv_len - iter_all->done);
iter_all->done += bv->bv_len;
if (iter_all->done == bvec->bv_len) {
iter_all->idx++;
iter_all->done = 0;
}
}
/**
* bvec_kmap_local - map a bvec into the kernel virtual address space
* @bvec: bvec to map
*
* Must be called on single-page bvecs only. Call kunmap_local on the returned
* address to unmap.
*/
static inline void *bvec_kmap_local(struct bio_vec *bvec)
{
return kmap_local_page(bvec->bv_page) + bvec->bv_offset;
}
/**
* memcpy_from_bvec - copy data from a bvec
 * @to: destination buffer
 * @bvec: bvec to copy from
*
* Must be called on single-page bvecs only.
*/
static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec)
{
memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
}
/**
* memcpy_to_bvec - copy data to a bvec
 * @bvec: bvec to copy to
 * @from: source buffer
*
* Must be called on single-page bvecs only.
*/
static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from)
{
memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len);
}
/**
* memzero_bvec - zero all data in a bvec
* @bvec: bvec to zero
*
* Must be called on single-page bvecs only.
*/
static inline void memzero_bvec(struct bio_vec *bvec)
{
memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
}
/**
* bvec_virt - return the virtual address for a bvec
* @bvec: bvec to return the virtual address for
*
* Note: the caller must ensure that @bvec->bv_page is not a highmem page.
*/
static inline void *bvec_virt(struct bio_vec *bvec)
{
WARN_ON_ONCE(PageHighMem(bvec->bv_page));
return page_address(bvec->bv_page) + bvec->bv_offset;
}
/**
* bvec_phys - return the physical address for a bvec
* @bvec: bvec to return the physical address for
*/
static inline phys_addr_t bvec_phys(const struct bio_vec *bvec)
{
/*
* Note this open codes page_to_phys because page_to_phys is defined in
* <asm/io.h>, which we don't want to pull in here. If it ever moves to
* a sensible place we should start using it.
*/
return PFN_PHYS(page_to_pfn(bvec->bv_page)) + bvec->bv_offset;
}
#endif /* __LINUX_BVEC_H */
|
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include "util/compress.h"
#include "util/debug.h"
int zstd_init(struct zstd_data *data, int level)
{
data->comp_level = level;
data->dstream = NULL;
data->cstream = NULL;
return 0;
}
int zstd_fini(struct zstd_data *data)
{
if (data->dstream) {
ZSTD_freeDStream(data->dstream);
data->dstream = NULL;
}
if (data->cstream) {
ZSTD_freeCStream(data->cstream);
data->cstream = NULL;
}
return 0;
}
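/*
 * Compress src into dst as a sequence of framed records: for each record,
 * process_header(record, 0) reserves space for (and returns the size of) a
 * record header, ZSTD_compressStream() then writes at most max_record_size
 * payload bytes, and process_header(record, payload_size) is called again
 * to finalize the header.  The return value is the total number of header
 * and payload bytes written to dst.
 */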
ssize_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
void *src, size_t src_size, size_t max_record_size,
size_t process_header(void *record, size_t increment))
{
size_t ret, size, compressed = 0;
ZSTD_inBuffer input = { src, src_size, 0 };
ZSTD_outBuffer output;
void *record;
if (!data->cstream) {
data->cstream = ZSTD_createCStream();
if (data->cstream == NULL) {
pr_err("Couldn't create compression stream.\n");
return -1;
}
ret = ZSTD_initCStream(data->cstream, data->comp_level);
if (ZSTD_isError(ret)) {
pr_err("Failed to initialize compression stream: %s\n",
ZSTD_getErrorName(ret));
return -1;
}
}
while (input.pos < input.size) {
record = dst;
size = process_header(record, 0);
compressed += size;
dst += size;
dst_size -= size;
output = (ZSTD_outBuffer){ dst, (dst_size > max_record_size) ?
max_record_size : dst_size, 0 };
ret = ZSTD_compressStream(data->cstream, &output, &input);
ZSTD_flushStream(data->cstream, &output);
if (ZSTD_isError(ret)) {
pr_err("failed to compress %ld bytes: %s\n",
(long)src_size, ZSTD_getErrorName(ret));
memcpy(dst, src, src_size);
return src_size;
}
size = output.pos;
size = process_header(record, size);
compressed += size;
dst += size;
dst_size -= size;
}
return compressed;
}
size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size,
void *dst, size_t dst_size)
{
size_t ret;
ZSTD_inBuffer input = { src, src_size, 0 };
ZSTD_outBuffer output = { dst, dst_size, 0 };
if (!data->dstream) {
data->dstream = ZSTD_createDStream();
if (data->dstream == NULL) {
pr_err("Couldn't create decompression stream.\n");
return 0;
}
ret = ZSTD_initDStream(data->dstream);
if (ZSTD_isError(ret)) {
pr_err("Failed to initialize decompression stream: %s\n",
ZSTD_getErrorName(ret));
return 0;
}
}
while (input.pos < input.size) {
ret = ZSTD_decompressStream(data->dstream, &output, &input);
if (ZSTD_isError(ret)) {
pr_err("failed to decompress (B): %zd -> %zd, dst_size %zd : %s\n",
src_size, output.size, dst_size, ZSTD_getErrorName(ret));
break;
}
output.dst = dst + output.pos;
output.size = dst_size - output.pos;
}
return output.pos;
}
|
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
void test_stacktrace_map_raw_tp(void)
{
const char *prog_name = "oncpu";
int control_map_fd, stackid_hmap_fd, stackmap_fd;
const char *file = "./test_stacktrace_map.bpf.o";
__u32 key, val, duration = 0;
int err, prog_fd;
struct bpf_program *prog;
struct bpf_object *obj;
struct bpf_link *link = NULL;
err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
goto close_prog;
link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto close_prog;
/* find map fds */
control_map_fd = bpf_find_map(__func__, obj, "control_map");
if (CHECK_FAIL(control_map_fd < 0))
goto close_prog;
stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
if (CHECK_FAIL(stackid_hmap_fd < 0))
goto close_prog;
stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
if (CHECK_FAIL(stackmap_fd < 0))
goto close_prog;
	/* give the BPF program some time to run */
sleep(1);
/* disable stack trace collection */
key = 0;
val = 1;
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
"err %d errno %d\n", err, errno))
goto close_prog;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
"err %d errno %d\n", err, errno))
goto close_prog;
close_prog:
bpf_link__destroy(link);
bpf_object__close(obj);
}
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2024, Danila Tikhonov <[email protected]>
* Copyright (c) 2024, David Wronek <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,sm7150-dispcc.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "common.h"
#include "gdsc.h"
enum {
DT_BI_TCXO,
DT_BI_TCXO_AO,
DT_GCC_DISP_GPLL0_CLK,
DT_CHIP_SLEEP_CLK,
DT_DSI0_PHY_PLL_OUT_BYTECLK,
DT_DSI0_PHY_PLL_OUT_DSICLK,
DT_DSI1_PHY_PLL_OUT_BYTECLK,
DT_DSI1_PHY_PLL_OUT_DSICLK,
DT_DP_PHY_PLL_LINK_CLK,
DT_DP_PHY_PLL_VCO_DIV_CLK,
};
enum {
P_BI_TCXO,
P_CHIP_SLEEP_CLK,
P_DISPCC_PLL0_OUT_EVEN,
P_DISPCC_PLL0_OUT_MAIN,
P_DP_PHY_PLL_LINK_CLK,
P_DP_PHY_PLL_VCO_DIV_CLK,
P_DSI0_PHY_PLL_OUT_BYTECLK,
P_DSI0_PHY_PLL_OUT_DSICLK,
P_DSI1_PHY_PLL_OUT_BYTECLK,
P_DSI1_PHY_PLL_OUT_DSICLK,
P_GCC_DISP_GPLL0_CLK,
};
static const struct pll_vco fabia_vco[] = {
{ 249600000, 2000000000, 0 },
{ 125000000, 1000000000, 1 },
};
/* 860MHz configuration */
static const struct alpha_pll_config dispcc_pll0_config = {
.l = 0x2c,
.alpha = 0xcaaa,
.test_ctl_val = 0x40000000,
};
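/*
 * Assuming the 19.2 MHz TCXO reference and Fabia's 16-bit fractional
 * field: 19.2 MHz * (0x2c + 0xcaaa / 2^16) = 19.2 MHz * ~44.7917,
 * i.e. approximately 860 MHz.
 */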
static struct clk_alpha_pll dispcc_pll0 = {
.offset = 0x0,
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_pll0",
.parent_data = &(const struct clk_parent_data) {
.index = DT_BI_TCXO,
},
.num_parents = 1,
.ops = &clk_alpha_pll_fabia_ops,
},
},
};
static const struct parent_map dispcc_parent_map_0[] = {
{ P_BI_TCXO, 0 },
{ P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
{ P_DSI1_PHY_PLL_OUT_BYTECLK, 2 },
};
static const struct clk_parent_data dispcc_parent_data_0[] = {
{ .index = DT_BI_TCXO },
{ .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
{ .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
};
static const struct parent_map dispcc_parent_map_1[] = {
{ P_BI_TCXO, 0 },
{ P_DP_PHY_PLL_LINK_CLK, 1 },
{ P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
};
static const struct clk_parent_data dispcc_parent_data_1[] = {
{ .index = DT_BI_TCXO },
{ .index = DT_DP_PHY_PLL_LINK_CLK },
{ .index = DT_DP_PHY_PLL_VCO_DIV_CLK },
};
static const struct parent_map dispcc_parent_map_2[] = {
{ P_BI_TCXO, 0 },
};
static const struct clk_parent_data dispcc_parent_data_2[] = {
{ .index = DT_BI_TCXO },
};
static const struct clk_parent_data dispcc_parent_data_2_ao[] = {
{ .index = DT_BI_TCXO_AO },
};
static const struct parent_map dispcc_parent_map_3[] = {
{ P_BI_TCXO, 0 },
{ P_DISPCC_PLL0_OUT_MAIN, 1 },
{ P_GCC_DISP_GPLL0_CLK, 4 },
{ P_DISPCC_PLL0_OUT_EVEN, 5 },
};
static const struct clk_parent_data dispcc_parent_data_3[] = {
{ .index = DT_BI_TCXO },
{ .hw = &dispcc_pll0.clkr.hw },
{ .index = DT_GCC_DISP_GPLL0_CLK },
{ .hw = &dispcc_pll0.clkr.hw },
};
static const struct parent_map dispcc_parent_map_4[] = {
{ P_BI_TCXO, 0 },
{ P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
{ P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
};
static const struct clk_parent_data dispcc_parent_data_4[] = {
{ .index = DT_BI_TCXO },
{ .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
{ .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
};
static const struct parent_map dispcc_parent_map_5[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_DISP_GPLL0_CLK, 4 },
};
static const struct clk_parent_data dispcc_parent_data_5[] = {
{ .index = DT_BI_TCXO },
{ .index = DT_GCC_DISP_GPLL0_CLK },
};
static const struct parent_map dispcc_parent_map_6[] = {
{ P_CHIP_SLEEP_CLK, 0 },
};
static const struct clk_parent_data dispcc_parent_data_6[] = {
{ .index = DT_CHIP_SLEEP_CLK },
};
static const struct freq_tbl ftbl_dispcc_mdss_ahb_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(37500000, P_GCC_DISP_GPLL0_CLK, 16, 0, 0),
F(75000000, P_GCC_DISP_GPLL0_CLK, 8, 0, 0),
{ }
};
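/*
 * F(rate, parent, pre_div, m, n): the qcom F() macro encodes the
 * pre-divider as (2 * div - 1), which is why half-integer dividers such
 * as 1.5 and 2.5 appear in the DP and MDP tables in this driver.
 */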
static struct clk_rcg2 dispcc_mdss_ahb_clk_src = {
.cmd_rcgr = 0x22bc,
.mnd_width = 0,
.hid_width = 5,
.parent_map = dispcc_parent_map_5,
.freq_tbl = ftbl_dispcc_mdss_ahb_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_ahb_clk_src",
.parent_data = dispcc_parent_data_5,
.num_parents = ARRAY_SIZE(dispcc_parent_data_5),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops,
},
};
static const struct freq_tbl ftbl_dispcc_mdss_byte0_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
{ }
};
static struct clk_rcg2 dispcc_mdss_byte0_clk_src = {
.cmd_rcgr = 0x2110,
.mnd_width = 0,
.hid_width = 5,
.parent_map = dispcc_parent_map_0,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_byte0_clk_src",
.parent_data = dispcc_parent_data_0,
.num_parents = ARRAY_SIZE(dispcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_byte2_ops,
},
};
static struct clk_rcg2 dispcc_mdss_byte1_clk_src = {
.cmd_rcgr = 0x212c,
.mnd_width = 0,
.hid_width = 5,
.parent_map = dispcc_parent_map_0,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_byte1_clk_src",
.parent_data = dispcc_parent_data_0,
.num_parents = ARRAY_SIZE(dispcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_byte2_ops,
},
};
static struct clk_rcg2 dispcc_mdss_dp_aux_clk_src = {
.cmd_rcgr = 0x21dc,
.mnd_width = 0,
.hid_width = 5,
.parent_map = dispcc_parent_map_2,
.freq_tbl = ftbl_dispcc_mdss_byte0_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_dp_aux_clk_src",
.parent_data = dispcc_parent_data_2,
.num_parents = ARRAY_SIZE(dispcc_parent_data_2),
.ops = &clk_rcg2_ops,
},
};
static const struct freq_tbl ftbl_dispcc_mdss_dp_crypto_clk_src[] = {
F(108000, P_DP_PHY_PLL_LINK_CLK, 3, 0, 0),
F(180000, P_DP_PHY_PLL_LINK_CLK, 3, 0, 0),
F(360000, P_DP_PHY_PLL_LINK_CLK, 1.5, 0, 0),
F(540000, P_DP_PHY_PLL_LINK_CLK, 1.5, 0, 0),
{ }
};
static struct clk_rcg2 dispcc_mdss_dp_crypto_clk_src = {
.cmd_rcgr = 0x2194,
.mnd_width = 0,
.hid_width = 5,
.parent_map = dispcc_parent_map_1,
.freq_tbl = ftbl_dispcc_mdss_dp_crypto_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_dp_crypto_clk_src",
.parent_data = dispcc_parent_data_1,
.num_parents = ARRAY_SIZE(dispcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
static struct clk_rcg2 dispcc_mdss_dp_link_clk_src = {
.cmd_rcgr = 0x2178,
.mnd_width = 0,
.hid_width = 5,
.parent_map = dispcc_parent_map_1,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_dp_link_clk_src",
.parent_data = dispcc_parent_data_1,
.num_parents = ARRAY_SIZE(dispcc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_byte2_ops,
},
};
static struct clk_rcg2 dispcc_mdss_dp_pixel1_clk_src = {
.cmd_rcgr = 0x21c4,
.mnd_width = 16,
.hid_width = 5,
.parent_map = dispcc_parent_map_1,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_dp_pixel1_clk_src",
.parent_data = dispcc_parent_data_1,
.num_parents = ARRAY_SIZE(dispcc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_dp_ops,
},
};
static struct clk_rcg2 dispcc_mdss_dp_pixel_clk_src = {
.cmd_rcgr = 0x21ac,
.mnd_width = 16,
.hid_width = 5,
.parent_map = dispcc_parent_map_1,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_dp_pixel_clk_src",
.parent_data = dispcc_parent_data_1,
.num_parents = ARRAY_SIZE(dispcc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_dp_ops,
},
};
static struct clk_rcg2 dispcc_mdss_esc0_clk_src = {
.cmd_rcgr = 0x2148,
.mnd_width = 0,
.hid_width = 5,
.parent_map = dispcc_parent_map_0,
.freq_tbl = ftbl_dispcc_mdss_byte0_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_esc0_clk_src",
.parent_data = dispcc_parent_data_0,
.num_parents = ARRAY_SIZE(dispcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
static struct clk_rcg2 dispcc_mdss_esc1_clk_src = {
.cmd_rcgr = 0x2160,
.mnd_width = 0,
.hid_width = 5,
.parent_map = dispcc_parent_map_0,
.freq_tbl = ftbl_dispcc_mdss_byte0_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_esc1_clk_src",
.parent_data = dispcc_parent_data_0,
.num_parents = ARRAY_SIZE(dispcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
static const struct freq_tbl ftbl_dispcc_mdss_mdp_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(85714286, P_GCC_DISP_GPLL0_CLK, 7, 0, 0),
F(100000000, P_GCC_DISP_GPLL0_CLK, 6, 0, 0),
F(150000000, P_GCC_DISP_GPLL0_CLK, 4, 0, 0),
F(172000000, P_DISPCC_PLL0_OUT_MAIN, 5, 0, 0),
F(200000000, P_GCC_DISP_GPLL0_CLK, 3, 0, 0),
F(286666667, P_DISPCC_PLL0_OUT_MAIN, 3, 0, 0),
F(300000000, P_GCC_DISP_GPLL0_CLK, 2, 0, 0),
F(344000000, P_DISPCC_PLL0_OUT_MAIN, 2.5, 0, 0),
F(430000000, P_DISPCC_PLL0_OUT_MAIN, 2, 0, 0),
{ }
};
static struct clk_rcg2 dispcc_mdss_mdp_clk_src = {
.cmd_rcgr = 0x20c8,
.mnd_width = 0,
.hid_width = 5,
.parent_map = dispcc_parent_map_3,
.freq_tbl = ftbl_dispcc_mdss_mdp_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_mdp_clk_src",
.parent_data = dispcc_parent_data_3,
.num_parents = ARRAY_SIZE(dispcc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops,
},
};
static struct clk_rcg2 dispcc_mdss_pclk0_clk_src = {
.cmd_rcgr = 0x2098,
.mnd_width = 8,
.hid_width = 5,
.parent_map = dispcc_parent_map_4,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_pclk0_clk_src",
.parent_data = dispcc_parent_data_4,
.num_parents = ARRAY_SIZE(dispcc_parent_data_4),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_pixel_ops,
},
};
static struct clk_rcg2 dispcc_mdss_pclk1_clk_src = {
.cmd_rcgr = 0x20b0,
.mnd_width = 8,
.hid_width = 5,
.parent_map = dispcc_parent_map_4,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_pclk1_clk_src",
.parent_data = dispcc_parent_data_4,
.num_parents = ARRAY_SIZE(dispcc_parent_data_4),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_pixel_ops,
},
};
static const struct freq_tbl ftbl_dispcc_mdss_rot_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(171428571, P_GCC_DISP_GPLL0_CLK, 3.5, 0, 0),
F(200000000, P_GCC_DISP_GPLL0_CLK, 3, 0, 0),
F(300000000, P_GCC_DISP_GPLL0_CLK, 2, 0, 0),
F(344000000, P_DISPCC_PLL0_OUT_MAIN, 2.5, 0, 0),
F(430000000, P_DISPCC_PLL0_OUT_MAIN, 2, 0, 0),
{ }
};
static struct clk_rcg2 dispcc_mdss_rot_clk_src = {
.cmd_rcgr = 0x20e0,
.mnd_width = 0,
.hid_width = 5,
.parent_map = dispcc_parent_map_3,
.freq_tbl = ftbl_dispcc_mdss_rot_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_rot_clk_src",
.parent_data = dispcc_parent_data_3,
.num_parents = ARRAY_SIZE(dispcc_parent_data_3),
.ops = &clk_rcg2_shared_ops,
},
};
static struct clk_rcg2 dispcc_mdss_vsync_clk_src = {
.cmd_rcgr = 0x20f8,
.mnd_width = 0,
.hid_width = 5,
.parent_map = dispcc_parent_map_2,
.freq_tbl = ftbl_dispcc_mdss_byte0_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_vsync_clk_src",
.parent_data = dispcc_parent_data_2,
.num_parents = ARRAY_SIZE(dispcc_parent_data_2),
.ops = &clk_rcg2_ops,
},
};
static const struct freq_tbl ftbl_dispcc_sleep_clk_src[] = {
F(32000, P_CHIP_SLEEP_CLK, 1, 0, 0),
{ }
};
static struct clk_rcg2 dispcc_sleep_clk_src = {
.cmd_rcgr = 0x6060,
.mnd_width = 0,
.hid_width = 5,
.parent_map = dispcc_parent_map_6,
.freq_tbl = ftbl_dispcc_sleep_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_sleep_clk_src",
.parent_data = dispcc_parent_data_6,
.num_parents = ARRAY_SIZE(dispcc_parent_data_6),
.ops = &clk_rcg2_ops,
},
};
static struct clk_rcg2 dispcc_xo_clk_src = {
.cmd_rcgr = 0x6044,
.mnd_width = 0,
.hid_width = 5,
.parent_map = dispcc_parent_map_2,
.freq_tbl = ftbl_dispcc_mdss_byte0_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "dispcc_xo_clk_src",
.parent_data = dispcc_parent_data_2_ao,
.num_parents = ARRAY_SIZE(dispcc_parent_data_2_ao),
.ops = &clk_rcg2_ops,
},
};
static struct clk_branch dispcc_mdss_ahb_clk = {
.halt_reg = 0x2080,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2080,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_ahb_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_byte0_clk = {
.halt_reg = 0x2028,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2028,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_byte0_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_byte0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_regmap_div dispcc_mdss_byte0_div_clk_src = {
.reg = 0x2128,
.shift = 0,
.width = 2,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_byte0_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_byte0_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_branch dispcc_mdss_byte0_intf_clk = {
.halt_reg = 0x202c,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x202c,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_byte0_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_byte0_div_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_byte1_clk = {
.halt_reg = 0x2030,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2030,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_byte1_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_byte1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_regmap_div dispcc_mdss_byte1_div_clk_src = {
.reg = 0x2144,
.shift = 0,
.width = 2,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_byte1_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_byte1_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_branch dispcc_mdss_byte1_intf_clk = {
.halt_reg = 0x2034,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2034,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_byte1_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_byte1_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_dp_aux_clk = {
.halt_reg = 0x2054,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2054,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_dp_aux_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_dp_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_dp_crypto_clk = {
.halt_reg = 0x2048,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2048,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_dp_crypto_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_dp_crypto_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_dp_link_clk = {
.halt_reg = 0x2040,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2040,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_dp_link_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_dp_link_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_dp_link_intf_clk = {
.halt_reg = 0x2044,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2044,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_dp_link_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_dp_link_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_dp_pixel1_clk = {
.halt_reg = 0x2050,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2050,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_dp_pixel1_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_dp_pixel1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_dp_pixel_clk = {
.halt_reg = 0x204c,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x204c,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_dp_pixel_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_dp_pixel_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_esc0_clk = {
.halt_reg = 0x2038,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2038,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_esc0_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_esc0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_esc1_clk = {
.halt_reg = 0x203c,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x203c,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_esc1_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_esc1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_mdp_clk = {
.halt_reg = 0x200c,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x200c,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_mdp_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_mdp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_mdp_lut_clk = {
.halt_reg = 0x201c,
.halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x201c,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_mdp_lut_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_mdp_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_non_gdsc_ahb_clk = {
.halt_reg = 0x4004,
.halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x4004,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_non_gdsc_ahb_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_pclk0_clk = {
.halt_reg = 0x2004,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2004,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_pclk0_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_pclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_pclk1_clk = {
.halt_reg = 0x2008,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2008,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_pclk1_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_pclk1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_rot_clk = {
.halt_reg = 0x2014,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2014,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_rot_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_rot_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_rscc_ahb_clk = {
.halt_reg = 0x400c,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x400c,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_rscc_ahb_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&dispcc_mdss_ahb_clk_src.clkr.hw,
			},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_rscc_vsync_clk = {
.halt_reg = 0x4008,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x4008,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_rscc_vsync_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_vsync_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_mdss_vsync_clk = {
.halt_reg = 0x2024,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x2024,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_mdss_vsync_clk",
.parent_hws = (const struct clk_hw*[]) {
&dispcc_mdss_vsync_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch dispcc_sleep_clk = {
.halt_reg = 0x6078,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x6078,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "dispcc_sleep_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&dispcc_sleep_clk_src.clkr.hw,
			},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct gdsc mdss_gdsc = {
.gdscr = 0x3000,
.en_rest_wait_val = 0x2,
.en_few_wait_val = 0x2,
.clk_dis_wait_val = 0xf,
.pd = {
.name = "mdss_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
.flags = HW_CTRL,
};
static struct clk_regmap *dispcc_sm7150_clocks[] = {
[DISPCC_MDSS_AHB_CLK] = &dispcc_mdss_ahb_clk.clkr,
[DISPCC_MDSS_AHB_CLK_SRC] = &dispcc_mdss_ahb_clk_src.clkr,
[DISPCC_MDSS_BYTE0_CLK] = &dispcc_mdss_byte0_clk.clkr,
[DISPCC_MDSS_BYTE0_CLK_SRC] = &dispcc_mdss_byte0_clk_src.clkr,
[DISPCC_MDSS_BYTE0_DIV_CLK_SRC] = &dispcc_mdss_byte0_div_clk_src.clkr,
[DISPCC_MDSS_BYTE0_INTF_CLK] = &dispcc_mdss_byte0_intf_clk.clkr,
[DISPCC_MDSS_BYTE1_CLK] = &dispcc_mdss_byte1_clk.clkr,
[DISPCC_MDSS_BYTE1_CLK_SRC] = &dispcc_mdss_byte1_clk_src.clkr,
[DISPCC_MDSS_BYTE1_DIV_CLK_SRC] = &dispcc_mdss_byte1_div_clk_src.clkr,
[DISPCC_MDSS_BYTE1_INTF_CLK] = &dispcc_mdss_byte1_intf_clk.clkr,
[DISPCC_MDSS_DP_AUX_CLK] = &dispcc_mdss_dp_aux_clk.clkr,
[DISPCC_MDSS_DP_AUX_CLK_SRC] = &dispcc_mdss_dp_aux_clk_src.clkr,
[DISPCC_MDSS_DP_CRYPTO_CLK] = &dispcc_mdss_dp_crypto_clk.clkr,
[DISPCC_MDSS_DP_CRYPTO_CLK_SRC] = &dispcc_mdss_dp_crypto_clk_src.clkr,
[DISPCC_MDSS_DP_LINK_CLK] = &dispcc_mdss_dp_link_clk.clkr,
[DISPCC_MDSS_DP_LINK_CLK_SRC] = &dispcc_mdss_dp_link_clk_src.clkr,
[DISPCC_MDSS_DP_LINK_INTF_CLK] = &dispcc_mdss_dp_link_intf_clk.clkr,
[DISPCC_MDSS_DP_PIXEL1_CLK] = &dispcc_mdss_dp_pixel1_clk.clkr,
[DISPCC_MDSS_DP_PIXEL1_CLK_SRC] = &dispcc_mdss_dp_pixel1_clk_src.clkr,
[DISPCC_MDSS_DP_PIXEL_CLK] = &dispcc_mdss_dp_pixel_clk.clkr,
[DISPCC_MDSS_DP_PIXEL_CLK_SRC] = &dispcc_mdss_dp_pixel_clk_src.clkr,
[DISPCC_MDSS_ESC0_CLK] = &dispcc_mdss_esc0_clk.clkr,
[DISPCC_MDSS_ESC0_CLK_SRC] = &dispcc_mdss_esc0_clk_src.clkr,
[DISPCC_MDSS_ESC1_CLK] = &dispcc_mdss_esc1_clk.clkr,
[DISPCC_MDSS_ESC1_CLK_SRC] = &dispcc_mdss_esc1_clk_src.clkr,
[DISPCC_MDSS_MDP_CLK] = &dispcc_mdss_mdp_clk.clkr,
[DISPCC_MDSS_MDP_CLK_SRC] = &dispcc_mdss_mdp_clk_src.clkr,
[DISPCC_MDSS_MDP_LUT_CLK] = &dispcc_mdss_mdp_lut_clk.clkr,
[DISPCC_MDSS_NON_GDSC_AHB_CLK] = &dispcc_mdss_non_gdsc_ahb_clk.clkr,
[DISPCC_MDSS_PCLK0_CLK] = &dispcc_mdss_pclk0_clk.clkr,
[DISPCC_MDSS_PCLK0_CLK_SRC] = &dispcc_mdss_pclk0_clk_src.clkr,
[DISPCC_MDSS_PCLK1_CLK] = &dispcc_mdss_pclk1_clk.clkr,
[DISPCC_MDSS_PCLK1_CLK_SRC] = &dispcc_mdss_pclk1_clk_src.clkr,
[DISPCC_MDSS_ROT_CLK] = &dispcc_mdss_rot_clk.clkr,
[DISPCC_MDSS_ROT_CLK_SRC] = &dispcc_mdss_rot_clk_src.clkr,
[DISPCC_MDSS_RSCC_AHB_CLK] = &dispcc_mdss_rscc_ahb_clk.clkr,
[DISPCC_MDSS_RSCC_VSYNC_CLK] = &dispcc_mdss_rscc_vsync_clk.clkr,
[DISPCC_MDSS_VSYNC_CLK] = &dispcc_mdss_vsync_clk.clkr,
[DISPCC_MDSS_VSYNC_CLK_SRC] = &dispcc_mdss_vsync_clk_src.clkr,
[DISPCC_PLL0] = &dispcc_pll0.clkr,
[DISPCC_SLEEP_CLK] = &dispcc_sleep_clk.clkr,
[DISPCC_SLEEP_CLK_SRC] = &dispcc_sleep_clk_src.clkr,
[DISPCC_XO_CLK_SRC] = &dispcc_xo_clk_src.clkr,
};
static struct gdsc *dispcc_sm7150_gdscs[] = {
[MDSS_GDSC] = &mdss_gdsc,
};
static const struct regmap_config dispcc_sm7150_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x10000,
.fast_io = true,
};
static const struct qcom_cc_desc dispcc_sm7150_desc = {
.config = &dispcc_sm7150_regmap_config,
.clks = dispcc_sm7150_clocks,
.num_clks = ARRAY_SIZE(dispcc_sm7150_clocks),
.gdscs = dispcc_sm7150_gdscs,
.num_gdscs = ARRAY_SIZE(dispcc_sm7150_gdscs),
};
static const struct of_device_id dispcc_sm7150_match_table[] = {
{ .compatible = "qcom,sm7150-dispcc" },
{ }
};
MODULE_DEVICE_TABLE(of, dispcc_sm7150_match_table);
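/*
 * Probe: map the dispcc register space, program the Fabia PLL, enable the
 * MDP clock-gating bits, keep the XO branch always on, then register the
 * clock and GDSC tables through the common qcom_cc code.
 */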
static int dispcc_sm7150_probe(struct platform_device *pdev)
{
struct regmap *regmap;
regmap = qcom_cc_map(pdev, &dispcc_sm7150_desc);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
clk_fabia_pll_configure(&dispcc_pll0, regmap, &dispcc_pll0_config);
/* Enable clock gating for DSI and MDP clocks */
regmap_update_bits(regmap, 0x8000, 0x7f0, 0x7f0);
/* Keep some clocks always-on */
qcom_branch_set_clk_en(regmap, 0x605c); /* DISPCC_XO_CLK */
return qcom_cc_really_probe(&pdev->dev, &dispcc_sm7150_desc, regmap);
}
static struct platform_driver dispcc_sm7150_driver = {
.probe = dispcc_sm7150_probe,
.driver = {
.name = "dispcc-sm7150",
.of_match_table = dispcc_sm7150_match_table,
},
};
module_platform_driver(dispcc_sm7150_driver);
MODULE_DESCRIPTION("Qualcomm SM7150 Display Clock Controller");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor policy manipulation functions
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2017 Canonical Ltd.
*
 * AppArmor policy namespaces allow different sets of policies
 * to be loaded for tasks within each namespace.
*/
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "include/apparmor.h"
#include "include/cred.h"
#include "include/policy_ns.h"
#include "include/label.h"
#include "include/policy.h"
/* kernel label */
struct aa_label *kernel_t;
/* root profile namespace */
struct aa_ns *root_ns;
const char *aa_hidden_ns_name = "---";
/**
* aa_ns_visible - test if @view is visible from @curr
* @curr: namespace to treat as the parent (NOT NULL)
* @view: namespace to test if visible from @curr (NOT NULL)
* @subns: whether view of a subns is allowed
*
* Returns: true if @view is visible from @curr else false
*/
bool aa_ns_visible(struct aa_ns *curr, struct aa_ns *view, bool subns)
{
if (curr == view)
return true;
if (!subns)
return false;
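	/* walk up @view's ancestry; @view is visible iff @curr is an ancestor */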
for ( ; view; view = view->parent) {
if (view->parent == curr)
return true;
}
return false;
}
/**
* aa_ns_name - Find the ns name to display for @view from @curr
* @curr: current namespace (NOT NULL)
* @view: namespace attempting to view (NOT NULL)
* @subns: are subns visible
*
* Returns: name of @view visible from @curr
*/
const char *aa_ns_name(struct aa_ns *curr, struct aa_ns *view, bool subns)
{
/* if view == curr then the namespace name isn't displayed */
if (curr == view)
return "";
if (aa_ns_visible(curr, view, subns)) {
/* at this point if a ns is visible it is in a view ns
* thus the curr ns.hname is a prefix of its name.
* Only output the virtualized portion of the name
* Add + 2 to skip over // separating curr hname prefix
		 * from the visible tail of the view's hname
*/
return view->base.hname + strlen(curr->base.hname) + 2;
}
return aa_hidden_ns_name;
}
static struct aa_profile *alloc_unconfined(const char *name)
{
struct aa_profile *profile;
profile = aa_alloc_null(NULL, name, GFP_KERNEL);
if (!profile)
return NULL;
profile->label.flags |= FLAG_IX_ON_NAME_ERROR |
FLAG_IMMUTIBLE | FLAG_NS_COUNT | FLAG_UNCONFINED;
profile->mode = APPARMOR_UNCONFINED;
return profile;
}
/**
* alloc_ns - allocate, initialize and return a new namespace
* @prefix: parent namespace name (MAYBE NULL)
* @name: a preallocated name (NOT NULL)
*
* Returns: refcounted namespace or NULL on failure.
*/
static struct aa_ns *alloc_ns(const char *prefix, const char *name)
{
struct aa_ns *ns;
ns = kzalloc(sizeof(*ns), GFP_KERNEL);
AA_DEBUG("%s(%p)\n", __func__, ns);
if (!ns)
return NULL;
if (!aa_policy_init(&ns->base, prefix, name, GFP_KERNEL))
goto fail_ns;
INIT_LIST_HEAD(&ns->sub_ns);
INIT_LIST_HEAD(&ns->rawdata_list);
mutex_init(&ns->lock);
init_waitqueue_head(&ns->wait);
/* released by aa_free_ns() */
ns->unconfined = alloc_unconfined("unconfined");
if (!ns->unconfined)
goto fail_unconfined;
/* ns and ns->unconfined share ns->unconfined refcount */
ns->unconfined->ns = ns;
atomic_set(&ns->uniq_null, 0);
aa_labelset_init(&ns->labels);
return ns;
fail_unconfined:
aa_policy_destroy(&ns->base);
fail_ns:
kfree_sensitive(ns);
return NULL;
}
/**
* aa_free_ns - free a profile namespace
* @ns: the namespace to free (MAYBE NULL)
*
 * Requires: All references to the namespace must have been put, including
 *           any taken while the namespace was referenced by a profile
 *           confining a task.
*/
void aa_free_ns(struct aa_ns *ns)
{
if (!ns)
return;
aa_policy_destroy(&ns->base);
aa_labelset_destroy(&ns->labels);
aa_put_ns(ns->parent);
ns->unconfined->ns = NULL;
aa_free_profile(ns->unconfined);
kfree_sensitive(ns);
}
/**
* __aa_lookupn_ns - lookup the namespace matching @hname
* @view: namespace to search in (NOT NULL)
* @hname: hierarchical ns name (NOT NULL)
* @n: length of @hname
*
* Requires: rcu_read_lock be held
*
* Returns: unrefcounted ns pointer or NULL if not found
*
* Do a relative name lookup, recursing through profile tree.
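 * e.g. looking up "foo//bar" in @view finds sub-ns "foo" under @view and
 * then sub-ns "bar" within it.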
*/
struct aa_ns *__aa_lookupn_ns(struct aa_ns *view, const char *hname, size_t n)
{
struct aa_ns *ns = view;
const char *split;
for (split = strnstr(hname, "//", n); split;
split = strnstr(hname, "//", n)) {
ns = __aa_findn_ns(&ns->sub_ns, hname, split - hname);
if (!ns)
return NULL;
n -= split + 2 - hname;
hname = split + 2;
}
if (n)
return __aa_findn_ns(&ns->sub_ns, hname, n);
return NULL;
}
/**
* aa_lookupn_ns - look up a policy namespace relative to @view
* @view: namespace to search in (NOT NULL)
* @name: name of namespace to find (NOT NULL)
* @n: length of @name
*
* Returns: a refcounted namespace on the list, or NULL if no namespace
* called @name exists.
*
* refcount released by caller
*/
struct aa_ns *aa_lookupn_ns(struct aa_ns *view, const char *name, size_t n)
{
struct aa_ns *ns = NULL;
rcu_read_lock();
ns = aa_get_ns(__aa_lookupn_ns(view, name, n));
rcu_read_unlock();
return ns;
}
static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
struct dentry *dir)
{
struct aa_ns *ns;
int error;
AA_BUG(!parent);
AA_BUG(!name);
AA_BUG(!mutex_is_locked(&parent->lock));
ns = alloc_ns(parent->base.hname, name);
if (!ns)
return ERR_PTR(-ENOMEM);
ns->level = parent->level + 1;
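	/* nest by ns depth so lockdep can order parent/child ns locks */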
mutex_lock_nested(&ns->lock, ns->level);
error = __aafs_ns_mkdir(ns, ns_subns_dir(parent), name, dir);
if (error) {
AA_ERROR("Failed to create interface for ns %s\n",
ns->base.name);
mutex_unlock(&ns->lock);
aa_free_ns(ns);
return ERR_PTR(error);
}
ns->parent = aa_get_ns(parent);
list_add_rcu(&ns->base.list, &parent->sub_ns);
/* add list ref */
aa_get_ns(ns);
mutex_unlock(&ns->lock);
return ns;
}
/**
* __aa_find_or_create_ns - create an ns, fail if it already exists
* @parent: the parent of the namespace being created
* @name: the name of the namespace
* @dir: if not null the dir to put the ns entries in
*
 * Returns: a refcounted ns that has been added, or an ERR_PTR on failure
*/
struct aa_ns *__aa_find_or_create_ns(struct aa_ns *parent, const char *name,
struct dentry *dir)
{
struct aa_ns *ns;
AA_BUG(!mutex_is_locked(&parent->lock));
/* try and find the specified ns */
/* released by caller */
ns = aa_get_ns(__aa_find_ns(&parent->sub_ns, name));
if (!ns)
ns = __aa_create_ns(parent, name, dir);
else
ns = ERR_PTR(-EEXIST);
/* return ref */
return ns;
}
/**
* aa_prepare_ns - find an existing or create a new namespace of @name
* @parent: ns to treat as parent
* @name: the namespace to find or add (NOT NULL)
*
* Returns: refcounted namespace or PTR_ERR if failed to create one
*/
struct aa_ns *aa_prepare_ns(struct aa_ns *parent, const char *name)
{
struct aa_ns *ns;
mutex_lock_nested(&parent->lock, parent->level);
/* try and find the specified ns and if it doesn't exist create it */
/* released by caller */
ns = aa_get_ns(__aa_find_ns(&parent->sub_ns, name));
if (!ns)
ns = __aa_create_ns(parent, name, NULL);
mutex_unlock(&parent->lock);
/* return ref */
return ns;
}
static void __ns_list_release(struct list_head *head);
/**
* destroy_ns - remove everything contained by @ns
 * @ns: namespace to have its contents removed (NOT NULL)
*/
static void destroy_ns(struct aa_ns *ns)
{
if (!ns)
return;
mutex_lock_nested(&ns->lock, ns->level);
/* release all profiles in this namespace */
__aa_profile_list_release(&ns->base.profiles);
/* release all sub namespaces */
__ns_list_release(&ns->sub_ns);
if (ns->parent) {
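		/* redirect this ns's unconfined proxy to the parent's
		 * unconfined so outstanding label references stay valid
		 */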
unsigned long flags;
write_lock_irqsave(&ns->labels.lock, flags);
__aa_proxy_redirect(ns_unconfined(ns),
ns_unconfined(ns->parent));
write_unlock_irqrestore(&ns->labels.lock, flags);
}
__aafs_ns_rmdir(ns);
mutex_unlock(&ns->lock);
}
/**
* __aa_remove_ns - remove a namespace and all its children
* @ns: namespace to be removed (NOT NULL)
*
* Requires: ns->parent->lock be held and ns removed from parent.
*/
void __aa_remove_ns(struct aa_ns *ns)
{
/* remove ns from namespace list */
list_del_rcu(&ns->base.list);
destroy_ns(ns);
aa_put_ns(ns);
}
/**
 * __ns_list_release - remove all profile namespaces on the list, putting refs
* @head: list of profile namespaces (NOT NULL)
*
* Requires: namespace lock be held
*/
static void __ns_list_release(struct list_head *head)
{
struct aa_ns *ns, *tmp;
list_for_each_entry_safe(ns, tmp, head, base.list)
__aa_remove_ns(ns);
}
/**
* aa_alloc_root_ns - allocate the root profile namespace
*
* Returns: %0 on success else error
*
*/
int __init aa_alloc_root_ns(void)
{
struct aa_profile *kernel_p;
	/* released by aa_free_root_ns - used as list ref */
root_ns = alloc_ns(NULL, "root");
if (!root_ns)
return -ENOMEM;
kernel_p = alloc_unconfined("kernel_t");
if (!kernel_p) {
destroy_ns(root_ns);
aa_free_ns(root_ns);
return -ENOMEM;
}
kernel_t = &kernel_p->label;
root_ns->unconfined->ns = aa_get_ns(root_ns);
return 0;
}
/**
* aa_free_root_ns - free the root profile namespace
*/
void __init aa_free_root_ns(void)
{
struct aa_ns *ns = root_ns;
root_ns = NULL;
aa_label_free(kernel_t);
destroy_ns(ns);
aa_put_ns(ns);
}
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2014-2015 Hisilicon Limited.
*/
#ifndef _HNS_DSAF_RCB_H
#define _HNS_DSAF_RCB_H
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include "hnae.h"
#include "hns_dsaf_main.h"
struct rcb_common_cb;
#define HNS_RCB_IRQ_NUM_PER_QUEUE 2
#define HNS_RCB_IRQ_IDX_TX 0
#define HNS_RCB_IRQ_IDX_RX 1
#define HNS_RCB_TX_REG_OFFSET 0x40
#define HNS_RCB_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN
#define HNS_RCB_DEBUG_NW_ENGINE_NUM 1
#define HNS_RCB_RING_MAX_BD_PER_PKT 3
#define HNS_RCB_RING_MAX_TXBD_PER_PKT 3
#define HNS_RCBV2_RING_MAX_TXBD_PER_PKT 8
#define HNS_RCB_MAX_PKT_SIZE MAC_MAX_MTU
#define HNS_RCB_RING_MAX_PENDING_BD 1024
#define HNS_RCB_RING_MIN_PENDING_BD 16
#define HNS_RCB_REG_OFFSET 0x10000
#define HNS_RCB_TX_FRAMES_LOW 1
#define HNS_RCB_RX_FRAMES_LOW 1
#define HNS_RCB_TX_FRAMES_HIGH 1023
#define HNS_RCB_RX_FRAMES_HIGH 1023
#define HNS_RCB_TX_USECS_LOW 1
#define HNS_RCB_RX_USECS_LOW 1
#define HNS_RCB_TX_USECS_HIGH 1023
#define HNS_RCB_RX_USECS_HIGH 1023
#define HNS_RCB_MAX_COALESCED_FRAMES 1023
#define HNS_RCB_MIN_COALESCED_FRAMES 1
#define HNS_RCB_DEF_RX_COALESCED_FRAMES 50
#define HNS_RCB_DEF_TX_COALESCED_FRAMES 1
#define HNS_RCB_CLK_FREQ_MHZ 350
#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
#define HNS_RCB_DEF_COALESCED_USECS 30
#define HNS_RCB_DEF_GAP_TIME_USECS 20
#define HNS_RCB_TX_PKTLINE_OFFSET 8
#define HNS_RCB_COMMON_ENDIAN 1
#define HNS_BD_SIZE_512_TYPE 0
#define HNS_BD_SIZE_1024_TYPE 1
#define HNS_BD_SIZE_2048_TYPE 2
#define HNS_BD_SIZE_4096_TYPE 3
#define HNS_RCB_COMMON_DUMP_REG_NUM 80
#define HNS_RCB_RING_DUMP_REG_NUM 40
#define HNS_RING_STATIC_REG_NUM 28
#define HNS_DUMP_REG_NUM 500
#define HNS_STATIC_REG_NUM 12
#define HNS_TSO_MODE_8BD_32K 1
#define HNS_TSO_MDOE_4BD_16K 0
enum rcb_int_flag {
RCB_INT_FLAG_TX = 0x1,
RCB_INT_FLAG_RX = (0x1 << 1),
	RCB_INT_FLAG_MAX = (0x1 << 2), /* must be the last element */
};
struct hns_ring_hw_stats {
u64 tx_pkts;
u64 ppe_tx_ok_pkts;
u64 ppe_tx_drop_pkts;
u64 rx_pkts;
u64 ppe_rx_ok_pkts;
u64 ppe_rx_drop_pkts;
};
struct ring_pair_cb {
struct rcb_common_cb *rcb_common; /* ring belongs to */
struct device *dev; /*device for DMA mapping */
struct hnae_queue q;
u16 index; /* global index in a rcb common device */
u16 buf_size;
int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
u8 port_id_in_comm;
u8 used_by_vf;
struct hns_ring_hw_stats hw_stats;
};
struct rcb_common_cb {
u8 __iomem *io_base;
phys_addr_t phy_base;
struct dsaf_device *dsaf_dev;
u16 max_vfn;
u16 max_q_per_vf;
u8 comm_index;
u32 ring_num;
u32 desc_num; /* desc num per queue*/
struct ring_pair_cb ring_pair_cb[] __counted_by(ring_num);
};
int hns_rcb_buf_size2type(u32 buf_size);
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
void hns_rcb_start(struct hnae_queue *q, u32 val);
int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
u16 *max_vfn, u16 *max_q_per_vf);
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val);
void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag);
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 enable);
void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask);
void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
void hns_rcb_init_hw(struct ring_pair_cb *ring);
void hns_rcb_reset_ring_hw(struct hnae_queue *q);
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
u32 hns_rcb_get_rx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx);
u32 hns_rcb_get_tx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx);
u32 hns_rcb_get_coalesce_usecs(
struct rcb_common_cb *rcb_common, u32 port_idx);
int hns_rcb_set_coalesce_usecs(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
int hns_rcb_set_rx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
int hns_rcb_set_tx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
void hns_rcb_update_stats(struct hnae_queue *queue);
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_common, void *data);
int hns_rcb_get_ring_sset_count(int stringset);
int hns_rcb_get_common_regs_count(void);
int hns_rcb_get_ring_regs_count(void);
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
void hns_rcb_get_strings(int stringset, u8 **data, int index);
void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size);
void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size);
#endif /* _HNS_DSAF_RCB_H */
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2018 Amlogic, Inc. All rights reserved.
*/
/dts-v1/;
#include "meson-g12a.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/gpio/meson-g12a-gpio.h>
#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
#include <dt-bindings/sound/meson-g12a-toacodec.h>
/ {
compatible = "amlogic,u200", "amlogic,g12a";
model = "Amlogic Meson G12A U200 Development Board";
aliases {
serial0 = &uart_AO;
ethernet0 = ðmac;
};
dioo2133: audio-amplifier-0 {
compatible = "simple-audio-amplifier";
enable-gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
VCC-supply = <&vcc_5v>;
sound-name-prefix = "10U2";
};
spdif_dir: audio-codec-0 {
compatible = "linux,spdif-dir";
#sound-dai-cells = <0>;
sound-name-prefix = "DIR";
};
spdif_dit: audio-codec-1 {
compatible = "linux,spdif-dit";
#sound-dai-cells = <0>;
sound-name-prefix = "DIT";
};
chosen {
stdout-path = "serial0:115200n8";
};
cvbs-connector {
compatible = "composite-video-connector";
port {
cvbs_connector_in: endpoint {
remote-endpoint = <&cvbs_vdac_out>;
};
};
};
emmc_pwrseq: emmc-pwrseq {
compatible = "mmc-pwrseq-emmc";
reset-gpios = <&gpio BOOT_12 GPIO_ACTIVE_LOW>;
};
hdmi-connector {
compatible = "hdmi-connector";
type = "a";
port {
hdmi_connector_in: endpoint {
remote-endpoint = <&hdmi_tx_tmds_out>;
};
};
};
memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x0 0x40000000>;
};
flash_1v8: regulator-flash-1v8 {
compatible = "regulator-fixed";
regulator-name = "FLASH_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
vin-supply = <&vcc_3v3>;
regulator-always-on;
};
main_12v: regulator-main-12v {
compatible = "regulator-fixed";
regulator-name = "12V";
regulator-min-microvolt = <12000000>;
regulator-max-microvolt = <12000000>;
regulator-always-on;
};
usb_pwr_en: regulator-usb-pwr-en {
compatible = "regulator-fixed";
regulator-name = "USB_PWR_EN";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
vin-supply = <&vcc_5v>;
gpio = <&gpio GPIOH_6 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
vcc_1v8: regulator-vcc-1v8 {
compatible = "regulator-fixed";
regulator-name = "VCC_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
vin-supply = <&vcc_3v3>;
regulator-always-on;
};
vcc_3v3: regulator-vcc-3v3 {
compatible = "regulator-fixed";
regulator-name = "VCC_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
vin-supply = <&vddao_3v3>;
regulator-always-on;
/* FIXME: actually controlled by VDDCPU_B_EN */
};
vcc_5v: regulator-vcc-5v {
compatible = "regulator-fixed";
regulator-name = "VCC_5V";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
vin-supply = <&main_12v>;
gpio = <&gpio GPIOH_8 GPIO_OPEN_DRAIN>;
enable-active-high;
};
vddao_1v8: regulator-vddao-1v8 {
compatible = "regulator-fixed";
regulator-name = "VDDAO_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
vin-supply = <&vddao_3v3>;
regulator-always-on;
};
vddao_3v3: regulator-vddao-3v3 {
compatible = "regulator-fixed";
regulator-name = "VDDAO_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
vin-supply = <&main_12v>;
regulator-always-on;
};
vddcpu: regulator-vddcpu {
/*
* MP8756GD Regulator.
*/
compatible = "pwm-regulator";
regulator-name = "VDDCPU";
regulator-min-microvolt = <721000>;
regulator-max-microvolt = <1022000>;
pwm-supply = <&main_12v>;
pwms = <&pwm_AO_cd 1 1250 0>;
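		/* inverted: 100% duty gives regulator-min, 0% gives regulator-max */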
pwm-dutycycle-range = <100 0>;
regulator-boot-on;
regulator-always-on;
};
sound {
compatible = "amlogic,axg-sound-card";
model = "U200";
audio-widgets = "Line", "Lineout";
audio-aux-devs = <&tdmout_a>, <&tdmout_b>, <&tdmout_c>,
<&tdmin_a>, <&tdmin_b>, <&tdmin_c>,
<&tdmin_lb>, <&dioo2133>;
audio-routing = "TDMOUT_A IN 0", "FRDDR_A OUT 0",
"TDMOUT_A IN 1", "FRDDR_B OUT 0",
"TDMOUT_A IN 2", "FRDDR_C OUT 0",
"TDM_A Playback", "TDMOUT_A OUT",
"TDMOUT_B IN 0", "FRDDR_A OUT 1",
"TDMOUT_B IN 1", "FRDDR_B OUT 1",
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT",
"TDMOUT_C IN 0", "FRDDR_A OUT 2",
"TDMOUT_C IN 1", "FRDDR_B OUT 2",
"TDMOUT_C IN 2", "FRDDR_C OUT 2",
"TDM_C Playback", "TDMOUT_C OUT",
"SPDIFOUT_A IN 0", "FRDDR_A OUT 3",
"SPDIFOUT_A IN 1", "FRDDR_B OUT 3",
"SPDIFOUT_A IN 2", "FRDDR_C OUT 3",
"SPDIFOUT_B IN 0", "FRDDR_A OUT 4",
"SPDIFOUT_B IN 1", "FRDDR_B OUT 4",
"SPDIFOUT_B IN 2", "FRDDR_C OUT 4",
"TDMIN_A IN 0", "TDM_A Capture",
"TDMIN_A IN 1", "TDM_B Capture",
"TDMIN_A IN 2", "TDM_C Capture",
"TDMIN_A IN 3", "TDM_A Loopback",
"TDMIN_A IN 4", "TDM_B Loopback",
"TDMIN_A IN 5", "TDM_C Loopback",
"TDMIN_B IN 0", "TDM_A Capture",
"TDMIN_B IN 1", "TDM_B Capture",
"TDMIN_B IN 2", "TDM_C Capture",
"TDMIN_B IN 3", "TDM_A Loopback",
"TDMIN_B IN 4", "TDM_B Loopback",
"TDMIN_B IN 5", "TDM_C Loopback",
"TDMIN_C IN 0", "TDM_A Capture",
"TDMIN_C IN 1", "TDM_B Capture",
"TDMIN_C IN 2", "TDM_C Capture",
"TDMIN_C IN 3", "TDM_A Loopback",
"TDMIN_C IN 4", "TDM_B Loopback",
"TDMIN_C IN 5", "TDM_C Loopback",
"TDMIN_LB IN 3", "TDM_A Capture",
"TDMIN_LB IN 4", "TDM_B Capture",
"TDMIN_LB IN 5", "TDM_C Capture",
"TDMIN_LB IN 0", "TDM_A Loopback",
"TDMIN_LB IN 1", "TDM_B Loopback",
"TDMIN_LB IN 2", "TDM_C Loopback",
"TODDR_A IN 0", "TDMIN_A OUT",
"TODDR_B IN 0", "TDMIN_A OUT",
"TODDR_C IN 0", "TDMIN_A OUT",
"TODDR_A IN 1", "TDMIN_B OUT",
"TODDR_B IN 1", "TDMIN_B OUT",
"TODDR_C IN 1", "TDMIN_B OUT",
"TODDR_A IN 2", "TDMIN_C OUT",
"TODDR_B IN 2", "TDMIN_C OUT",
"TODDR_C IN 2", "TDMIN_C OUT",
"TODDR_A IN 3", "SPDIFIN Capture",
"TODDR_B IN 3", "SPDIFIN Capture",
"TODDR_C IN 3", "SPDIFIN Capture",
"TODDR_A IN 6", "TDMIN_LB OUT",
"TODDR_B IN 6", "TDMIN_LB OUT",
"TODDR_C IN 6", "TDMIN_LB OUT",
"10U2 INL", "ACODEC LOLP",
"10U2 INR", "ACODEC LORP",
"Lineout", "10U2 OUTL",
"Lineout", "10U2 OUTR";
clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
assigned-clock-parents = <0>, <0>, <0>;
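		/* zero entries leave the MPLL parents unchanged; only the
		 * rates below are assigned
		 */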
assigned-clock-rates = <294912000>,
<270950400>,
<393216000>;
dai-link-0 {
sound-dai = <&frddr_a>;
};
dai-link-1 {
sound-dai = <&frddr_b>;
};
dai-link-2 {
sound-dai = <&frddr_c>;
};
dai-link-3 {
sound-dai = <&toddr_a>;
};
dai-link-4 {
sound-dai = <&toddr_b>;
};
dai-link-5 {
sound-dai = <&toddr_c>;
};
/* Connected to the WIFI/BT chip */
dai-link-6 {
sound-dai = <&tdmif_a>;
dai-format = "dsp_a";
dai-tdm-slot-tx-mask-0 = <1 1>;
mclk-fs = <256>;
codec-0 {
sound-dai = <&toacodec TOACODEC_IN_A>;
};
codec-1 {
sound-dai = <&tohdmitx TOHDMITX_I2S_IN_A>;
};
};
/* Connected to the onboard AD82584F DAC */
dai-link-7 {
sound-dai = <&tdmif_b>;
dai-format = "i2s";
dai-tdm-slot-tx-mask-0 = <1 1>;
mclk-fs = <256>;
codec-0 {
sound-dai = <&toacodec TOACODEC_IN_B>;
};
codec-1 {
sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>;
};
};
/* 8ch HDMI interface */
dai-link-8 {
sound-dai = <&tdmif_c>;
dai-format = "i2s";
dai-tdm-slot-tx-mask-0 = <1 1>;
dai-tdm-slot-tx-mask-1 = <1 1>;
dai-tdm-slot-tx-mask-2 = <1 1>;
dai-tdm-slot-tx-mask-3 = <1 1>;
mclk-fs = <256>;
codec-0 {
sound-dai = <&toacodec TOACODEC_IN_C>;
};
codec-1 {
sound-dai = <&tohdmitx TOHDMITX_I2S_IN_C>;
};
};
/* spdif hdmi and coax output */
dai-link-9 {
sound-dai = <&spdifout_a>;
codec-0 {
sound-dai = <&spdif_dit>;
};
codec-1 {
sound-dai = <&tohdmitx TOHDMITX_SPDIF_IN_A>;
};
};
/* spdif hdmi interface */
dai-link-10 {
sound-dai = <&spdifout_b>;
codec {
sound-dai = <&tohdmitx TOHDMITX_SPDIF_IN_B>;
};
};
/* hdmi glue */
dai-link-11 {
sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>;
codec {
sound-dai = <&hdmi_tx>;
};
};
/* internal codec glue */
dai-link-12 {
sound-dai = <&toacodec TOACODEC_OUT>;
codec {
sound-dai = <&acodec>;
};
};
/* spdif coax input */
dai-link-13 {
sound-dai = <&spdifin>;
codec {
sound-dai = <&spdif_dir>;
};
};
};
};
&acodec {
AVDD-supply = <&vddao_1v8>;
status = "okay";
};
&arb {
status = "okay";
};
&cec_AO {
pinctrl-0 = <&cec_ao_a_h_pins>;
pinctrl-names = "default";
status = "disabled";
hdmi-phandle = <&hdmi_tx>;
};
&cecb_AO {
pinctrl-0 = <&cec_ao_b_h_pins>;
pinctrl-names = "default";
status = "okay";
hdmi-phandle = <&hdmi_tx>;
};
&clkc_audio {
status = "okay";
};
&cpu0 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
clock-latency = <50000>;
};
&cpu2 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
clock-latency = <50000>;
};
&cpu3 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
clock-latency = <50000>;
};
&cvbs_vdac_port {
cvbs_vdac_out: endpoint {
remote-endpoint = <&cvbs_connector_in>;
};
};
ðmac {
status = "okay";
phy-handle = <&internal_ephy>;
phy-mode = "rmii";
};
&frddr_a {
status = "okay";
};
&frddr_b {
status = "okay";
};
&frddr_c {
status = "okay";
};
&hdmi_tx {
status = "okay";
pinctrl-0 = <&hdmitx_hpd_pins>, <&hdmitx_ddc_pins>;
pinctrl-names = "default";
hdmi-supply = <&vcc_5v>;
};
&hdmi_tx_tmds_port {
hdmi_tx_tmds_out: endpoint {
remote-endpoint = <&hdmi_connector_in>;
};
};
&ir {
status = "okay";
pinctrl-0 = <&remote_input_ao_pins>;
pinctrl-names = "default";
};
/* i2c Touch */
&i2c0 {
status = "okay";
pinctrl-0 = <&i2c0_sda_z0_pins>, <&i2c0_sck_z1_pins>;
pinctrl-names = "default";
};
/* i2c CM */
&i2c2 {
status = "okay";
pinctrl-0 = <&i2c2_sda_z_pins>, <&i2c2_sck_z_pins>;
pinctrl-names = "default";
};
/* i2c Audio */
&i2c3 {
status = "okay";
pinctrl-0 = <&i2c3_sda_a_pins>, <&i2c3_sck_a_pins>;
pinctrl-names = "default";
};
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
clocks = <&xtal>;
clock-names = "clkin1";
status = "okay";
};
/* SD card */
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_c_pins>;
pinctrl-1 = <&sdcard_clk_gate_c_pins>;
pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
max-frequency = <50000000>;
disable-wp;
cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddao_3v3>;
};
/* eMMC */
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
pinctrl-1 = <&emmc_clk_gate_pins>;
pinctrl-names = "default", "clk-gate";
bus-width = <8>;
cap-mmc-highspeed;
mmc-ddr-1_8v;
mmc-hs200-1_8v;
max-frequency = <200000000>;
non-removable;
disable-wp;
mmc-pwrseq = <&emmc_pwrseq>;
vmmc-supply = <&vcc_3v3>;
vqmmc-supply = <&flash_1v8>;
};
&spdifin {
pinctrl-0 = <&spdif_in_h_pins>;
pinctrl-names = "default";
status = "okay";
};
&spdifout_a {
pinctrl-0 = <&spdif_ao_out_pins>;
pinctrl-names = "default";
status = "okay";
};
&spdifout_b {
status = "okay";
};
&tdmif_a {
	pinctrl-0 = <&tdm_a_fs_pins>, <&tdm_a_sclk_pins>, <&tdm_a_dout0_pins>;
pinctrl-names = "default";
status = "okay";
};
&tdmif_b {
pinctrl-0 = <&mclk0_a_pins>, <&tdm_b_fs_pins>, <&tdm_b_sclk_pins>,
<&tdm_b_dout0_pins>;
pinctrl-names = "default";
status = "okay";
assigned-clocks = <&clkc_audio AUD_CLKID_TDM_MCLK_PAD0>,
<&clkc_audio AUD_CLKID_TDM_SCLK_PAD1>,
<&clkc_audio AUD_CLKID_TDM_LRCLK_PAD1>;
assigned-clock-parents = <&clkc_audio AUD_CLKID_MST_B_MCLK>,
<&clkc_audio AUD_CLKID_MST_B_SCLK>,
<&clkc_audio AUD_CLKID_MST_B_LRCLK>;
assigned-clock-rates = <0>, <0>, <0>;
};
&tdmif_c {
status = "okay";
};
&tdmin_a {
status = "okay";
};
&tdmin_b {
status = "okay";
};
&tdmin_c {
status = "okay";
};
&tdmin_lb {
status = "okay";
};
&tdmout_a {
status = "okay";
};
&tdmout_b {
status = "okay";
};
&tdmout_c {
status = "okay";
};
&toacodec {
status = "okay";
};
&toddr_a {
status = "okay";
};
&toddr_b {
status = "okay";
};
&toddr_c {
status = "okay";
};
&tohdmitx {
status = "okay";
};
&uart_AO {
status = "okay";
pinctrl-0 = <&uart_ao_a_pins>;
pinctrl-names = "default";
};
&usb {
status = "okay";
vbus-supply = <&usb_pwr_en>;
};
&usb2_phy0 {
phy-supply = <&vcc_5v>;
};
&usb2_phy1 {
phy-supply = <&vcc_5v>;
};
|
// SPDX-License-Identifier: GPL-2.0-only
/* DVB frontend part of the Linux driver for TwinhanDTV Alpha/MagicBoxII USB2.0
* DVB-T receiver.
*
* Copyright (C) 2004-5 Patrick Boettcher ([email protected])
*
* Thanks to Twinhan who kindly provided hardware and information.
*
* see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "vp7045.h"
/* It is a Zarlink MT352 within a Samsung Tuner (DNOS404ZH102A) - 040929 - AAT
*
 * Programming is hidden inside the firmware, so set_frontend is very easy.
 * There is, however, a firmware command that can be used to access the demod
 * via its registers; it is used here for status information.
*/
struct vp7045_fe_state {
struct dvb_frontend fe;
struct dvb_usb_device *d;
};
static int vp7045_fe_read_status(struct dvb_frontend *fe,
enum fe_status *status)
{
struct vp7045_fe_state *state = fe->demodulator_priv;
	u8 s0 = vp7045_read_reg(state->d, 0x00),
	   s1 = vp7045_read_reg(state->d, 0x01),
	   s3 = vp7045_read_reg(state->d, 0x03);
*status = 0;
if (s0 & (1 << 4))
*status |= FE_HAS_CARRIER;
if (s0 & (1 << 1))
*status |= FE_HAS_VITERBI;
if (s0 & (1 << 5))
*status |= FE_HAS_LOCK;
if (s1 & (1 << 1))
*status |= FE_HAS_SYNC;
if (s3 & (1 << 6))
*status |= FE_HAS_SIGNAL;
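	/* require carrier, viterbi and sync before reporting a full lock */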
if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
(FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
*status &= ~FE_HAS_LOCK;
return 0;
}
static int vp7045_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct vp7045_fe_state *state = fe->demodulator_priv;
*ber = (vp7045_read_reg(state->d, 0x0D) << 16) |
(vp7045_read_reg(state->d, 0x0E) << 8) |
vp7045_read_reg(state->d, 0x0F);
return 0;
}
static int vp7045_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
{
struct vp7045_fe_state *state = fe->demodulator_priv;
*unc = (vp7045_read_reg(state->d, 0x10) << 8) |
vp7045_read_reg(state->d, 0x11);
return 0;
}
static int vp7045_fe_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
struct vp7045_fe_state *state = fe->demodulator_priv;
u16 signal = (vp7045_read_reg(state->d, 0x14) << 8) |
vp7045_read_reg(state->d, 0x15);
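	/* the raw reading is inverted so that a stronger signal yields a
	 * larger strength value
	 */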
*strength = ~signal;
return 0;
}
static int vp7045_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct vp7045_fe_state *state = fe->demodulator_priv;
u8 _snr = vp7045_read_reg(state->d, 0x09);
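	/* replicate the 8-bit reading into both bytes to scale to 16 bits */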
*snr = (_snr << 8) | _snr;
return 0;
}
static int vp7045_fe_init(struct dvb_frontend *fe)
{
return 0;
}
static int vp7045_fe_sleep(struct dvb_frontend *fe)
{
return 0;
}
static int vp7045_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune)
{
tune->min_delay_ms = 800;
return 0;
}
static int vp7045_fe_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct vp7045_fe_state *state = fe->demodulator_priv;
u8 buf[5];
u32 freq = fep->frequency / 1000;
buf[0] = (freq >> 16) & 0xff;
buf[1] = (freq >> 8) & 0xff;
buf[2] = freq & 0xff;
buf[3] = 0;
switch (fep->bandwidth_hz) {
case 8000000:
buf[4] = 8;
break;
case 7000000:
buf[4] = 7;
break;
case 6000000:
buf[4] = 6;
break;
default:
return -EINVAL;
}
	vp7045_usb_op(state->d, LOCK_TUNER_COMMAND, buf, 5, NULL, 0, 200);
return 0;
}
static void vp7045_fe_release(struct dvb_frontend *fe)
{
struct vp7045_fe_state *state = fe->demodulator_priv;
kfree(state);
}
static const struct dvb_frontend_ops vp7045_fe_ops;
struct dvb_frontend *vp7045_fe_attach(struct dvb_usb_device *d)
{
struct vp7045_fe_state *s = kzalloc(sizeof(struct vp7045_fe_state), GFP_KERNEL);
if (s == NULL)
goto error;
s->d = d;
memcpy(&s->fe.ops, &vp7045_fe_ops, sizeof(struct dvb_frontend_ops));
s->fe.demodulator_priv = s;
return &s->fe;
error:
return NULL;
}
static const struct dvb_frontend_ops vp7045_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Twinhan VP7045/46 USB DVB-T",
.frequency_min_hz = 44250 * kHz,
.frequency_max_hz = 867250 * kHz,
.frequency_stepsize_hz = 1 * kHz,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
FE_CAN_TRANSMISSION_MODE_AUTO |
FE_CAN_GUARD_INTERVAL_AUTO |
FE_CAN_RECOVER |
FE_CAN_HIERARCHY_AUTO,
},
.release = vp7045_fe_release,
.init = vp7045_fe_init,
.sleep = vp7045_fe_sleep,
.set_frontend = vp7045_fe_set_frontend,
.get_tune_settings = vp7045_fe_get_tune_settings,
.read_status = vp7045_fe_read_status,
.read_ber = vp7045_fe_read_ber,
.read_signal_strength = vp7045_fe_read_signal_strength,
.read_snr = vp7045_fe_read_snr,
.read_ucblocks = vp7045_fe_read_unc_blocks,
};
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2019 Google Inc
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Provides a simple driver to control the ASPEED P2A interface which allows
* the host to read and write to various regions of the BMC's memory.
*/
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/aspeed-p2a-ctrl.h>
#define DEVICE_NAME "aspeed-p2a-ctrl"
/* SCU2C is a Misc. Control Register. */
#define SCU2C 0x2c
/* SCU180 is the PCIe Configuration Setting Control Register. */
#define SCU180 0x180
/* Bit 1 controls the P2A bridge, while bit 0 controls the entire VGA device
* on the PCI bus.
*/
#define SCU180_ENP2A BIT(1)
/* The ast2400/2500 both have six ranges. */
#define P2A_REGION_COUNT 6
struct region {
u64 min;
u64 max;
u32 bit;
};
struct aspeed_p2a_model_data {
/* min, max, bit */
struct region regions[P2A_REGION_COUNT];
};
struct aspeed_p2a_ctrl {
struct miscdevice miscdev;
struct regmap *regmap;
const struct aspeed_p2a_model_data *config;
	/* Access to these needs to be locked; the mutex is held across
	 * probe, the mapping ioctl, release, and remove.
*/
struct mutex tracking;
u32 readers;
u32 readerwriters[P2A_REGION_COUNT];
phys_addr_t mem_base;
resource_size_t mem_size;
};
struct aspeed_p2a_user {
struct file *file;
struct aspeed_p2a_ctrl *parent;
/* The entire memory space is opened for reading once the bridge is
* enabled, therefore this needs only to be tracked once per user.
* If any user has it open for read, the bridge must stay enabled.
*/
u32 read;
/* Each entry of the array corresponds to a P2A Region. If the user
* opens for read or readwrite, the reference goes up here. On
* release, this array is walked and references adjusted accordingly.
*/
u32 readwrite[P2A_REGION_COUNT];
};
static void aspeed_p2a_enable_bridge(struct aspeed_p2a_ctrl *p2a_ctrl)
{
regmap_update_bits(p2a_ctrl->regmap,
SCU180, SCU180_ENP2A, SCU180_ENP2A);
}
static void aspeed_p2a_disable_bridge(struct aspeed_p2a_ctrl *p2a_ctrl)
{
regmap_update_bits(p2a_ctrl->regmap, SCU180, SCU180_ENP2A, 0);
}
static int aspeed_p2a_mmap(struct file *file, struct vm_area_struct *vma)
{
unsigned long vsize;
pgprot_t prot;
struct aspeed_p2a_user *priv = file->private_data;
struct aspeed_p2a_ctrl *ctrl = priv->parent;
if (ctrl->mem_base == 0 && ctrl->mem_size == 0)
return -EINVAL;
vsize = vma->vm_end - vma->vm_start;
prot = vma->vm_page_prot;
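	/* refuse mappings that would extend past the reserved memory region */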
if (vma->vm_pgoff + vma_pages(vma) > ctrl->mem_size >> PAGE_SHIFT)
return -EINVAL;
/* ast2400/2500 AHB accesses are not cache coherent */
prot = pgprot_noncached(prot);
if (remap_pfn_range(vma, vma->vm_start,
(ctrl->mem_base >> PAGE_SHIFT) + vma->vm_pgoff,
vsize, prot))
return -EAGAIN;
return 0;
}
static bool aspeed_p2a_region_acquire(struct aspeed_p2a_user *priv,
struct aspeed_p2a_ctrl *ctrl,
struct aspeed_p2a_ctrl_mapping *map)
{
int i;
u64 base, end;
bool matched = false;
base = map->addr;
end = map->addr + (map->length - 1);
	/* Any legal u32 address falls within one of the regions, so a match
	 * will be found.
	 */
for (i = 0; i < P2A_REGION_COUNT; i++) {
const struct region *curr = &ctrl->config->regions[i];
/* If the top of this region is lower than your base, skip it.
*/
if (curr->max < base)
continue;
/* If the bottom of this region is higher than your end, bail.
*/
if (curr->min > end)
break;
		/* Lock this and update it, so that if someone else is
		 * closing their file out, this will preserve the increment.
*/
mutex_lock(&ctrl->tracking);
ctrl->readerwriters[i] += 1;
mutex_unlock(&ctrl->tracking);
/* Track with the user, so when they close their file, we can
* decrement properly.
*/
priv->readwrite[i] += 1;
/* Enable the region as read-write. */
regmap_update_bits(ctrl->regmap, SCU2C, curr->bit, 0);
matched = true;
}
return matched;
}
static long aspeed_p2a_ioctl(struct file *file, unsigned int cmd,
unsigned long data)
{
struct aspeed_p2a_user *priv = file->private_data;
struct aspeed_p2a_ctrl *ctrl = priv->parent;
void __user *arg = (void __user *)data;
struct aspeed_p2a_ctrl_mapping map;
if (copy_from_user(&map, arg, sizeof(map)))
return -EFAULT;
switch (cmd) {
case ASPEED_P2A_CTRL_IOCTL_SET_WINDOW:
		/* If they want a region to be read-only, then since the entire
		 * space is read-only once the bridge is enabled, we just need
		 * to track that this user wants to read from the bridge, and
		 * enable it if it isn't already.
*/
if (map.flags == ASPEED_P2A_CTRL_READ_ONLY) {
mutex_lock(&ctrl->tracking);
ctrl->readers += 1;
mutex_unlock(&ctrl->tracking);
/* Track with the user, so when they close their file,
* we can decrement properly.
*/
priv->read += 1;
} else if (map.flags == ASPEED_P2A_CTRL_READWRITE) {
/* If we don't acquire any region return error. */
if (!aspeed_p2a_region_acquire(priv, ctrl, &map)) {
return -EINVAL;
}
} else {
/* Invalid map flags. */
return -EINVAL;
}
aspeed_p2a_enable_bridge(ctrl);
return 0;
case ASPEED_P2A_CTRL_IOCTL_GET_MEMORY_CONFIG:
/* This is a request for the memory-region and corresponding
* length that is used by the driver for mmap.
*/
map.flags = 0;
map.addr = ctrl->mem_base;
map.length = ctrl->mem_size;
return copy_to_user(arg, &map, sizeof(map)) ? -EFAULT : 0;
}
return -EINVAL;
}
/*
* When a user opens this file, we create a structure to track their mappings.
*
* A user can map a region as read-only (bridge enabled), or read-write (bit
 * flipped, and bridge enabled). Either way, this tracking is used so that,
 * when they release the device, the references are handled.
*
* The bridge is not enabled until a user calls an ioctl to map a region,
* simply opening the device does not enable it.
*/
static int aspeed_p2a_open(struct inode *inode, struct file *file)
{
struct aspeed_p2a_user *priv;
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->file = file;
priv->read = 0;
memset(priv->readwrite, 0, sizeof(priv->readwrite));
/* The file's private_data is initialized to the p2a_ctrl. */
priv->parent = file->private_data;
/* Set the file's private_data to the user's data. */
file->private_data = priv;
return 0;
}
/*
 * This will close the user's mappings. It will go through what they had opened
* for readwrite, and decrement those counts. If at the end, this is the last
* user, it'll close the bridge.
*/
static int aspeed_p2a_release(struct inode *inode, struct file *file)
{
int i;
u32 bits = 0;
bool open_regions = false;
struct aspeed_p2a_user *priv = file->private_data;
/* Lock others from changing these values until everything is updated
* in one pass.
*/
mutex_lock(&priv->parent->tracking);
priv->parent->readers -= priv->read;
for (i = 0; i < P2A_REGION_COUNT; i++) {
priv->parent->readerwriters[i] -= priv->readwrite[i];
if (priv->parent->readerwriters[i] > 0)
open_regions = true;
else
bits |= priv->parent->config->regions[i].bit;
}
/* Setting a bit to 1 disables the region, so let's just OR with the
* above to disable any.
*/
/* Note, if another user is trying to ioctl, they can't grab tracking,
* and therefore can't grab either register mutex.
* If another user is trying to close, they can't grab tracking either.
*/
regmap_update_bits(priv->parent->regmap, SCU2C, bits, bits);
	/* If parent->readers is zero and no regions remain open, disable the
* bridge.
*/
if (!open_regions && priv->parent->readers == 0)
aspeed_p2a_disable_bridge(priv->parent);
mutex_unlock(&priv->parent->tracking);
kfree(priv);
return 0;
}
static const struct file_operations aspeed_p2a_ctrl_fops = {
.owner = THIS_MODULE,
.mmap = aspeed_p2a_mmap,
.unlocked_ioctl = aspeed_p2a_ioctl,
.open = aspeed_p2a_open,
.release = aspeed_p2a_release,
};
/* The regions are controlled by SCU2C */
static void aspeed_p2a_disable_all(struct aspeed_p2a_ctrl *p2a_ctrl)
{
int i;
u32 value = 0;
for (i = 0; i < P2A_REGION_COUNT; i++)
value |= p2a_ctrl->config->regions[i].bit;
regmap_update_bits(p2a_ctrl->regmap, SCU2C, value, value);
/* Disable the bridge. */
aspeed_p2a_disable_bridge(p2a_ctrl);
}
static int aspeed_p2a_ctrl_probe(struct platform_device *pdev)
{
struct aspeed_p2a_ctrl *misc_ctrl;
struct device *dev;
struct resource resm;
struct device_node *node;
int rc = 0;
dev = &pdev->dev;
misc_ctrl = devm_kzalloc(dev, sizeof(*misc_ctrl), GFP_KERNEL);
if (!misc_ctrl)
return -ENOMEM;
mutex_init(&misc_ctrl->tracking);
/* optional. */
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (node) {
rc = of_address_to_resource(node, 0, &resm);
of_node_put(node);
if (rc) {
dev_err(dev, "Couldn't address to resource for reserved memory\n");
return -ENODEV;
}
misc_ctrl->mem_size = resource_size(&resm);
misc_ctrl->mem_base = resm.start;
}
misc_ctrl->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node);
if (IS_ERR(misc_ctrl->regmap)) {
dev_err(dev, "Couldn't get regmap\n");
return -ENODEV;
}
misc_ctrl->config = of_device_get_match_data(dev);
dev_set_drvdata(&pdev->dev, misc_ctrl);
aspeed_p2a_disable_all(misc_ctrl);
misc_ctrl->miscdev.minor = MISC_DYNAMIC_MINOR;
misc_ctrl->miscdev.name = DEVICE_NAME;
misc_ctrl->miscdev.fops = &aspeed_p2a_ctrl_fops;
misc_ctrl->miscdev.parent = dev;
rc = misc_register(&misc_ctrl->miscdev);
if (rc)
dev_err(dev, "Unable to register device\n");
return rc;
}
static void aspeed_p2a_ctrl_remove(struct platform_device *pdev)
{
struct aspeed_p2a_ctrl *p2a_ctrl = dev_get_drvdata(&pdev->dev);
misc_deregister(&p2a_ctrl->miscdev);
}
#define SCU2C_DRAM BIT(25)
#define SCU2C_SPI BIT(24)
#define SCU2C_SOC BIT(23)
#define SCU2C_FLASH BIT(22)
static const struct aspeed_p2a_model_data ast2400_model_data = {
.regions = {
{0x00000000, 0x17FFFFFF, SCU2C_FLASH},
{0x18000000, 0x1FFFFFFF, SCU2C_SOC},
{0x20000000, 0x2FFFFFFF, SCU2C_FLASH},
{0x30000000, 0x3FFFFFFF, SCU2C_SPI},
{0x40000000, 0x5FFFFFFF, SCU2C_DRAM},
{0x60000000, 0xFFFFFFFF, SCU2C_SOC},
}
};
static const struct aspeed_p2a_model_data ast2500_model_data = {
.regions = {
{0x00000000, 0x0FFFFFFF, SCU2C_FLASH},
{0x10000000, 0x1FFFFFFF, SCU2C_SOC},
{0x20000000, 0x3FFFFFFF, SCU2C_FLASH},
{0x40000000, 0x5FFFFFFF, SCU2C_SOC},
{0x60000000, 0x7FFFFFFF, SCU2C_SPI},
{0x80000000, 0xFFFFFFFF, SCU2C_DRAM},
}
};
static const struct of_device_id aspeed_p2a_ctrl_match[] = {
{ .compatible = "aspeed,ast2400-p2a-ctrl",
.data = &ast2400_model_data },
{ .compatible = "aspeed,ast2500-p2a-ctrl",
.data = &ast2500_model_data },
{ },
};
static struct platform_driver aspeed_p2a_ctrl_driver = {
.driver = {
.name = DEVICE_NAME,
.of_match_table = aspeed_p2a_ctrl_match,
},
.probe = aspeed_p2a_ctrl_probe,
.remove = aspeed_p2a_ctrl_remove,
};
module_platform_driver(aspeed_p2a_ctrl_driver);
MODULE_DEVICE_TABLE(of, aspeed_p2a_ctrl_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick Venture <[email protected]>");
MODULE_DESCRIPTION("Control for aspeed 2400/2500 P2A VGA HOST to BMC mappings");
|
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Copyright (C) 2023-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <dt-bindings/thermal/thermal.h>
thermal_zones: thermal-zones {
main0_thermal: main0-thermal {
polling-delay-passive = <250>; /* milliSeconds */
polling-delay = <500>; /* milliSeconds */
thermal-sensors = <&wkup_vtm0 0>;
trips {
main0_crit: main0-crit {
temperature = <125000>; /* milliCelsius */
hysteresis = <2000>; /* milliCelsius */
type = "critical";
};
};
};
main1_thermal: main1-thermal {
polling-delay-passive = <250>; /* milliSeconds */
polling-delay = <500>; /* milliSeconds */
thermal-sensors = <&wkup_vtm0 1>;
trips {
main1_crit: main1-crit {
temperature = <125000>; /* milliCelsius */
hysteresis = <2000>; /* milliCelsius */
type = "critical";
};
};
};
main2_thermal: main2-thermal {
polling-delay-passive = <250>; /* milliSeconds */
polling-delay = <500>; /* milliSeconds */
thermal-sensors = <&wkup_vtm0 2>;
trips {
main2_crit: main2-crit {
temperature = <125000>; /* milliCelsius */
hysteresis = <2000>; /* milliCelsius */
type = "critical";
};
};
};
};
|
// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
* Apple iPad mini 3 (Wi-Fi), J85m, iPad4,7 (A1599)
* Copyright (c) 2022, Konrad Dybcio <[email protected]>
*/
/dts-v1/;
#include "s5l8960x-mini3.dtsi"
/ {
compatible = "apple,j85m", "apple,s5l8960x", "apple,arm-platform";
model = "Apple iPad mini 3 (Wi-Fi)";
};
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
*
* Copyright (c) 2005 Linas Vepstas <[email protected]>
*/
#ifndef ASM_POWERPC_EEH_EVENT_H
#define ASM_POWERPC_EEH_EVENT_H
#ifdef __KERNEL__
/*
* structure holding pci controller data that describes a
* change in the isolation status of a PCI slot. A pointer
* to this struct is passed as the data pointer in a notify
* callback.
*/
struct eeh_event {
struct list_head list; /* to form event queue */
struct eeh_pe *pe; /* EEH PE */
};
int eeh_event_init(void);
int eeh_send_failure_event(struct eeh_pe *pe);
int __eeh_send_failure_event(struct eeh_pe *pe);
void eeh_remove_event(struct eeh_pe *pe, bool force);
void eeh_handle_normal_event(struct eeh_pe *pe);
void eeh_handle_special_event(void);
#endif /* __KERNEL__ */
#endif /* ASM_POWERPC_EEH_EVENT_H */
|
#define BTF_TYPES \
.btf_strings = "\0int\0i\0ctx\0callback\0main\0", \
.btf_types = { \
/* 1: int */ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), \
/* 2: int* */ BTF_PTR_ENC(1), \
/* 3: void* */ BTF_PTR_ENC(0), \
/* 4: int __(void*) */ BTF_FUNC_PROTO_ENC(1, 1), \
BTF_FUNC_PROTO_ARG_ENC(7, 3), \
/* 5: int __(int, int*) */ BTF_FUNC_PROTO_ENC(1, 2), \
BTF_FUNC_PROTO_ARG_ENC(5, 1), \
BTF_FUNC_PROTO_ARG_ENC(7, 2), \
/* 6: main */ BTF_FUNC_ENC(20, 4), \
/* 7: callback */ BTF_FUNC_ENC(11, 5), \
BTF_END_RAW \
}
#define MAIN_TYPE 6
#define CALLBACK_TYPE 7
/* can't use BPF_CALL_REL, jit_subprogs adjusts IMM & OFF
* fields for pseudo calls
*/
#define PSEUDO_CALL_INSN() \
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, \
INSN_OFF_MASK, INSN_IMM_MASK)
/* can't use BPF_FUNC_loop constant,
 * do_misc_fixups adjusts the IMM field
*/
#define HELPER_CALL_INSN() \
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, INSN_OFF_MASK, INSN_IMM_MASK)
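/* bpf_loop(nr_loops, callback_fn, callback_ctx, flags) is set up as:
 * R1 = nr_loops, R2 = callback address (ld_imm64 BPF_PSEUDO_FUNC),
 * R3 = callback_ctx, R4 = flags -- the pattern each test below builds
 */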
{
"inline simple bpf_loop call",
.insns = {
/* main */
/* force verifier state branching to verify logic on first and
* subsequent bpf_loop insn processing steps
*/
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 777, 2),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 2),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 6),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* callback */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_insns = { PSEUDO_CALL_INSN() },
.unexpected_insns = { HELPER_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.runs = 0,
.func_info = { { 0, MAIN_TYPE }, { 12, CALLBACK_TYPE } },
.func_info_cnt = 2,
BTF_TYPES
},
{
"don't inline bpf_loop call, flags non-zero",
.insns = {
/* main */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 9),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 7),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -10),
/* callback */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_insns = { HELPER_CALL_INSN() },
.unexpected_insns = { PSEUDO_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.runs = 0,
.func_info = { { 0, MAIN_TYPE }, { 16, CALLBACK_TYPE } },
.func_info_cnt = 2,
BTF_TYPES
},
{
"don't inline bpf_loop call, callback non-constant",
.insns = {
/* main */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 777, 4), /* pick a random callback */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 10),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 3),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 8),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* callback */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* callback #2 */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_insns = { HELPER_CALL_INSN() },
.unexpected_insns = { PSEUDO_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.runs = 0,
.func_info = {
{ 0, MAIN_TYPE },
{ 14, CALLBACK_TYPE },
{ 16, CALLBACK_TYPE }
},
.func_info_cnt = 3,
BTF_TYPES
},
{
"bpf_loop_inline and a dead func",
.insns = {
/* main */
/* A reference to callback #1 to make verifier count it as a func.
* This reference is overwritten below and callback #1 is dead.
*/
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 9),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 8),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* callback */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* callback #2 */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_insns = { PSEUDO_CALL_INSN() },
.unexpected_insns = { HELPER_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.runs = 0,
.func_info = {
{ 0, MAIN_TYPE },
{ 10, CALLBACK_TYPE },
{ 12, CALLBACK_TYPE }
},
.func_info_cnt = 3,
BTF_TYPES
},
{
"bpf_loop_inline stack locations for loop vars",
.insns = {
/* main */
BPF_ST_MEM(BPF_W, BPF_REG_10, -12, 0x77),
/* bpf_loop call #1 */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 22),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
/* bpf_loop call #2 */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 2),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 16),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
/* call func and exit */
BPF_CALL_REL(2),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* func */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -32, 0x55),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 2),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 6),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* callback */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_insns = {
BPF_ST_MEM(BPF_W, BPF_REG_10, -12, 0x77),
SKIP_INSNS(),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -40),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, -32),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, -24),
SKIP_INSNS(),
/* offsets are the same as in the first call */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -40),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, -32),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, -24),
SKIP_INSNS(),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -32, 0x55),
SKIP_INSNS(),
/* offsets differ from main because of different offset
* in BPF_ST_MEM instruction
*/
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, -40),
},
.unexpected_insns = { HELPER_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.func_info = {
{ 0, MAIN_TYPE },
{ 16, MAIN_TYPE },
{ 25, CALLBACK_TYPE },
},
.func_info_cnt = 3,
BTF_TYPES
},
{
"inline bpf_loop call in a big program",
.insns = {},
.fill_helper = bpf_fill_big_prog_with_loop_1,
.expected_insns = { PSEUDO_CALL_INSN() },
.unexpected_insns = { HELPER_CALL_INSN() },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.flags = F_NEEDS_JIT_ENABLED,
.func_info = { { 0, MAIN_TYPE }, { 16, CALLBACK_TYPE } },
.func_info_cnt = 2,
BTF_TYPES
},
#undef HELPER_CALL_INSN
#undef PSEUDO_CALL_INSN
#undef CALLBACK_TYPE
#undef MAIN_TYPE
#undef BTF_TYPES
|
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*/
#include "system_global.h"
#include "ia_css_types.h"
#include "ia_css_macc_table.host.h"
/* Multi-Axes Color Correction table for ISP1.
* 64values = 2x2matrix for 16area, [s2.13]
* ineffective: 16 of "identity 2x2 matrix" {8192,0,0,8192}
*/
const struct ia_css_macc_table default_macc_table = {
{
8192, 0, 0, 8192, 8192, 0, 0, 8192,
8192, 0, 0, 8192, 8192, 0, 0, 8192,
8192, 0, 0, 8192, 8192, 0, 0, 8192,
8192, 0, 0, 8192, 8192, 0, 0, 8192,
8192, 0, 0, 8192, 8192, 0, 0, 8192,
8192, 0, 0, 8192, 8192, 0, 0, 8192,
8192, 0, 0, 8192, 8192, 0, 0, 8192,
8192, 0, 0, 8192, 8192, 0, 0, 8192
}
};
/* Multi-Axes Color Correction table for ISP2.
* 64values = 2x2matrix for 16area, [s1.12]
* ineffective: 16 of "identity 2x2 matrix" {4096,0,0,4096}
*/
const struct ia_css_macc_table default_macc2_table = {
{
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096,
4096, 0, 0, 4096, 4096, 0, 0, 4096
}
};
|
/*
* platinumfb.c -- frame buffer device for the PowerMac 'platinum' display
*
* Copyright (C) 1998 Franz Sirl
*
* Frame buffer structure from:
* drivers/video/controlfb.c -- frame buffer device for
* Apple 'control' display chip.
* Copyright (C) 1998 Dan Jacobowitz
*
* Hardware information from:
* platinum.c: Console support for PowerMac "platinum" display adaptor.
* Copyright (C) 1996 Paul Mackerras and Mark Abene
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#undef DEBUG
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/nvram.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include "macmodes.h"
#include "platinumfb.h"
static int default_vmode = VMODE_NVRAM;
static int default_cmode = CMODE_NVRAM;
struct fb_info_platinum {
struct fb_info *info;
int vmode, cmode;
int xres, yres;
int vxres, vyres;
int xoffset, yoffset;
struct {
__u8 red, green, blue;
} palette[256];
u32 pseudo_palette[16];
volatile struct cmap_regs __iomem *cmap_regs;
unsigned long cmap_regs_phys;
volatile struct platinum_regs __iomem *platinum_regs;
unsigned long platinum_regs_phys;
__u8 __iomem *frame_buffer;
volatile __u8 __iomem *base_frame_buffer;
unsigned long frame_buffer_phys;
unsigned long total_vram;
int clktype;
int dactype;
struct resource rsrc_fb, rsrc_reg;
};
/*
* Frame buffer device API
*/
static int platinumfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info);
static int platinumfb_blank(int blank_mode, struct fb_info *info);
static int platinumfb_set_par (struct fb_info *info);
static int platinumfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info);
/*
* internal functions
*/
static inline int platinum_vram_reqd(int video_mode, int color_mode);
static int read_platinum_sense(struct fb_info_platinum *pinfo);
static void set_platinum_clock(struct fb_info_platinum *pinfo);
static void platinum_set_hardware(struct fb_info_platinum *pinfo);
static int platinum_var_to_par(struct fb_var_screeninfo *var,
struct fb_info_platinum *pinfo,
int check_only);
/*
* Interface used by the world
*/
static const struct fb_ops platinumfb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = platinumfb_check_var,
.fb_set_par = platinumfb_set_par,
.fb_setcolreg = platinumfb_setcolreg,
.fb_blank = platinumfb_blank,
};
/*
* Checks a var structure
*/
static int platinumfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info)
{
return platinum_var_to_par(var, info->par, 1);
}
/*
* Applies current var to display
*/
static int platinumfb_set_par (struct fb_info *info)
{
struct fb_info_platinum *pinfo = info->par;
struct platinum_regvals *init;
int err, offset = 0x20;
	err = platinum_var_to_par(&info->var, pinfo, 0);
	if (err) {
		printk(KERN_ERR "platinumfb_set_par: error calling platinum_var_to_par: %d.\n",
		       err);
		return err;
	}
platinum_set_hardware(pinfo);
init = platinum_reg_init[pinfo->vmode-1];
if ((pinfo->vmode == VMODE_832_624_75) && (pinfo->cmode > CMODE_8))
offset = 0x10;
info->screen_base = pinfo->frame_buffer + init->fb_offset + offset;
mutex_lock(&info->mm_lock);
info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset;
mutex_unlock(&info->mm_lock);
info->fix.visual = (pinfo->cmode == CMODE_8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode)
+ offset;
printk("line_length: %x\n", info->fix.line_length);
return 0;
}
static int platinumfb_blank(int blank, struct fb_info *fb)
{
/*
	 * Blank the screen if blank_mode != 0, else unblank. If the fb_blank
	 * hook is NULL, the caller blanks by setting the CLUT (Color Look Up
	 * Table) to all black instead. Return 0 if blanking succeeded, != 0 if
	 * un-/blanking failed, e.g. on a video mode which doesn't support it.
	 * Implements VESA suspend and powerdown modes on hardware that
	 * supports disabling hsync/vsync:
* blank_mode == 2: suspend vsync
* blank_mode == 3: suspend hsync
* blank_mode == 4: powerdown
*/
/* [danj] I think there's something fishy about those constants... */
/*
struct fb_info_platinum *info = (struct fb_info_platinum *) fb;
int ctrl;
ctrl = le32_to_cpup(&info->platinum_regs->ctrl.r) | 0x33;
if (blank)
--blank_mode;
if (blank & VESA_VSYNC_SUSPEND)
ctrl &= ~3;
if (blank & VESA_HSYNC_SUSPEND)
ctrl &= ~0x30;
out_le32(&info->platinum_regs->ctrl.r, ctrl);
*/
/* TODO: Figure out how the heck to powerdown this thing! */
return 0;
}
static int platinumfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
struct fb_info_platinum *pinfo = info->par;
volatile struct cmap_regs __iomem *cmap_regs = pinfo->cmap_regs;
if (regno > 255)
return 1;
red >>= 8;
green >>= 8;
blue >>= 8;
pinfo->palette[regno].red = red;
pinfo->palette[regno].green = green;
pinfo->palette[regno].blue = blue;
out_8(&cmap_regs->addr, regno); /* tell clut what addr to fill */
out_8(&cmap_regs->lut, red); /* send one color channel at */
out_8(&cmap_regs->lut, green); /* a time... */
out_8(&cmap_regs->lut, blue);
if (regno < 16) {
int i;
u32 *pal = info->pseudo_palette;
switch (pinfo->cmode) {
case CMODE_16:
pal[regno] = (regno << 10) | (regno << 5) | regno;
break;
case CMODE_32:
i = (regno << 8) | regno;
pal[regno] = (i << 16) | i;
break;
}
}
return 0;
}
static inline int platinum_vram_reqd(int video_mode, int color_mode)
{
int baseval = vmode_attrs[video_mode-1].hres * (1<<color_mode);
if ((video_mode == VMODE_832_624_75) && (color_mode > CMODE_8))
baseval += 0x10;
else
baseval += 0x20;
return vmode_attrs[video_mode-1].vres * baseval + 0x1000;
}
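/*
 * Worked example (illustrative): for VMODE_640_480_60 at CMODE_8, hres is 640
 * and 1 << CMODE_8 == 1 byte per pixel, so each scanline occupies
 * 640 + 0x20 = 672 bytes and the whole mode needs 480 * 672 + 0x1000 =
 * 326656 bytes, comfortably inside the builtin 1MB bank.
 */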
#define STORE_D2(a, d) do { \
	out_8(&cmap_regs->addr, (a) + 32); \
	out_8(&cmap_regs->d2, (d)); \
} while (0)
static void set_platinum_clock(struct fb_info_platinum *pinfo)
{
volatile struct cmap_regs __iomem *cmap_regs = pinfo->cmap_regs;
struct platinum_regvals *init;
init = platinum_reg_init[pinfo->vmode-1];
STORE_D2(6, 0xc6);
out_8(&cmap_regs->addr,3+32);
if (in_8(&cmap_regs->d2) == 2) {
STORE_D2(7, init->clock_params[pinfo->clktype][0]);
STORE_D2(8, init->clock_params[pinfo->clktype][1]);
STORE_D2(3, 3);
} else {
STORE_D2(4, init->clock_params[pinfo->clktype][0]);
STORE_D2(5, init->clock_params[pinfo->clktype][1]);
STORE_D2(3, 2);
}
__delay(5000);
STORE_D2(9, 0xa6);
}
/* Now how about actually saying, Make it so! */
/* Some things in here probably don't need to be done each time. */
static void platinum_set_hardware(struct fb_info_platinum *pinfo)
{
volatile struct platinum_regs __iomem *platinum_regs = pinfo->platinum_regs;
volatile struct cmap_regs __iomem *cmap_regs = pinfo->cmap_regs;
struct platinum_regvals *init;
int i;
int vmode, cmode;
vmode = pinfo->vmode;
cmode = pinfo->cmode;
init = platinum_reg_init[vmode - 1];
/* Initialize display timing registers */
out_be32(&platinum_regs->reg[24].r, 7); /* turn display off */
for (i = 0; i < 26; ++i)
out_be32(&platinum_regs->reg[i+32].r, init->regs[i]);
out_be32(&platinum_regs->reg[26+32].r, (pinfo->total_vram == 0x100000 ?
init->offset[cmode] + 4 - cmode :
init->offset[cmode]));
out_be32(&platinum_regs->reg[16].r, (unsigned) pinfo->frame_buffer_phys+init->fb_offset+0x10);
out_be32(&platinum_regs->reg[18].r, init->pitch[cmode]);
out_be32(&platinum_regs->reg[19].r, (pinfo->total_vram == 0x100000 ?
init->mode[cmode+1] :
init->mode[cmode]));
out_be32(&platinum_regs->reg[20].r, (pinfo->total_vram == 0x100000 ? 0x11 : 0x1011));
out_be32(&platinum_regs->reg[21].r, 0x100);
out_be32(&platinum_regs->reg[22].r, 1);
out_be32(&platinum_regs->reg[23].r, 1);
out_be32(&platinum_regs->reg[26].r, 0xc00);
out_be32(&platinum_regs->reg[27].r, 0x235);
/* out_be32(&platinum_regs->reg[27].r, 0x2aa); */
STORE_D2(0, (pinfo->total_vram == 0x100000 ?
init->dacula_ctrl[cmode] & 0xf :
init->dacula_ctrl[cmode]));
STORE_D2(1, 4);
STORE_D2(2, 0);
set_platinum_clock(pinfo);
out_be32(&platinum_regs->reg[24].r, 0); /* turn display on */
}
/*
* Set misc info vars for this driver
*/
static void platinum_init_info(struct fb_info *info,
struct fb_info_platinum *pinfo)
{
/* Fill fb_info */
info->fbops = &platinumfb_ops;
info->pseudo_palette = pinfo->pseudo_palette;
info->screen_base = pinfo->frame_buffer + 0x20;
fb_alloc_cmap(&info->cmap, 256, 0);
/* Fill fix common fields */
strcpy(info->fix.id, "platinum");
info->fix.mmio_start = pinfo->platinum_regs_phys;
info->fix.mmio_len = 0x1000;
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.smem_start = pinfo->frame_buffer_phys + 0x20; /* will be updated later */
info->fix.smem_len = pinfo->total_vram - 0x20;
info->fix.ywrapstep = 0;
info->fix.xpanstep = 0;
info->fix.ypanstep = 0;
info->fix.type_aux = 0;
info->fix.accel = FB_ACCEL_NONE;
}
static int platinum_init_fb(struct fb_info *info)
{
struct fb_info_platinum *pinfo = info->par;
struct fb_var_screeninfo var;
int sense, rc;
sense = read_platinum_sense(pinfo);
printk(KERN_INFO "platinumfb: Monitor sense value = 0x%x, ", sense);
if (IS_REACHABLE(CONFIG_NVRAM) && default_vmode == VMODE_NVRAM)
default_vmode = nvram_read_byte(NV_VMODE);
if (default_vmode <= 0 || default_vmode > VMODE_MAX ||
!platinum_reg_init[default_vmode - 1]) {
default_vmode = mac_map_monitor_sense(sense);
if (!platinum_reg_init[default_vmode - 1])
default_vmode = VMODE_640_480_60;
}
if (IS_REACHABLE(CONFIG_NVRAM) && default_cmode == CMODE_NVRAM)
default_cmode = nvram_read_byte(NV_CMODE);
if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
default_cmode = CMODE_8;
/*
* Reduce the pixel size if we don't have enough VRAM.
*/
while(default_cmode > CMODE_8 &&
platinum_vram_reqd(default_vmode, default_cmode) > pinfo->total_vram)
default_cmode--;
printk("platinumfb: Using video mode %d and color mode %d.\n", default_vmode, default_cmode);
/* Setup default var */
if (mac_vmode_to_var(default_vmode, default_cmode, &var) < 0) {
/* This shouldn't happen! */
printk("mac_vmode_to_var(%d, %d,) failed\n", default_vmode, default_cmode);
try_again:
default_vmode = VMODE_640_480_60;
default_cmode = CMODE_8;
if (mac_vmode_to_var(default_vmode, default_cmode, &var) < 0) {
printk(KERN_ERR "platinumfb: mac_vmode_to_var() failed\n");
return -ENXIO;
}
}
/* Initialize info structure */
platinum_init_info(info, pinfo);
/* Apply default var */
info->var = var;
var.activate = FB_ACTIVATE_NOW;
rc = fb_set_var(info, &var);
if (rc && (default_vmode != VMODE_640_480_60 || default_cmode != CMODE_8))
goto try_again;
/* Register with fbdev layer */
rc = register_framebuffer(info);
if (rc < 0)
return rc;
fb_info(info, "Apple Platinum frame buffer device\n");
return 0;
}
/*
* Get the monitor sense value.
* Note that this can be called before calibrate_delay,
* so we can't use udelay.
*/
static int read_platinum_sense(struct fb_info_platinum *info)
{
volatile struct platinum_regs __iomem *platinum_regs = info->platinum_regs;
int sense;
out_be32(&platinum_regs->reg[23].r, 7); /* turn off drivers */
__delay(2000);
sense = (~in_be32(&platinum_regs->reg[23].r) & 7) << 8;
/* drive each sense line low in turn and collect the other 2 */
out_be32(&platinum_regs->reg[23].r, 3); /* drive A low */
__delay(2000);
sense |= (~in_be32(&platinum_regs->reg[23].r) & 3) << 4;
out_be32(&platinum_regs->reg[23].r, 5); /* drive B low */
__delay(2000);
sense |= (~in_be32(&platinum_regs->reg[23].r) & 4) << 1;
sense |= (~in_be32(&platinum_regs->reg[23].r) & 1) << 2;
out_be32(&platinum_regs->reg[23].r, 6); /* drive C low */
__delay(2000);
sense |= (~in_be32(&platinum_regs->reg[23].r) & 6) >> 1;
out_be32(&platinum_regs->reg[23].r, 7); /* turn off drivers */
return sense;
}
/*
* This routine takes a user-supplied var, and picks the best vmode/cmode from it.
* It also updates the var structure to the actual mode data obtained
*/
static int platinum_var_to_par(struct fb_var_screeninfo *var,
struct fb_info_platinum *pinfo,
int check_only)
{
int vmode, cmode;
if (mac_var_to_vmode(var, &vmode, &cmode) != 0) {
printk(KERN_ERR "platinum_var_to_par: mac_var_to_vmode unsuccessful.\n");
printk(KERN_ERR "platinum_var_to_par: var->xres = %d\n", var->xres);
printk(KERN_ERR "platinum_var_to_par: var->yres = %d\n", var->yres);
printk(KERN_ERR "platinum_var_to_par: var->xres_virtual = %d\n", var->xres_virtual);
printk(KERN_ERR "platinum_var_to_par: var->yres_virtual = %d\n", var->yres_virtual);
printk(KERN_ERR "platinum_var_to_par: var->bits_per_pixel = %d\n", var->bits_per_pixel);
printk(KERN_ERR "platinum_var_to_par: var->pixclock = %d\n", var->pixclock);
printk(KERN_ERR "platinum_var_to_par: var->vmode = %d\n", var->vmode);
return -EINVAL;
}
if (!platinum_reg_init[vmode-1]) {
printk(KERN_ERR "platinum_var_to_par, vmode %d not valid.\n", vmode);
return -EINVAL;
}
if (platinum_vram_reqd(vmode, cmode) > pinfo->total_vram) {
printk(KERN_ERR "platinum_var_to_par, not enough ram for vmode %d, cmode %d.\n", vmode, cmode);
return -EINVAL;
}
if (mac_vmode_to_var(vmode, cmode, var))
return -EINVAL;
if (check_only)
return 0;
pinfo->vmode = vmode;
pinfo->cmode = cmode;
pinfo->xres = vmode_attrs[vmode-1].hres;
pinfo->yres = vmode_attrs[vmode-1].vres;
pinfo->xoffset = 0;
pinfo->yoffset = 0;
pinfo->vxres = pinfo->xres;
pinfo->vyres = pinfo->yres;
return 0;
}
/*
* Parse user specified options (`video=platinumfb:')
*/
static int __init platinumfb_setup(char *options)
{
char *this_opt;
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "vmode:", 6)) {
int vmode = simple_strtoul(this_opt+6, NULL, 0);
if (vmode > 0 && vmode <= VMODE_MAX)
default_vmode = vmode;
} else if (!strncmp(this_opt, "cmode:", 6)) {
int depth = simple_strtoul(this_opt+6, NULL, 0);
switch (depth) {
case 0:
case 8:
default_cmode = CMODE_8;
break;
case 15:
case 16:
default_cmode = CMODE_16;
break;
case 24:
case 32:
default_cmode = CMODE_32;
break;
}
}
}
return 0;
}
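/*
 * Usage sketch: with the parser above, a boot argument such as
 *
 *	video=platinumfb:vmode:14,cmode:16
 *
 * selects video mode 14 with a 15/16bpp colour mode. Unrecognized depths fall
 * through the switch and leave default_cmode untouched. (The mode number is
 * illustrative; valid values depend on macmodes.h.)
 */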
#ifdef __powerpc__
#define invalidate_cache(addr) \
asm volatile("eieio; dcbf 0,%1" \
: "=m" (*(addr)) : "r" (addr) : "memory");
#else
#define invalidate_cache(addr)
#endif
static int platinumfb_probe(struct platform_device* odev)
{
struct device_node *dp = odev->dev.of_node;
struct fb_info *info;
struct fb_info_platinum *pinfo;
volatile __u8 *fbuffer;
int bank0, bank1, bank2, bank3, rc;
dev_info(&odev->dev, "Found Apple Platinum video hardware\n");
info = framebuffer_alloc(sizeof(*pinfo), &odev->dev);
if (!info)
return -ENOMEM;
pinfo = info->par;
if (of_address_to_resource(dp, 0, &pinfo->rsrc_reg) ||
of_address_to_resource(dp, 1, &pinfo->rsrc_fb)) {
dev_err(&odev->dev, "Can't get resources\n");
framebuffer_release(info);
return -ENXIO;
}
dev_dbg(&odev->dev, " registers : 0x%llx...0x%llx\n",
(unsigned long long)pinfo->rsrc_reg.start,
(unsigned long long)pinfo->rsrc_reg.end);
dev_dbg(&odev->dev, " framebuffer: 0x%llx...0x%llx\n",
(unsigned long long)pinfo->rsrc_fb.start,
(unsigned long long)pinfo->rsrc_fb.end);
	/* Do not try to request the register space: it overlaps with the
	 * northbridge and the request can fail. Only request the framebuffer.
	 */
if (!request_mem_region(pinfo->rsrc_fb.start,
resource_size(&pinfo->rsrc_fb),
"platinumfb framebuffer")) {
printk(KERN_ERR "platinumfb: Can't request framebuffer !\n");
framebuffer_release(info);
return -ENXIO;
}
/* frame buffer - map only 4MB */
pinfo->frame_buffer_phys = pinfo->rsrc_fb.start;
pinfo->frame_buffer = ioremap_wt(pinfo->rsrc_fb.start, 0x400000);
pinfo->base_frame_buffer = pinfo->frame_buffer;
/* registers */
pinfo->platinum_regs_phys = pinfo->rsrc_reg.start;
pinfo->platinum_regs = ioremap(pinfo->rsrc_reg.start, 0x1000);
pinfo->cmap_regs_phys = 0xf301b000; /* XXX not in prom? */
request_mem_region(pinfo->cmap_regs_phys, 0x1000, "platinumfb cmap");
pinfo->cmap_regs = ioremap(pinfo->cmap_regs_phys, 0x1000);
/* Grok total video ram */
out_be32(&pinfo->platinum_regs->reg[16].r, (unsigned)pinfo->frame_buffer_phys);
out_be32(&pinfo->platinum_regs->reg[20].r, 0x1011); /* select max vram */
out_be32(&pinfo->platinum_regs->reg[24].r, 0); /* switch in vram */
fbuffer = pinfo->base_frame_buffer;
fbuffer[0x100000] = 0x34;
fbuffer[0x100008] = 0x0;
invalidate_cache(&fbuffer[0x100000]);
fbuffer[0x200000] = 0x56;
fbuffer[0x200008] = 0x0;
invalidate_cache(&fbuffer[0x200000]);
fbuffer[0x300000] = 0x78;
fbuffer[0x300008] = 0x0;
invalidate_cache(&fbuffer[0x300000]);
bank0 = 1; /* builtin 1MB vram, always there */
bank1 = fbuffer[0x100000] == 0x34;
bank2 = fbuffer[0x200000] == 0x56;
bank3 = fbuffer[0x300000] == 0x78;
pinfo->total_vram = (bank0 + bank1 + bank2 + bank3) * 0x100000;
printk(KERN_INFO "platinumfb: Total VRAM = %dMB (%d%d%d%d)\n",
(unsigned int) (pinfo->total_vram / 1024 / 1024),
bank3, bank2, bank1, bank0);
/*
* Try to determine whether we have an old or a new DACula.
*/
out_8(&pinfo->cmap_regs->addr, 0x40);
pinfo->dactype = in_8(&pinfo->cmap_regs->d2);
switch (pinfo->dactype) {
case 0x3c:
pinfo->clktype = 1;
printk(KERN_INFO "platinumfb: DACula type 0x3c\n");
break;
case 0x84:
pinfo->clktype = 0;
printk(KERN_INFO "platinumfb: DACula type 0x84\n");
break;
default:
pinfo->clktype = 0;
printk(KERN_INFO "platinumfb: Unknown DACula type: %x\n", pinfo->dactype);
break;
}
dev_set_drvdata(&odev->dev, info);
rc = platinum_init_fb(info);
if (rc != 0) {
iounmap(pinfo->frame_buffer);
iounmap(pinfo->platinum_regs);
iounmap(pinfo->cmap_regs);
framebuffer_release(info);
}
return rc;
}
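/*
 * The VRAM probe in platinumfb_probe() relies on a simple principle: writes
 * to an unpopulated 1MB bank do not stick, so writing a distinct marker byte
 * at each megabyte boundary and reading it back reveals how many banks are
 * present. A self-contained sketch of the same technique (hypothetical
 * helper, not called by the driver):
 */
static int __maybe_unused platinum_probe_vram_banks(volatile __u8 __iomem *fb)
{
	static const __u8 marker[] = { 0x34, 0x56, 0x78 };
	int banks = 1;	/* bank 0, the builtin 1MB, is always there */
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long off = (i + 1) * 0x100000;

		fb[off] = marker[i];
		fb[off + 8] = 0;	/* dummy write to flush posting */
		invalidate_cache(&fb[off]);
		if (fb[off] == marker[i])
			banks++;
	}
	return banks;
}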
static void platinumfb_remove(struct platform_device* odev)
{
struct fb_info *info = dev_get_drvdata(&odev->dev);
struct fb_info_platinum *pinfo = info->par;
unregister_framebuffer (info);
/* Unmap frame buffer and registers */
iounmap(pinfo->frame_buffer);
iounmap(pinfo->platinum_regs);
iounmap(pinfo->cmap_regs);
release_mem_region(pinfo->rsrc_fb.start,
resource_size(&pinfo->rsrc_fb));
release_mem_region(pinfo->cmap_regs_phys, 0x1000);
framebuffer_release(info);
}
static const struct of_device_id platinumfb_match[] =
{
	{
		.name	= "platinum",
	},
	{},
};
static struct platform_driver platinum_driver =
{
.driver = {
.name = "platinumfb",
.of_match_table = platinumfb_match,
},
.probe = platinumfb_probe,
.remove = platinumfb_remove,
};
static int __init platinumfb_init(void)
{
#ifndef MODULE
char *option = NULL;
if (fb_get_options("platinumfb", &option))
return -ENODEV;
platinumfb_setup(option);
#endif
platform_driver_register(&platinum_driver);
return 0;
}
static void __exit platinumfb_exit(void)
{
platform_driver_unregister(&platinum_driver);
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("framebuffer driver for Apple Platinum video");
module_init(platinumfb_init);
#ifdef MODULE
module_exit(platinumfb_exit);
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* Backlight driver for the Kinetic KTD253
* Based on code and know-how from the Samsung GT-S7710
* Gareth Phillips <[email protected]>
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
/* Current ratio is n/32 from 1/32 to 32/32 */
#define KTD253_MIN_RATIO 1
#define KTD253_MAX_RATIO 32
#define KTD253_DEFAULT_RATIO 13
#define KTD253_T_LOW_NS (200 + 10) /* Additional 10ns as safety factor */
#define KTD253_T_HIGH_NS (200 + 10) /* Additional 10ns as safety factor */
#define KTD253_T_OFF_CRIT_NS 100000 /* 100 us; a low pulse this long is no longer safe */
#define KTD253_T_OFF_MS 3
struct ktd253_backlight {
struct device *dev;
struct backlight_device *bl;
struct gpio_desc *gpiod;
u16 ratio;
};
static void ktd253_backlight_set_max_ratio(struct ktd253_backlight *ktd253)
{
gpiod_set_value_cansleep(ktd253->gpiod, 1);
ndelay(KTD253_T_HIGH_NS);
/* We always fall back to this when we power on */
}
static int ktd253_backlight_stepdown(struct ktd253_backlight *ktd253)
{
/*
* These GPIO operations absolutely can NOT sleep so no _cansleep
* suffixes, and no using GPIO expanders on slow buses for this!
*
* The maximum number of cycles of the loop is 32 so the time taken
* should nominally be:
* (T_LOW_NS + T_HIGH_NS + loop_time) * 32
*
* Architectures do not always support ndelay() and we will get a few us
	 * instead. If we hit the critical time limit, an interrupt has likely
	 * occurred during the low part of the pulse and we need to restart from
	 * the top so we leave the backlight in a known state.
*/
u64 ns;
ns = ktime_get_ns();
gpiod_set_value(ktd253->gpiod, 0);
ndelay(KTD253_T_LOW_NS);
gpiod_set_value(ktd253->gpiod, 1);
ns = ktime_get_ns() - ns;
if (ns >= KTD253_T_OFF_CRIT_NS) {
dev_err(ktd253->dev, "PCM on backlight took too long (%llu ns)\n", ns);
return -EAGAIN;
}
ndelay(KTD253_T_HIGH_NS);
return 0;
}
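/*
 * Illustrative only: since each low pulse steps the ratio down by one and
 * stepping below 1/32 wraps back to 32/32, the number of pulses needed to
 * move between two non-zero ratios is
 *
 *	pulses = (current - target + 32) % 32
 *
 * e.g. going from 13/32 down to 10/32 takes 3 pulses, while going "up" from
 * 10/32 to 13/32 takes 29 pulses (down through 1/32 and around).
 */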
static int ktd253_backlight_update_status(struct backlight_device *bl)
{
struct ktd253_backlight *ktd253 = bl_get_data(bl);
int brightness = backlight_get_brightness(bl);
u16 target_ratio;
u16 current_ratio = ktd253->ratio;
int ret;
dev_dbg(ktd253->dev, "new brightness/ratio: %d/32\n", brightness);
target_ratio = brightness;
if (target_ratio == current_ratio)
/* This is already right */
return 0;
if (target_ratio == 0) {
gpiod_set_value_cansleep(ktd253->gpiod, 0);
/*
* We need to keep the GPIO low for at least this long
* to actually switch the KTD253 off.
*/
msleep(KTD253_T_OFF_MS);
ktd253->ratio = 0;
return 0;
}
if (current_ratio == 0) {
ktd253_backlight_set_max_ratio(ktd253);
current_ratio = KTD253_MAX_RATIO;
}
while (current_ratio != target_ratio) {
/*
* These GPIO operations absolutely can NOT sleep so no
* _cansleep suffixes, and no using GPIO expanders on
* slow buses for this!
*/
ret = ktd253_backlight_stepdown(ktd253);
if (ret == -EAGAIN) {
/*
* Something disturbed the backlight setting code when
* running so we need to bring the PWM back to a known
* state. This shouldn't happen too much.
*/
gpiod_set_value_cansleep(ktd253->gpiod, 0);
msleep(KTD253_T_OFF_MS);
ktd253_backlight_set_max_ratio(ktd253);
current_ratio = KTD253_MAX_RATIO;
} else if (current_ratio == KTD253_MIN_RATIO) {
/* After 1/32 we loop back to 32/32 */
current_ratio = KTD253_MAX_RATIO;
} else {
current_ratio--;
}
}
ktd253->ratio = current_ratio;
dev_dbg(ktd253->dev, "new ratio set to %d/32\n", target_ratio);
return 0;
}
static const struct backlight_ops ktd253_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = ktd253_backlight_update_status,
};
static int ktd253_backlight_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct backlight_device *bl;
struct ktd253_backlight *ktd253;
u32 max_brightness;
u32 brightness;
int ret;
ktd253 = devm_kzalloc(dev, sizeof(*ktd253), GFP_KERNEL);
if (!ktd253)
return -ENOMEM;
ktd253->dev = dev;
ret = device_property_read_u32(dev, "max-brightness", &max_brightness);
if (ret)
max_brightness = KTD253_MAX_RATIO;
if (max_brightness > KTD253_MAX_RATIO) {
/* Clamp brightness to hardware max */
dev_err(dev, "illegal max brightness specified\n");
max_brightness = KTD253_MAX_RATIO;
}
ret = device_property_read_u32(dev, "default-brightness", &brightness);
if (ret)
brightness = KTD253_DEFAULT_RATIO;
if (brightness > max_brightness) {
/* Clamp default brightness to max brightness */
dev_err(dev, "default brightness exceeds max brightness\n");
brightness = max_brightness;
}
ktd253->gpiod = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(ktd253->gpiod))
return dev_err_probe(dev, PTR_ERR(ktd253->gpiod),
"gpio line missing or invalid.\n");
gpiod_set_consumer_name(ktd253->gpiod, dev_name(dev));
/* Bring backlight to a known off state */
msleep(KTD253_T_OFF_MS);
bl = devm_backlight_device_register(dev, dev_name(dev), dev, ktd253,
&ktd253_backlight_ops, NULL);
if (IS_ERR(bl)) {
dev_err(dev, "failed to register backlight\n");
return PTR_ERR(bl);
}
bl->props.max_brightness = max_brightness;
/* When we just enable the GPIO line we set max brightness */
if (brightness) {
bl->props.brightness = brightness;
bl->props.power = BACKLIGHT_POWER_ON;
} else {
bl->props.brightness = 0;
bl->props.power = BACKLIGHT_POWER_OFF;
}
ktd253->bl = bl;
platform_set_drvdata(pdev, bl);
backlight_update_status(bl);
return 0;
}
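/*
 * Illustrative device tree usage (the property and GPIO names are taken from
 * the probe code above; the node itself is hypothetical):
 *
 *	backlight {
 *		compatible = "kinetic,ktd253";
 *		enable-gpios = <&gpio 20 GPIO_ACTIVE_HIGH>;
 *		max-brightness = <32>;
 *		default-brightness = <13>;
 *	};
 */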
static const struct of_device_id ktd253_backlight_of_match[] = {
{ .compatible = "kinetic,ktd253" },
{ .compatible = "kinetic,ktd259" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ktd253_backlight_of_match);
static struct platform_driver ktd253_backlight_driver = {
.driver = {
.name = "ktd253-backlight",
.of_match_table = ktd253_backlight_of_match,
},
.probe = ktd253_backlight_probe,
};
module_platform_driver(ktd253_backlight_driver);
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("Kinetic KTD253 Backlight Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ktd253-backlight");
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Allegro A8293 SEC driver
*
* Copyright (C) 2011 Antti Palosaari <[email protected]>
*/
#include "a8293.h"
#define A8293_FLAG_ODT 0x10
struct a8293_dev {
struct i2c_client *client;
u8 reg[2];
int volt_slew_nanos_per_mv;
};
/*
* When increasing voltage, do so in minimal steps over time, minimizing
* risk of vIN undervoltage.
*/
static int a8293_set_voltage_slew(struct a8293_dev *dev,
struct i2c_client *client,
enum fe_sec_voltage fe_sec_voltage,
int min_nanos_per_mv)
{
int ret;
u8 reg0, reg1;
int new_volt_idx;
const int idx_to_mv[] = {
0, 12709, 13042, 13375, 14042, 15042, 18042, 18709, 19042
};
const u8 idx_to_reg[] = {
0x00, 0x20, 0x21, 0x22, 0x24, 0x27, 0x28, 0x2A, 0x2B
};
int this_volt_idx;
u8 status;
	int prev_volt_idx, i;
dev_dbg(&client->dev, "set_voltage_slew fe_sec_voltage=%d\n",
fe_sec_voltage);
/* Read status register to clear any stale faults. */
ret = i2c_master_recv(client, &status, 1);
if (ret < 0)
goto err;
	/* Determine previous voltage from the shadowed register value */
	prev_volt_idx = 0;
	for (i = 0; i < ARRAY_SIZE(idx_to_reg); i++) {
		if ((dev->reg[0] & 0x2F) == idx_to_reg[i]) {
			prev_volt_idx = i;
			break;
		}
	}
/* Determine new voltage */
switch (fe_sec_voltage) {
case SEC_VOLTAGE_OFF:
new_volt_idx = 0;
break;
case SEC_VOLTAGE_13:
new_volt_idx = 2;
break;
case SEC_VOLTAGE_18:
new_volt_idx = 6;
break;
default:
ret = -EINVAL;
goto err;
}
/* Slew to new voltage if new voltage is greater than current voltage */
this_volt_idx = prev_volt_idx;
if (this_volt_idx < new_volt_idx) {
while (this_volt_idx < new_volt_idx) {
int delta_mv = idx_to_mv[this_volt_idx+1] - idx_to_mv[this_volt_idx];
int min_wait_time = delta_mv * min_nanos_per_mv;
reg0 = idx_to_reg[this_volt_idx+1];
reg0 |= A8293_FLAG_ODT;
			ret = i2c_master_send(client, &reg0, 1);
if (ret < 0)
goto err;
dev->reg[0] = reg0;
this_volt_idx++;
usleep_range(min_wait_time, min_wait_time * 2);
}
} else { /* Else just set the voltage */
reg0 = idx_to_reg[new_volt_idx];
reg0 |= A8293_FLAG_ODT;
		ret = i2c_master_send(client, &reg0, 1);
if (ret < 0)
goto err;
dev->reg[0] = reg0;
}
/* TMODE=0, TGATE=1 */
reg1 = 0x82;
if (reg1 != dev->reg[1]) {
		ret = i2c_master_send(client, &reg1, 1);
if (ret < 0)
goto err;
dev->reg[1] = reg1;
}
usleep_range(1500, 5000);
return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
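/*
 * Worked example (illustrative): stepping from SEC_VOLTAGE_13 (index 2,
 * 13042 mV) to SEC_VOLTAGE_18 (index 6, 18042 mV) walks four entries of
 * idx_to_mv with per-step deltas of 333, 667, 1000 and 3000 mV. With
 * min_nanos_per_mv = 1000, the per-step minimum waits handed to
 * usleep_range() are 333000, 667000, 1000000 and 3000000, so the total
 * slew time scales linearly with the full 5000 mV swing.
 */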
static int a8293_set_voltage_noslew(struct dvb_frontend *fe,
enum fe_sec_voltage fe_sec_voltage)
{
struct a8293_dev *dev = fe->sec_priv;
struct i2c_client *client = dev->client;
int ret;
u8 reg0, reg1;
dev_dbg(&client->dev, "set_voltage_noslew fe_sec_voltage=%d\n",
fe_sec_voltage);
switch (fe_sec_voltage) {
case SEC_VOLTAGE_OFF:
/* ENB=0 */
reg0 = 0x10;
break;
case SEC_VOLTAGE_13:
/* VSEL0=1, VSEL1=0, VSEL2=0, VSEL3=0, ENB=1*/
reg0 = 0x31;
break;
case SEC_VOLTAGE_18:
/* VSEL0=0, VSEL1=0, VSEL2=0, VSEL3=1, ENB=1*/
reg0 = 0x38;
break;
default:
ret = -EINVAL;
goto err;
}
if (reg0 != dev->reg[0]) {
		ret = i2c_master_send(client, &reg0, 1);
if (ret < 0)
goto err;
dev->reg[0] = reg0;
}
/* TMODE=0, TGATE=1 */
reg1 = 0x82;
if (reg1 != dev->reg[1]) {
		ret = i2c_master_send(client, &reg1, 1);
if (ret < 0)
goto err;
dev->reg[1] = reg1;
}
usleep_range(1500, 50000);
return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int a8293_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage fe_sec_voltage)
{
struct a8293_dev *dev = fe->sec_priv;
struct i2c_client *client = dev->client;
int volt_slew_nanos_per_mv = dev->volt_slew_nanos_per_mv;
dev_dbg(&client->dev, "set_voltage volt_slew_nanos_per_mv=%d\n",
volt_slew_nanos_per_mv);
	/* Use the slew version if the slew rate is set to a sane value */
	if (volt_slew_nanos_per_mv > 0 && volt_slew_nanos_per_mv < 1600)
		return a8293_set_voltage_slew(dev, client, fe_sec_voltage,
					      volt_slew_nanos_per_mv);
	return a8293_set_voltage_noslew(fe, fe_sec_voltage);
}
static int a8293_probe(struct i2c_client *client)
{
struct a8293_dev *dev;
struct a8293_platform_data *pdata = client->dev.platform_data;
struct dvb_frontend *fe = pdata->dvb_frontend;
int ret;
u8 buf[2];
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
goto err;
}
dev->client = client;
dev->volt_slew_nanos_per_mv = pdata->volt_slew_nanos_per_mv;
/* check if the SEC is there */
ret = i2c_master_recv(client, buf, 2);
if (ret < 0)
goto err_kfree;
/* override frontend ops */
fe->ops.set_voltage = a8293_set_voltage;
fe->sec_priv = dev;
i2c_set_clientdata(client, dev);
dev_info(&client->dev, "Allegro A8293 SEC successfully attached\n");
return 0;
err_kfree:
kfree(dev);
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static void a8293_remove(struct i2c_client *client)
{
struct a8293_dev *dev = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
kfree(dev);
}
static const struct i2c_device_id a8293_id_table[] = {
{ "a8293" },
{}
};
MODULE_DEVICE_TABLE(i2c, a8293_id_table);
static struct i2c_driver a8293_driver = {
.driver = {
.name = "a8293",
.suppress_bind_attrs = true,
},
.probe = a8293_probe,
.remove = a8293_remove,
.id_table = a8293_id_table,
};
module_i2c_driver(a8293_driver);
MODULE_AUTHOR("Antti Palosaari <[email protected]>");
MODULE_DESCRIPTION("Allegro A8293 SEC driver");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0
/* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek ([email protected])
* Copyright (C) 1997, 2007 David S. Miller ([email protected])
*
* These routines maintain argument size conversion between 32bit and 64bit
* environment.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/uio.h>
#include <linux/quota.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/filter.h>
#include <linux/highmem.h>
#include <linux/highuid.h>
#include <linux/mman.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/icmpv6.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
#include <linux/dnotify.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <asm/types.h>
#include <linux/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/mmu_context.h>
#include <asm/compat_signal.h>
#include "systbls.h"
COMPAT_SYSCALL_DEFINE3(truncate64, const char __user *, path, u32, high, u32, low)
{
return ksys_truncate(path, ((u64)high << 32) | low);
}
COMPAT_SYSCALL_DEFINE3(ftruncate64, unsigned int, fd, u32, high, u32, low)
{
return ksys_ftruncate(fd, ((u64)high << 32) | low);
}
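/*
 * The compat wrappers in this file share one pattern: a 64-bit quantity
 * arrives from 32-bit userspace as two u32 halves and is reassembled as
 * ((u64)high << 32) | low. Seen from the 32-bit caller's side (illustrative
 * sketch, not kernel code):
 *
 *	loff_t len = 0x123456789aULL;
 *	u32 high = len >> 32;	// 0x12
 *	u32 low = len;		// 0x3456789a
 *	syscall(SYS_truncate64, path, high, low);
 *
 * The high word is passed before the low word, matching the argument order
 * of the wrappers here.
 */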
static int cp_compat_stat64(struct kstat *stat,
struct compat_stat64 __user *statbuf)
{
int err;
err = put_user(huge_encode_dev(stat->dev), &statbuf->st_dev);
err |= put_user(stat->ino, &statbuf->st_ino);
err |= put_user(stat->mode, &statbuf->st_mode);
err |= put_user(stat->nlink, &statbuf->st_nlink);
err |= put_user(from_kuid_munged(current_user_ns(), stat->uid), &statbuf->st_uid);
err |= put_user(from_kgid_munged(current_user_ns(), stat->gid), &statbuf->st_gid);
err |= put_user(huge_encode_dev(stat->rdev), &statbuf->st_rdev);
err |= put_user(0, (unsigned long __user *) &statbuf->__pad3[0]);
err |= put_user(stat->size, &statbuf->st_size);
err |= put_user(stat->blksize, &statbuf->st_blksize);
err |= put_user(0, (unsigned int __user *) &statbuf->__pad4[0]);
err |= put_user(0, (unsigned int __user *) &statbuf->__pad4[4]);
err |= put_user(stat->blocks, &statbuf->st_blocks);
err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
err |= put_user(0, &statbuf->__unused4);
err |= put_user(0, &statbuf->__unused5);
return err;
}
COMPAT_SYSCALL_DEFINE2(stat64, const char __user *, filename,
struct compat_stat64 __user *, statbuf)
{
struct kstat stat;
int error = vfs_stat(filename, &stat);
if (!error)
error = cp_compat_stat64(&stat, statbuf);
return error;
}
COMPAT_SYSCALL_DEFINE2(lstat64, const char __user *, filename,
struct compat_stat64 __user *, statbuf)
{
struct kstat stat;
int error = vfs_lstat(filename, &stat);
if (!error)
error = cp_compat_stat64(&stat, statbuf);
return error;
}
COMPAT_SYSCALL_DEFINE2(fstat64, unsigned int, fd,
struct compat_stat64 __user *, statbuf)
{
struct kstat stat;
int error = vfs_fstat(fd, &stat);
if (!error)
error = cp_compat_stat64(&stat, statbuf);
return error;
}
COMPAT_SYSCALL_DEFINE4(fstatat64, unsigned int, dfd,
const char __user *, filename,
struct compat_stat64 __user *, statbuf, int, flag)
{
struct kstat stat;
int error;
error = vfs_fstatat(dfd, filename, &stat, flag);
if (error)
return error;
return cp_compat_stat64(&stat, statbuf);
}
COMPAT_SYSCALL_DEFINE3(sparc_sigaction, int, sig,
struct compat_old_sigaction __user *,act,
struct compat_old_sigaction __user *,oact)
{
WARN_ON_ONCE(sig >= 0);
return compat_sys_sigaction(-sig, act, oact);
}
COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig,
struct compat_sigaction __user *,act,
struct compat_sigaction __user *,oact,
void __user *,restorer,
compat_size_t,sigsetsize)
{
struct k_sigaction new_ka, old_ka;
int ret;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(compat_sigset_t))
return -EINVAL;
if (act) {
u32 u_handler, u_restorer;
new_ka.ka_restorer = restorer;
ret = get_user(u_handler, &act->sa_handler);
new_ka.sa.sa_handler = compat_ptr(u_handler);
ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
ret |= get_user(u_restorer, &act->sa_restorer);
new_ka.sa.sa_restorer = compat_ptr(u_restorer);
if (ret)
return -EFAULT;
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
sizeof(oact->sa_mask));
ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
if (ret)
ret = -EFAULT;
}
return ret;
}
COMPAT_SYSCALL_DEFINE5(pread64, unsigned int, fd, char __user *, ubuf,
compat_size_t, count, u32, poshi, u32, poslo)
{
return ksys_pread64(fd, ubuf, count, ((u64)poshi << 32) | poslo);
}
COMPAT_SYSCALL_DEFINE5(pwrite64, unsigned int, fd, char __user *, ubuf,
compat_size_t, count, u32, poshi, u32, poslo)
{
return ksys_pwrite64(fd, ubuf, count, ((u64)poshi << 32) | poslo);
}
COMPAT_SYSCALL_DEFINE4(readahead, int, fd, u32, offhi, u32, offlo,
compat_size_t, count)
{
return ksys_readahead(fd, ((u64)offhi << 32) | offlo, count);
}
COMPAT_SYSCALL_DEFINE5(fadvise64, int, fd, u32, offhi, u32, offlo,
compat_size_t, len, int, advice)
{
return ksys_fadvise64_64(fd, ((u64)offhi << 32) | offlo, len, advice);
}
COMPAT_SYSCALL_DEFINE6(fadvise64_64, int, fd, u32, offhi, u32, offlo,
u32, lenhi, u32, lenlo, int, advice)
{
return ksys_fadvise64_64(fd,
((u64)offhi << 32) | offlo,
((u64)lenhi << 32) | lenlo,
advice);
}
COMPAT_SYSCALL_DEFINE6(sync_file_range, unsigned int, fd, u32, off_high, u32, off_low,
u32, nb_high, u32, nb_low, unsigned int, flags)
{
return ksys_sync_file_range(fd,
((u64)off_high << 32) | off_low,
((u64)nb_high << 32) | nb_low,
flags);
}
COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, u32, offhi, u32, offlo,
u32, lenhi, u32, lenlo)
{
return ksys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
((loff_t)lenhi << 32) | lenlo);
}