// SPDX-License-Identifier: GPL-2.0-or-later /* * cx18 functions to query card hardware * * Derived from ivtv-cards.c * * Copyright (C) 2007 Hans Verkuil <[email protected]> * Copyright (C) 2008 Andy Walls <[email protected]> */ #include "cx18-driver.h" #include "cx18-cards.h" #include "cx18-av-core.h" #include "cx18-i2c.h" #include <media/i2c/cs5345.h> #define V4L2_STD_PAL_SECAM (V4L2_STD_PAL|V4L2_STD_SECAM) /********************** card configuration *******************************/ /* usual i2c tuner addresses to probe */ static struct cx18_card_tuner_i2c cx18_i2c_std = { .radio = { I2C_CLIENT_END }, .demod = { 0x43, I2C_CLIENT_END }, .tv = { 0x61, 0x60, I2C_CLIENT_END }, }; /* * usual i2c tuner addresses to probe with additional demod address for * an NXP TDA8295 at 0x42 (N.B. it can possibly be at 0x4b or 0x4c too). */ static struct cx18_card_tuner_i2c cx18_i2c_nxp = { .radio = { I2C_CLIENT_END }, .demod = { 0x42, 0x43, I2C_CLIENT_END }, .tv = { 0x61, 0x60, I2C_CLIENT_END }, }; /* Please add new PCI IDs to: https://pci-ids.ucw.cz/ This keeps the PCI ID database up to date. Note that the entries must be added under vendor 0x4444 (Conexant) as subsystem IDs. New vendor IDs should still be added to the vendor ID list. */ /* Hauppauge HVR-1600 cards */ /* Note: for Hauppauge cards the tveeprom information is used instead of PCI IDs */ static const struct cx18_card cx18_card_hvr1600_esmt = { .type = CX18_CARD_HVR_1600_ESMT, .name = "Hauppauge HVR-1600", .comment = "Simultaneous Digital and Analog TV capture supported\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_CS5345, .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL | CX18_HW_Z8F0811_IR_HAUP, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 }, { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 }, { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 }, { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 }, .ddr = { /* ESMT M13S128324A-5B memory */ .chip_config = 0x003, .refresh = 0x30c, .timing1 = 0x44220e82, .timing2 = 0x08, .tune_lane = 0, .initial_emrs = 0, }, .gpio_init.initial_value = 0x3001, .gpio_init.direction = 0x3001, .gpio_i2c_slave_reset = { .active_lo_mask = 0x3001, .msecs_asserted = 10, .msecs_recovery = 40, .ir_reset_mask = 0x0001, }, .i2c = &cx18_i2c_std, }; static const struct cx18_card cx18_card_hvr1600_s5h1411 = { .type = CX18_CARD_HVR_1600_S5H1411, .name = "Hauppauge HVR-1600", .comment = "Simultaneous Digital and Analog TV capture supported\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_CS5345, .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL | CX18_HW_Z8F0811_IR_HAUP, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 }, { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 }, { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO8, 
CS5345_IN_1 | CS5345_MCLK_1_5 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 }, { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 }, .ddr = { /* ESMT M13S128324A-5B memory */ .chip_config = 0x003, .refresh = 0x30c, .timing1 = 0x44220e82, .timing2 = 0x08, .tune_lane = 0, .initial_emrs = 0, }, .gpio_init.initial_value = 0x3801, .gpio_init.direction = 0x3801, .gpio_i2c_slave_reset = { .active_lo_mask = 0x3801, .msecs_asserted = 10, .msecs_recovery = 40, .ir_reset_mask = 0x0001, }, .i2c = &cx18_i2c_nxp, }; static const struct cx18_card cx18_card_hvr1600_samsung = { .type = CX18_CARD_HVR_1600_SAMSUNG, .name = "Hauppauge HVR-1600 (Preproduction)", .comment = "Simultaneous Digital and Analog TV capture supported\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_CS5345, .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL | CX18_HW_Z8F0811_IR_HAUP, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 }, { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 }, { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 }, { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 }, .ddr = { /* Samsung K4D263238G-VC33 memory */ .chip_config = 0x003, .refresh = 0x30c, .timing1 = 0x23230b73, .timing2 = 0x08, .tune_lane = 0, .initial_emrs = 2, }, .gpio_init.initial_value = 0x3001, .gpio_init.direction = 0x3001, .gpio_i2c_slave_reset = { .active_lo_mask = 0x3001, .msecs_asserted = 10, .msecs_recovery = 40, .ir_reset_mask = 0x0001, }, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* Compro VideoMate H900: note that this card is analog only! */ static const struct cx18_card_pci_info cx18_pci_h900[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_COMPRO, 0xe100 }, { 0, 0, 0 } }; static const struct cx18_card cx18_card_h900 = { .type = CX18_CARD_COMPRO_H900, .name = "Compro VideoMate H900", .comment = "Analog TV capture supported\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_RESET_CTRL, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 0 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, 0 }, .tuners = { { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, }, .ddr = { /* EtronTech EM6A9160TS-5G memory */ .chip_config = 0x50003, .refresh = 0x753, .timing1 = 0x24330e84, .timing2 = 0x1f, .tune_lane = 0, .initial_emrs = 0, }, .xceive_pin = 15, .pci_list = cx18_pci_h900, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* Yuan MPC718: not working at the moment! 
*/ static const struct cx18_card_pci_info cx18_pci_mpc718[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_YUAN, 0x0718 }, { 0, 0, 0 } }; static const struct cx18_card cx18_card_mpc718 = { .type = CX18_CARD_YUAN_MPC718, .name = "Yuan MPC718 MiniPCI DVB-T/Analog", .comment = "Experimenters needed for device to work well.\n" "\tTo help, mail the linux-media list (www.linuxtv.org).\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_GPIO_MUX, .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 }, { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 }, { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL2, 1 }, }, .tuners = { /* XC3028 tuner */ { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, }, /* FIXME - the FM radio is just a guess and driver doesn't use SIF */ .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 }, .ddr = { /* Hynix HY5DU283222B DDR RAM */ .chip_config = 0x303, .refresh = 0x3bd, .timing1 = 0x36320966, .timing2 = 0x1f, .tune_lane = 0, .initial_emrs = 2, }, .gpio_init.initial_value = 0x1, .gpio_init.direction = 0x3, /* FIXME - these GPIO's are just guesses */ .gpio_audio_input = { .mask = 0x3, .tuner = 0x1, .linein = 0x3, .radio = 0x1 }, .xceive_pin = 0, .pci_list = cx18_pci_mpc718, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* GoTView PCI */ static const struct cx18_card_pci_info cx18_pci_gotview_dvd3[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_GOTVIEW, 0x3343 }, { 0, 0, 0 } }; static const struct cx18_card cx18_card_gotview_dvd3 = { .type = CX18_CARD_GOTVIEW_PCI_DVD3, .name = "GoTView PCI DVD3 Hybrid", .comment = "Experimenters needed for device to work well.\n" "\tTo help, mail the linux-media list (www.linuxtv.org).\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_GPIO_MUX, .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 }, { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 }, { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL2, 1 }, }, .tuners = { /* XC3028 tuner */ { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, }, /* FIXME - the FM radio is just a guess and driver doesn't use SIF */ .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 }, .ddr = { /* Hynix HY5DU283222B DDR RAM */ .chip_config = 0x303, .refresh = 0x3bd, .timing1 = 0x36320966, .timing2 = 0x1f, .tune_lane = 0, .initial_emrs = 2, }, .gpio_init.initial_value = 0x1, .gpio_init.direction = 0x3, .gpio_audio_input = { .mask = 0x3, .tuner = 0x1, .linein = 0x2, .radio = 0x1 }, .xceive_pin = 0, .pci_list = 
cx18_pci_gotview_dvd3, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* Conexant Raptor PAL/SECAM: note that this card is analog only! */ static const struct cx18_card_pci_info cx18_pci_cnxt_raptor_pal[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_CONEXANT, 0x0009 }, { 0, 0, 0 } }; static const struct cx18_card cx18_card_cnxt_raptor_pal = { .type = CX18_CARD_CNXT_RAPTOR_PAL, .name = "Conexant Raptor PAL/SECAM", .comment = "Analog TV capture supported\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_GPIO_MUX, .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 }, { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 }, { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL2, 1 }, }, .tuners = { { .std = V4L2_STD_PAL_SECAM, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, 2 }, .ddr = { /* MT 46V16M16 memory */ .chip_config = 0x50306, .refresh = 0x753, .timing1 = 0x33220953, .timing2 = 0x09, .tune_lane = 0, .initial_emrs = 0, }, .gpio_init.initial_value = 0x1002, .gpio_init.direction = 0xf002, .gpio_audio_input = { .mask = 0xf002, .tuner = 0x1002, /* LED D1 Tuner AF */ .linein = 0x2000, /* LED D2 Line In 1 */ .radio = 0x4002 }, /* LED D3 Tuner AF */ .pci_list = cx18_pci_cnxt_raptor_pal, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* Toshiba Qosmio laptop internal DVB-T/Analog Hybrid Tuner */ static const struct cx18_card_pci_info cx18_pci_toshiba_qosmio_dvbt[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_TOSHIBA, 0x0110 }, { 0, 0, 0 } }; static const struct cx18_card cx18_card_toshiba_qosmio_dvbt = { .type = CX18_CARD_TOSHIBA_QOSMIO_DVBT, .name = "Toshiba Qosmio DVB-T/Analog", .comment = "Experimenters and photos needed for device to work well.\n" "\tTo help, mail the linux-media list (www.linuxtv.org).\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_RESET_CTRL, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE6 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, }, .tuners = { { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, }, .ddr = { .chip_config = 0x202, .refresh = 0x3bb, .timing1 = 0x33320a63, .timing2 = 0x0a, .tune_lane = 0, .initial_emrs = 0x42, }, .xceive_pin = 15, .pci_list = cx18_pci_toshiba_qosmio_dvbt, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* Leadtek WinFast PVR2100 */ static const struct cx18_card_pci_info cx18_pci_leadtek_pvr2100[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_LEADTEK, 0x6f27 }, /* PVR2100 */ { 0, 0, 0 } }; static const struct cx18_card cx18_card_leadtek_pvr2100 = { .type = CX18_CARD_LEADTEK_PVR2100, .name = "Leadtek WinFast PVR2100", .comment = "Experimenters and photos 
needed for device to work well.\n" "\tTo help, mail the linux-media list (www.linuxtv.org).\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_GPIO_MUX, .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX | CX18_HW_GPIO_RESET_CTRL, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 }, { CX18_CARD_INPUT_COMPONENT1, 1, CX18_AV_COMPONENT1 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, }, .tuners = { /* XC2028 tuner */ { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 }, .ddr = { /* Pointer to proper DDR config values provided by Terry Wu */ .chip_config = 0x303, .refresh = 0x3bb, .timing1 = 0x24220e83, .timing2 = 0x1f, .tune_lane = 0, .initial_emrs = 0x2, }, .gpio_init.initial_value = 0x6, .gpio_init.direction = 0x7, .gpio_audio_input = { .mask = 0x7, .tuner = 0x6, .linein = 0x2, .radio = 0x2 }, .xceive_pin = 1, .pci_list = cx18_pci_leadtek_pvr2100, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* Leadtek WinFast DVR3100 H */ static const struct cx18_card_pci_info cx18_pci_leadtek_dvr3100h[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_LEADTEK, 0x6690 }, /* DVR3100 H */ { 0, 0, 0 } }; static const struct cx18_card cx18_card_leadtek_dvr3100h = { .type = CX18_CARD_LEADTEK_DVR3100H, .name = "Leadtek WinFast DVR3100 H", .comment = "Simultaneous DVB-T and Analog capture supported,\n" "\texcept when capturing Analog from the antenna input.\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_GPIO_MUX, .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 }, { CX18_CARD_INPUT_COMPONENT1, 1, CX18_AV_COMPONENT1 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, }, .tuners = { /* XC3028 tuner */ { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 }, .ddr = { /* Pointer to proper DDR config values provided by Terry Wu */ .chip_config = 0x303, .refresh = 0x3bb, .timing1 = 0x24220e83, .timing2 = 0x1f, .tune_lane = 0, .initial_emrs = 0x2, }, .gpio_init.initial_value = 0x6, .gpio_init.direction = 0x7, .gpio_audio_input = { .mask = 0x7, .tuner = 0x6, .linein = 0x2, .radio = 0x2 }, .xceive_pin = 1, .pci_list = cx18_pci_leadtek_dvr3100h, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ static const struct cx18_card *cx18_card_list[] = { &cx18_card_hvr1600_esmt, &cx18_card_hvr1600_samsung, &cx18_card_h900, &cx18_card_mpc718, &cx18_card_cnxt_raptor_pal, &cx18_card_toshiba_qosmio_dvbt, &cx18_card_leadtek_pvr2100, &cx18_card_leadtek_dvr3100h, &cx18_card_gotview_dvd3, &cx18_card_hvr1600_s5h1411 }; const struct cx18_card *cx18_get_card(u16 index) { if (index >= ARRAY_SIZE(cx18_card_list)) return NULL; return cx18_card_list[index]; } int cx18_get_input(struct cx18 *cx, u16 index, struct v4l2_input *input) { const struct 
cx18_card_video_input *card_input = cx->card->video_inputs + index;
	static const char * const input_strs[] = {
		"Tuner 1", "S-Video 1", "S-Video 2",
		"Composite 1", "Composite 2", "Component 1"
	};

	if (index >= cx->nof_inputs)
		return -EINVAL;

	input->index = index;
	strscpy(input->name, input_strs[card_input->video_type - 1],
		sizeof(input->name));
	input->type = (card_input->video_type == CX18_CARD_INPUT_VID_TUNER ?
			V4L2_INPUT_TYPE_TUNER : V4L2_INPUT_TYPE_CAMERA);
	input->audioset = (1 << cx->nof_audio_inputs) - 1;
	input->std = (input->type == V4L2_INPUT_TYPE_TUNER) ?
			cx->tuner_std : V4L2_STD_ALL;
	return 0;
}

int cx18_get_audio_input(struct cx18 *cx, u16 index, struct v4l2_audio *audio)
{
	const struct cx18_card_audio_input *aud_input =
		cx->card->audio_inputs + index;
	static const char * const input_strs[] = {
		"Tuner 1", "Line In 1", "Line In 2"
	};

	memset(audio, 0, sizeof(*audio));
	if (index >= cx->nof_audio_inputs)
		return -EINVAL;

	strscpy(audio->name, input_strs[aud_input->audio_type - 1],
		sizeof(audio->name));
	audio->index = index;
	audio->capability = V4L2_AUDCAP_STEREO;
	return 0;
}
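/*
 * Hypothetical helper, not part of cx18-cards.c: a minimal sketch of how the
 * per-card pci_list tables above, together with cx18_get_card(), could be
 * used to pick a card entry from PCI subsystem IDs.  The field names of
 * struct cx18_card_pci_info (device, subsystem_vendor, subsystem_device) are
 * assumed from the positional initializers; Hauppauge HVR-1600 entries carry
 * no pci_list because those boards are identified via tveeprom instead.
 */
static const struct cx18_card *cx18_find_card_by_subsystem(u16 device,
							    u16 subvendor,
							    u16 subdevice)
{
	const struct cx18_card *card;
	const struct cx18_card_pci_info *pci;
	u16 i;

	for (i = 0; (card = cx18_get_card(i)) != NULL; i++) {
		if (!card->pci_list)
			continue;
		/* pci_list arrays are terminated by an all-zero entry */
		for (pci = card->pci_list; pci->device; pci++) {
			if (pci->device == device &&
			    pci->subsystem_vendor == subvendor &&
			    pci->subsystem_device == subdevice)
				return card;
		}
	}
	return NULL;
}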
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 Analog Devices, Inc.
 * Author: Cosmin Tanislav <[email protected]>
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/crc8.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <linux/units.h>
#include <linux/unaligned.h>

#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

#define AD74115_NAME				"ad74115"

#define AD74115_CH_FUNC_SETUP_REG		0x01

#define AD74115_ADC_CONFIG_REG			0x02
#define AD74115_ADC_CONFIG_CONV2_RATE_MASK	GENMASK(15, 13)
#define AD74115_ADC_CONFIG_CONV1_RATE_MASK	GENMASK(12, 10)
#define AD74115_ADC_CONFIG_CONV2_RANGE_MASK	GENMASK(9, 7)
#define AD74115_ADC_CONFIG_CONV1_RANGE_MASK	GENMASK(6, 4)

#define AD74115_PWR_OPTIM_CONFIG_REG		0x03

#define AD74115_DIN_CONFIG1_REG			0x04
#define AD74115_DIN_COMPARATOR_EN_MASK		BIT(13)
#define AD74115_DIN_SINK_MASK			GENMASK(11, 7)
#define AD74115_DIN_DEBOUNCE_MASK		GENMASK(4, 0)

#define AD74115_DIN_CONFIG2_REG			0x05
#define AD74115_COMP_THRESH_MASK		GENMASK(6, 0)

#define AD74115_OUTPUT_CONFIG_REG		0x06
#define AD74115_OUTPUT_SLEW_EN_MASK		GENMASK(6, 5)
#define AD74115_OUTPUT_SLEW_LIN_STEP_MASK	GENMASK(4, 3)
#define AD74115_OUTPUT_SLEW_LIN_RATE_MASK	GENMASK(2, 1)

#define AD74115_RTD3W4W_CONFIG_REG		0x07

#define AD74115_BURNOUT_CONFIG_REG		0x0a
#define AD74115_BURNOUT_EXT2_EN_MASK		BIT(10)
#define AD74115_BURNOUT_EXT1_EN_MASK		BIT(5)
#define AD74115_BURNOUT_VIOUT_EN_MASK		BIT(0)

#define AD74115_DAC_CODE_REG			0x0b
#define AD74115_DAC_ACTIVE_REG			0x0d

#define AD74115_GPIO_CONFIG_X_REG(x)		(0x35 + (x))
#define AD74115_GPIO_CONFIG_GPI_DATA		BIT(5)
#define AD74115_GPIO_CONFIG_GPO_DATA		BIT(4)
#define AD74115_GPIO_CONFIG_SELECT_MASK		GENMASK(2, 0)

#define AD74115_CHARGE_PUMP_REG			0x3a

#define AD74115_ADC_CONV_CTRL_REG		0x3b
#define AD74115_ADC_CONV_SEQ_MASK		GENMASK(13, 12)

#define AD74115_DIN_COMP_OUT_REG		0x40

#define AD74115_LIVE_STATUS_REG			0x42
#define AD74115_ADC_DATA_RDY_MASK		BIT(3)

#define AD74115_READ_SELECT_REG			0x64

#define AD74115_CMD_KEY_REG			0x78
#define AD74115_CMD_KEY_RESET1			0x15fa
#define AD74115_CMD_KEY_RESET2			0xaf51

#define AD74115_CRC_POLYNOMIAL			0x7
DECLARE_CRC8_TABLE(ad74115_crc8_table);

#define AD74115_ADC_CODE_MAX			((int)GENMASK(15, 0))
#define AD74115_ADC_CODE_HALF			(AD74115_ADC_CODE_MAX / 2)

#define AD74115_DAC_VOLTAGE_MAX			12000
#define AD74115_DAC_CURRENT_MAX			25
#define AD74115_DAC_CODE_MAX			((int)GENMASK(13, 0))
#define AD74115_DAC_CODE_HALF			(AD74115_DAC_CODE_MAX / 2)

#define AD74115_COMP_THRESH_MAX			98

#define AD74115_SENSE_RESISTOR_OHMS		100
#define AD74115_REF_RESISTOR_OHMS		2100

#define AD74115_DIN_SINK_LOW_STEP		120
#define AD74115_DIN_SINK_HIGH_STEP		240
#define AD74115_DIN_SINK_MAX			31

#define AD74115_FRAME_SIZE			4
#define AD74115_GPIO_NUM			4

#define AD74115_CONV_TIME_US			1000000

enum ad74115_dac_ch {
	AD74115_DAC_CH_MAIN,
	AD74115_DAC_CH_COMPARATOR,
};

enum ad74115_adc_ch {
	AD74115_ADC_CH_CONV1,
	AD74115_ADC_CH_CONV2,
	AD74115_ADC_CH_NUM
};

enum ad74115_ch_func {
	AD74115_CH_FUNC_HIGH_IMPEDANCE,
	AD74115_CH_FUNC_VOLTAGE_OUTPUT,
	AD74115_CH_FUNC_CURRENT_OUTPUT,
	AD74115_CH_FUNC_VOLTAGE_INPUT,
	AD74115_CH_FUNC_CURRENT_INPUT_EXT_POWER,
	AD74115_CH_FUNC_CURRENT_INPUT_LOOP_POWER,
	AD74115_CH_FUNC_2_WIRE_RESISTANCE_INPUT,
	AD74115_CH_FUNC_3_4_WIRE_RESISTANCE_INPUT,
	AD74115_CH_FUNC_DIGITAL_INPUT_LOGIC,
AD74115_CH_FUNC_DIGITAL_INPUT_LOOP_POWER, AD74115_CH_FUNC_CURRENT_OUTPUT_HART, AD74115_CH_FUNC_CURRENT_INPUT_EXT_POWER_HART, AD74115_CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART, AD74115_CH_FUNC_MAX = AD74115_CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART, AD74115_CH_FUNC_NUM }; enum ad74115_adc_range { AD74115_ADC_RANGE_12V, AD74115_ADC_RANGE_12V_BIPOLAR, AD74115_ADC_RANGE_2_5V_BIPOLAR, AD74115_ADC_RANGE_2_5V_NEG, AD74115_ADC_RANGE_2_5V, AD74115_ADC_RANGE_0_625V, AD74115_ADC_RANGE_104MV_BIPOLAR, AD74115_ADC_RANGE_12V_OTHER, AD74115_ADC_RANGE_MAX = AD74115_ADC_RANGE_12V_OTHER, AD74115_ADC_RANGE_NUM }; enum ad74115_adc_conv_seq { AD74115_ADC_CONV_SEQ_STANDBY = 0b00, AD74115_ADC_CONV_SEQ_SINGLE = 0b01, AD74115_ADC_CONV_SEQ_CONTINUOUS = 0b10, }; enum ad74115_din_threshold_mode { AD74115_DIN_THRESHOLD_MODE_AVDD, AD74115_DIN_THRESHOLD_MODE_FIXED, AD74115_DIN_THRESHOLD_MODE_MAX = AD74115_DIN_THRESHOLD_MODE_FIXED, }; enum ad74115_slew_mode { AD74115_SLEW_MODE_DISABLED, AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_MODE_HART, }; enum ad74115_slew_step { AD74115_SLEW_STEP_0_8_PERCENT, AD74115_SLEW_STEP_1_5_PERCENT, AD74115_SLEW_STEP_6_1_PERCENT, AD74115_SLEW_STEP_22_2_PERCENT, }; enum ad74115_slew_rate { AD74115_SLEW_RATE_4KHZ, AD74115_SLEW_RATE_64KHZ, AD74115_SLEW_RATE_150KHZ, AD74115_SLEW_RATE_240KHZ, }; enum ad74115_gpio_config { AD74115_GPIO_CONFIG_OUTPUT_BUFFERED = 0b010, AD74115_GPIO_CONFIG_INPUT = 0b011, }; enum ad74115_gpio_mode { AD74115_GPIO_MODE_LOGIC = 1, AD74115_GPIO_MODE_SPECIAL = 2, }; struct ad74115_channels { const struct iio_chan_spec *channels; unsigned int num_channels; }; struct ad74115_state { struct spi_device *spi; struct regmap *regmap; struct iio_trigger *trig; /* * Synchronize consecutive operations when doing a one-shot * conversion and when updating the ADC samples SPI message. */ struct mutex lock; struct gpio_chip gc; struct gpio_chip comp_gc; int irq; unsigned int avdd_mv; unsigned long gpio_valid_mask; bool dac_bipolar; bool dac_hart_slew; bool rtd_mode_4_wire; enum ad74115_ch_func ch_func; enum ad74115_din_threshold_mode din_threshold_mode; struct completion adc_data_completion; struct spi_message adc_samples_msg; struct spi_transfer adc_samples_xfer[AD74115_ADC_CH_NUM + 1]; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. 
*/ u8 reg_tx_buf[AD74115_FRAME_SIZE] __aligned(IIO_DMA_MINALIGN); u8 reg_rx_buf[AD74115_FRAME_SIZE]; u8 adc_samples_tx_buf[AD74115_FRAME_SIZE * AD74115_ADC_CH_NUM]; u8 adc_samples_rx_buf[AD74115_FRAME_SIZE * AD74115_ADC_CH_NUM]; }; struct ad74115_fw_prop { const char *name; bool is_boolean; bool negate; unsigned int max; unsigned int reg; unsigned int mask; const unsigned int *lookup_tbl; unsigned int lookup_tbl_len; }; #define AD74115_FW_PROP(_name, _max, _reg, _mask) \ { \ .name = (_name), \ .max = (_max), \ .reg = (_reg), \ .mask = (_mask), \ } #define AD74115_FW_PROP_TBL(_name, _tbl, _reg, _mask) \ { \ .name = (_name), \ .reg = (_reg), \ .mask = (_mask), \ .lookup_tbl = (_tbl), \ .lookup_tbl_len = ARRAY_SIZE(_tbl), \ } #define AD74115_FW_PROP_BOOL(_name, _reg, _mask) \ { \ .name = (_name), \ .is_boolean = true, \ .reg = (_reg), \ .mask = (_mask), \ } #define AD74115_FW_PROP_BOOL_NEG(_name, _reg, _mask) \ { \ .name = (_name), \ .is_boolean = true, \ .negate = true, \ .reg = (_reg), \ .mask = (_mask), \ } static const int ad74115_dac_rate_tbl[] = { 0, 4 * 8, 4 * 15, 4 * 61, 4 * 222, 64 * 8, 64 * 15, 64 * 61, 64 * 222, 150 * 8, 150 * 15, 150 * 61, 150 * 222, 240 * 8, 240 * 15, 240 * 61, 240 * 222, }; static const unsigned int ad74115_dac_rate_step_tbl[][3] = { { AD74115_SLEW_MODE_DISABLED }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_0_8_PERCENT, AD74115_SLEW_RATE_4KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_1_5_PERCENT, AD74115_SLEW_RATE_4KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_6_1_PERCENT, AD74115_SLEW_RATE_4KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_22_2_PERCENT, AD74115_SLEW_RATE_4KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_0_8_PERCENT, AD74115_SLEW_RATE_64KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_1_5_PERCENT, AD74115_SLEW_RATE_64KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_6_1_PERCENT, AD74115_SLEW_RATE_64KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_22_2_PERCENT, AD74115_SLEW_RATE_64KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_0_8_PERCENT, AD74115_SLEW_RATE_150KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_1_5_PERCENT, AD74115_SLEW_RATE_150KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_6_1_PERCENT, AD74115_SLEW_RATE_150KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_22_2_PERCENT, AD74115_SLEW_RATE_150KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_0_8_PERCENT, AD74115_SLEW_RATE_240KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_1_5_PERCENT, AD74115_SLEW_RATE_240KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_6_1_PERCENT, AD74115_SLEW_RATE_240KHZ }, { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_22_2_PERCENT, AD74115_SLEW_RATE_240KHZ }, }; static const unsigned int ad74115_rtd_excitation_current_ua_tbl[] = { 250, 500, 750, 1000 }; static const unsigned int ad74115_burnout_current_na_tbl[] = { 0, 50, 0, 500, 1000, 0, 10000, 0 }; static const unsigned int ad74115_viout_burnout_current_na_tbl[] = { 0, 0, 0, 0, 1000, 0, 10000, 0 }; static const unsigned int ad74115_gpio_mode_tbl[] = { 0, 0, 0, 1, 2, 3, 4, 5 }; static const unsigned int ad74115_adc_conv_rate_tbl[] = { 10, 20, 1200, 4800, 9600 }; static const unsigned int ad74115_debounce_tbl[] = { 0, 13, 18, 24, 32, 42, 56, 75, 100, 130, 180, 240, 320, 420, 560, 750, 1000, 1300, 1800, 2400, 3200, 4200, 5600, 7500, 10000, 13000, 18000, 24000, 32000, 42000, 56000, 75000, }; static const unsigned int ad74115_adc_ch_data_regs_tbl[] = { [AD74115_ADC_CH_CONV1] = 0x44, [AD74115_ADC_CH_CONV2] = 0x46, }; static const unsigned int 
ad74115_adc_ch_en_bit_tbl[] = { [AD74115_ADC_CH_CONV1] = BIT(0), [AD74115_ADC_CH_CONV2] = BIT(1), }; static const bool ad74115_adc_bipolar_tbl[AD74115_ADC_RANGE_NUM] = { [AD74115_ADC_RANGE_12V_BIPOLAR] = true, [AD74115_ADC_RANGE_2_5V_BIPOLAR] = true, [AD74115_ADC_RANGE_104MV_BIPOLAR] = true, }; static const unsigned int ad74115_adc_conv_mul_tbl[AD74115_ADC_RANGE_NUM] = { [AD74115_ADC_RANGE_12V] = 12000, [AD74115_ADC_RANGE_12V_BIPOLAR] = 24000, [AD74115_ADC_RANGE_2_5V_BIPOLAR] = 5000, [AD74115_ADC_RANGE_2_5V_NEG] = 2500, [AD74115_ADC_RANGE_2_5V] = 2500, [AD74115_ADC_RANGE_0_625V] = 625, [AD74115_ADC_RANGE_104MV_BIPOLAR] = 208, [AD74115_ADC_RANGE_12V_OTHER] = 12000, }; static const unsigned int ad74115_adc_gain_tbl[AD74115_ADC_RANGE_NUM][2] = { [AD74115_ADC_RANGE_12V] = { 5, 24 }, [AD74115_ADC_RANGE_12V_BIPOLAR] = { 5, 24 }, [AD74115_ADC_RANGE_2_5V_BIPOLAR] = { 1, 1 }, [AD74115_ADC_RANGE_2_5V_NEG] = { 1, 1 }, [AD74115_ADC_RANGE_2_5V] = { 1, 1 }, [AD74115_ADC_RANGE_0_625V] = { 4, 1 }, [AD74115_ADC_RANGE_104MV_BIPOLAR] = { 24, 1 }, [AD74115_ADC_RANGE_12V_OTHER] = { 5, 24 }, }; static const int ad74115_adc_range_tbl[AD74115_ADC_RANGE_NUM][2] = { [AD74115_ADC_RANGE_12V] = { 0, 12000000 }, [AD74115_ADC_RANGE_12V_BIPOLAR] = { -12000000, 12000000 }, [AD74115_ADC_RANGE_2_5V_BIPOLAR] = { -2500000, 2500000 }, [AD74115_ADC_RANGE_2_5V_NEG] = { -2500000, 0 }, [AD74115_ADC_RANGE_2_5V] = { 0, 2500000 }, [AD74115_ADC_RANGE_0_625V] = { 0, 625000 }, [AD74115_ADC_RANGE_104MV_BIPOLAR] = { -104000, 104000 }, [AD74115_ADC_RANGE_12V_OTHER] = { 0, 12000000 }, }; static int _ad74115_find_tbl_index(const unsigned int *tbl, unsigned int tbl_len, unsigned int val, unsigned int *index) { unsigned int i; for (i = 0; i < tbl_len; i++) if (val == tbl[i]) { *index = i; return 0; } return -EINVAL; } #define ad74115_find_tbl_index(tbl, val, index) \ _ad74115_find_tbl_index(tbl, ARRAY_SIZE(tbl), val, index) static int ad74115_crc(u8 *buf) { return crc8(ad74115_crc8_table, buf, 3, 0); } static void ad74115_format_reg_write(u8 reg, u16 val, u8 *buf) { buf[0] = reg; put_unaligned_be16(val, &buf[1]); buf[3] = ad74115_crc(buf); } static int ad74115_reg_write(void *context, unsigned int reg, unsigned int val) { struct ad74115_state *st = context; ad74115_format_reg_write(reg, val, st->reg_tx_buf); return spi_write(st->spi, st->reg_tx_buf, AD74115_FRAME_SIZE); } static int ad74115_crc_check(struct ad74115_state *st, u8 *buf) { struct device *dev = &st->spi->dev; u8 expected_crc = ad74115_crc(buf); if (buf[3] != expected_crc) { dev_err(dev, "Bad CRC %02x for %02x%02x%02x, expected %02x\n", buf[3], buf[0], buf[1], buf[2], expected_crc); return -EINVAL; } return 0; } static int ad74115_reg_read(void *context, unsigned int reg, unsigned int *val) { struct ad74115_state *st = context; struct spi_transfer reg_read_xfer[] = { { .tx_buf = st->reg_tx_buf, .len = sizeof(st->reg_tx_buf), .cs_change = 1, }, { .rx_buf = st->reg_rx_buf, .len = sizeof(st->reg_rx_buf), }, }; int ret; ad74115_format_reg_write(AD74115_READ_SELECT_REG, reg, st->reg_tx_buf); ret = spi_sync_transfer(st->spi, reg_read_xfer, ARRAY_SIZE(reg_read_xfer)); if (ret) return ret; ret = ad74115_crc_check(st, st->reg_rx_buf); if (ret) return ret; *val = get_unaligned_be16(&st->reg_rx_buf[1]); return 0; } static const struct regmap_config ad74115_regmap_config = { .reg_bits = 8, .val_bits = 16, .reg_read = ad74115_reg_read, .reg_write = ad74115_reg_write, }; static int ad74115_gpio_config_set(struct ad74115_state *st, unsigned int offset, enum ad74115_gpio_config cfg) { return 
regmap_update_bits(st->regmap, AD74115_GPIO_CONFIG_X_REG(offset), AD74115_GPIO_CONFIG_SELECT_MASK, FIELD_PREP(AD74115_GPIO_CONFIG_SELECT_MASK, cfg)); } static int ad74115_gpio_init_valid_mask(struct gpio_chip *gc, unsigned long *valid_mask, unsigned int ngpios) { struct ad74115_state *st = gpiochip_get_data(gc); *valid_mask = st->gpio_valid_mask; return 0; } static int ad74115_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) { struct ad74115_state *st = gpiochip_get_data(gc); unsigned int val; int ret; ret = regmap_read(st->regmap, AD74115_GPIO_CONFIG_X_REG(offset), &val); if (ret) return ret; return FIELD_GET(AD74115_GPIO_CONFIG_SELECT_MASK, val) == AD74115_GPIO_CONFIG_INPUT; } static int ad74115_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) { struct ad74115_state *st = gpiochip_get_data(gc); return ad74115_gpio_config_set(st, offset, AD74115_GPIO_CONFIG_INPUT); } static int ad74115_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value) { struct ad74115_state *st = gpiochip_get_data(gc); return ad74115_gpio_config_set(st, offset, AD74115_GPIO_CONFIG_OUTPUT_BUFFERED); } static int ad74115_gpio_get(struct gpio_chip *gc, unsigned int offset) { struct ad74115_state *st = gpiochip_get_data(gc); unsigned int val; int ret; ret = regmap_read(st->regmap, AD74115_GPIO_CONFIG_X_REG(offset), &val); if (ret) return ret; return FIELD_GET(AD74115_GPIO_CONFIG_GPI_DATA, val); } static void ad74115_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) { struct ad74115_state *st = gpiochip_get_data(gc); struct device *dev = &st->spi->dev; int ret; ret = regmap_update_bits(st->regmap, AD74115_GPIO_CONFIG_X_REG(offset), AD74115_GPIO_CONFIG_GPO_DATA, FIELD_PREP(AD74115_GPIO_CONFIG_GPO_DATA, value)); if (ret) dev_err(dev, "Failed to set GPIO %u output value, err: %d\n", offset, ret); } static int ad74115_set_comp_debounce(struct ad74115_state *st, unsigned int val) { unsigned int len = ARRAY_SIZE(ad74115_debounce_tbl); unsigned int i; for (i = 0; i < len; i++) if (val <= ad74115_debounce_tbl[i]) break; if (i == len) i = len - 1; return regmap_update_bits(st->regmap, AD74115_DIN_CONFIG1_REG, AD74115_DIN_DEBOUNCE_MASK, FIELD_PREP(AD74115_DIN_DEBOUNCE_MASK, val)); } static int ad74115_comp_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { return GPIO_LINE_DIRECTION_IN; } static int ad74115_comp_gpio_set_config(struct gpio_chip *chip, unsigned int offset, unsigned long config) { struct ad74115_state *st = gpiochip_get_data(chip); u32 param = pinconf_to_config_param(config); u32 arg = pinconf_to_config_argument(config); switch (param) { case PIN_CONFIG_INPUT_DEBOUNCE: return ad74115_set_comp_debounce(st, arg); default: return -ENOTSUPP; } } static int ad74115_comp_gpio_get(struct gpio_chip *chip, unsigned int offset) { struct ad74115_state *st = gpiochip_get_data(chip); unsigned int val; int ret; ret = regmap_read(st->regmap, AD74115_DIN_COMP_OUT_REG, &val); if (ret) return ret; return !!val; } static irqreturn_t ad74115_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ad74115_state *st = iio_priv(indio_dev); int ret; ret = spi_sync(st->spi, &st->adc_samples_msg); if (ret) goto out; iio_push_to_buffers(indio_dev, st->adc_samples_rx_buf); out: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; } static irqreturn_t ad74115_adc_data_interrupt(int irq, void *data) { struct iio_dev *indio_dev = data; struct ad74115_state *st = iio_priv(indio_dev); if 
(iio_buffer_enabled(indio_dev)) iio_trigger_poll(st->trig); else complete(&st->adc_data_completion); return IRQ_HANDLED; } static int ad74115_set_adc_ch_en(struct ad74115_state *st, enum ad74115_adc_ch channel, bool status) { unsigned int mask = ad74115_adc_ch_en_bit_tbl[channel]; return regmap_update_bits(st->regmap, AD74115_ADC_CONV_CTRL_REG, mask, status ? mask : 0); } static int ad74115_set_adc_conv_seq(struct ad74115_state *st, enum ad74115_adc_conv_seq conv_seq) { return regmap_update_bits(st->regmap, AD74115_ADC_CONV_CTRL_REG, AD74115_ADC_CONV_SEQ_MASK, FIELD_PREP(AD74115_ADC_CONV_SEQ_MASK, conv_seq)); } static int ad74115_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *active_scan_mask) { struct ad74115_state *st = iio_priv(indio_dev); struct spi_transfer *xfer = st->adc_samples_xfer; u8 *rx_buf = st->adc_samples_rx_buf; u8 *tx_buf = st->adc_samples_tx_buf; unsigned int i; int ret = 0; mutex_lock(&st->lock); spi_message_init(&st->adc_samples_msg); for_each_clear_bit(i, active_scan_mask, AD74115_ADC_CH_NUM) { ret = ad74115_set_adc_ch_en(st, i, false); if (ret) goto out; } /* * The read select register is used to select which register's value * will be sent by the slave on the next SPI frame. * * Create an SPI message that, on each step, writes to the read select * register to select the ADC result of the next enabled channel, and * reads the ADC result of the previous enabled channel. * * Example: * W: [WCH1] [WCH2] [WCH2] [WCH3] [ ] * R: [ ] [RCH1] [RCH2] [RCH3] [RCH4] */ for_each_set_bit(i, active_scan_mask, AD74115_ADC_CH_NUM) { ret = ad74115_set_adc_ch_en(st, i, true); if (ret) goto out; if (xfer == st->adc_samples_xfer) xfer->rx_buf = NULL; else xfer->rx_buf = rx_buf; xfer->tx_buf = tx_buf; xfer->len = AD74115_FRAME_SIZE; xfer->cs_change = 1; ad74115_format_reg_write(AD74115_READ_SELECT_REG, ad74115_adc_ch_data_regs_tbl[i], tx_buf); spi_message_add_tail(xfer, &st->adc_samples_msg); tx_buf += AD74115_FRAME_SIZE; if (xfer != st->adc_samples_xfer) rx_buf += AD74115_FRAME_SIZE; xfer++; } xfer->rx_buf = rx_buf; xfer->tx_buf = NULL; xfer->len = AD74115_FRAME_SIZE; xfer->cs_change = 0; spi_message_add_tail(xfer, &st->adc_samples_msg); out: mutex_unlock(&st->lock); return ret; } static int ad74115_buffer_postenable(struct iio_dev *indio_dev) { struct ad74115_state *st = iio_priv(indio_dev); return ad74115_set_adc_conv_seq(st, AD74115_ADC_CONV_SEQ_CONTINUOUS); } static int ad74115_buffer_predisable(struct iio_dev *indio_dev) { struct ad74115_state *st = iio_priv(indio_dev); unsigned int i; int ret; mutex_lock(&st->lock); ret = ad74115_set_adc_conv_seq(st, AD74115_ADC_CONV_SEQ_STANDBY); if (ret) goto out; /* * update_scan_mode() is not called in the disable path, disable all * channels here. 
*/ for (i = 0; i < AD74115_ADC_CH_NUM; i++) { ret = ad74115_set_adc_ch_en(st, i, false); if (ret) goto out; } out: mutex_unlock(&st->lock); return ret; } static const struct iio_buffer_setup_ops ad74115_buffer_ops = { .postenable = &ad74115_buffer_postenable, .predisable = &ad74115_buffer_predisable, }; static const struct iio_trigger_ops ad74115_trigger_ops = { .validate_device = iio_trigger_validate_own_device, }; static int ad74115_get_adc_rate(struct ad74115_state *st, enum ad74115_adc_ch channel, int *val) { unsigned int i; int ret; ret = regmap_read(st->regmap, AD74115_ADC_CONFIG_REG, &i); if (ret) return ret; if (channel == AD74115_ADC_CH_CONV1) i = FIELD_GET(AD74115_ADC_CONFIG_CONV1_RATE_MASK, i); else i = FIELD_GET(AD74115_ADC_CONFIG_CONV2_RATE_MASK, i); *val = ad74115_adc_conv_rate_tbl[i]; return IIO_VAL_INT; } static int _ad74115_get_adc_code(struct ad74115_state *st, enum ad74115_adc_ch channel, int *val) { unsigned int uval; int ret; reinit_completion(&st->adc_data_completion); ret = ad74115_set_adc_ch_en(st, channel, true); if (ret) return ret; ret = ad74115_set_adc_conv_seq(st, AD74115_ADC_CONV_SEQ_SINGLE); if (ret) return ret; if (st->irq) { ret = wait_for_completion_timeout(&st->adc_data_completion, msecs_to_jiffies(1000)); if (!ret) return -ETIMEDOUT; } else { unsigned int regval, wait_time; int rate; ret = ad74115_get_adc_rate(st, channel, &rate); if (ret < 0) return ret; wait_time = DIV_ROUND_CLOSEST(AD74115_CONV_TIME_US, rate); ret = regmap_read_poll_timeout(st->regmap, AD74115_LIVE_STATUS_REG, regval, regval & AD74115_ADC_DATA_RDY_MASK, wait_time, 5 * wait_time); if (ret) return ret; /* * The ADC_DATA_RDY bit is W1C. * See datasheet page 98, Table 62. Bit Descriptions for * LIVE_STATUS. * Although the datasheet mentions that the bit will auto-clear * when writing to the ADC_CONV_CTRL register, this does not * seem to happen. 
*/ ret = regmap_write_bits(st->regmap, AD74115_LIVE_STATUS_REG, AD74115_ADC_DATA_RDY_MASK, FIELD_PREP(AD74115_ADC_DATA_RDY_MASK, 1)); if (ret) return ret; } ret = regmap_read(st->regmap, ad74115_adc_ch_data_regs_tbl[channel], &uval); if (ret) return ret; ret = ad74115_set_adc_conv_seq(st, AD74115_ADC_CONV_SEQ_STANDBY); if (ret) return ret; ret = ad74115_set_adc_ch_en(st, channel, false); if (ret) return ret; *val = uval; return IIO_VAL_INT; } static int ad74115_get_adc_code(struct iio_dev *indio_dev, enum ad74115_adc_ch channel, int *val) { struct ad74115_state *st = iio_priv(indio_dev); int ret; ret = iio_device_claim_direct_mode(indio_dev); if (ret) return ret; mutex_lock(&st->lock); ret = _ad74115_get_adc_code(st, channel, val); mutex_unlock(&st->lock); iio_device_release_direct_mode(indio_dev); return ret; } static int ad74115_adc_code_to_resistance(int code, int *val, int *val2) { if (code == AD74115_ADC_CODE_MAX) code--; *val = code * AD74115_REF_RESISTOR_OHMS; *val2 = AD74115_ADC_CODE_MAX - code; return IIO_VAL_FRACTIONAL; } static int ad74115_set_dac_code(struct ad74115_state *st, enum ad74115_dac_ch channel, int val) { if (val < 0) return -EINVAL; if (channel == AD74115_DAC_CH_COMPARATOR) { if (val > AD74115_COMP_THRESH_MAX) return -EINVAL; return regmap_update_bits(st->regmap, AD74115_DIN_CONFIG2_REG, AD74115_COMP_THRESH_MASK, FIELD_PREP(AD74115_COMP_THRESH_MASK, val)); } if (val > AD74115_DAC_CODE_MAX) return -EINVAL; return regmap_write(st->regmap, AD74115_DAC_CODE_REG, val); } static int ad74115_get_dac_code(struct ad74115_state *st, enum ad74115_dac_ch channel, int *val) { unsigned int uval; int ret; if (channel == AD74115_DAC_CH_COMPARATOR) return -EINVAL; ret = regmap_read(st->regmap, AD74115_DAC_ACTIVE_REG, &uval); if (ret) return ret; *val = uval; return IIO_VAL_INT; } static int ad74115_set_adc_rate(struct ad74115_state *st, enum ad74115_adc_ch channel, int val) { unsigned int i; int ret; ret = ad74115_find_tbl_index(ad74115_adc_conv_rate_tbl, val, &i); if (ret) return ret; if (channel == AD74115_ADC_CH_CONV1) return regmap_update_bits(st->regmap, AD74115_ADC_CONFIG_REG, AD74115_ADC_CONFIG_CONV1_RATE_MASK, FIELD_PREP(AD74115_ADC_CONFIG_CONV1_RATE_MASK, i)); return regmap_update_bits(st->regmap, AD74115_ADC_CONFIG_REG, AD74115_ADC_CONFIG_CONV2_RATE_MASK, FIELD_PREP(AD74115_ADC_CONFIG_CONV2_RATE_MASK, i)); } static int ad74115_get_dac_rate(struct ad74115_state *st, int *val) { unsigned int i, en_val, step_val, rate_val, tmp; int ret; ret = regmap_read(st->regmap, AD74115_OUTPUT_CONFIG_REG, &tmp); if (ret) return ret; en_val = FIELD_GET(AD74115_OUTPUT_SLEW_EN_MASK, tmp); step_val = FIELD_GET(AD74115_OUTPUT_SLEW_LIN_STEP_MASK, tmp); rate_val = FIELD_GET(AD74115_OUTPUT_SLEW_LIN_RATE_MASK, tmp); for (i = 0; i < ARRAY_SIZE(ad74115_dac_rate_step_tbl); i++) if (en_val == ad74115_dac_rate_step_tbl[i][0] && step_val == ad74115_dac_rate_step_tbl[i][1] && rate_val == ad74115_dac_rate_step_tbl[i][2]) break; if (i == ARRAY_SIZE(ad74115_dac_rate_step_tbl)) return -EINVAL; *val = ad74115_dac_rate_tbl[i]; return IIO_VAL_INT; } static int ad74115_set_dac_rate(struct ad74115_state *st, int val) { unsigned int i, en_val, step_val, rate_val, mask, tmp; int ret; ret = ad74115_find_tbl_index(ad74115_dac_rate_tbl, val, &i); if (ret) return ret; en_val = ad74115_dac_rate_step_tbl[i][0]; step_val = ad74115_dac_rate_step_tbl[i][1]; rate_val = ad74115_dac_rate_step_tbl[i][2]; mask = AD74115_OUTPUT_SLEW_EN_MASK; mask |= AD74115_OUTPUT_SLEW_LIN_STEP_MASK; mask |= AD74115_OUTPUT_SLEW_LIN_RATE_MASK; tmp 
= FIELD_PREP(AD74115_OUTPUT_SLEW_EN_MASK, en_val); tmp |= FIELD_PREP(AD74115_OUTPUT_SLEW_LIN_STEP_MASK, step_val); tmp |= FIELD_PREP(AD74115_OUTPUT_SLEW_LIN_RATE_MASK, rate_val); return regmap_update_bits(st->regmap, AD74115_OUTPUT_CONFIG_REG, mask, tmp); } static int ad74115_get_dac_scale(struct ad74115_state *st, struct iio_chan_spec const *chan, int *val, int *val2) { if (chan->channel == AD74115_DAC_CH_MAIN) { if (chan->type == IIO_VOLTAGE) { *val = AD74115_DAC_VOLTAGE_MAX; if (st->dac_bipolar) *val *= 2; } else { *val = AD74115_DAC_CURRENT_MAX; } *val2 = AD74115_DAC_CODE_MAX; } else { if (st->din_threshold_mode == AD74115_DIN_THRESHOLD_MODE_AVDD) { *val = 196 * st->avdd_mv; *val2 = 10 * AD74115_COMP_THRESH_MAX; } else { *val = 49000; *val2 = AD74115_COMP_THRESH_MAX; } } return IIO_VAL_FRACTIONAL; } static int ad74115_get_dac_offset(struct ad74115_state *st, struct iio_chan_spec const *chan, int *val) { if (chan->channel == AD74115_DAC_CH_MAIN) { if (chan->type == IIO_VOLTAGE && st->dac_bipolar) *val = -AD74115_DAC_CODE_HALF; else *val = 0; } else { if (st->din_threshold_mode == AD74115_DIN_THRESHOLD_MODE_AVDD) *val = -48; else *val = -38; } return IIO_VAL_INT; } static int ad74115_get_adc_range(struct ad74115_state *st, enum ad74115_adc_ch channel, unsigned int *val) { int ret; ret = regmap_read(st->regmap, AD74115_ADC_CONFIG_REG, val); if (ret) return ret; if (channel == AD74115_ADC_CH_CONV1) *val = FIELD_GET(AD74115_ADC_CONFIG_CONV1_RANGE_MASK, *val); else *val = FIELD_GET(AD74115_ADC_CONFIG_CONV2_RANGE_MASK, *val); return 0; } static int ad74115_get_adc_resistance_scale(struct ad74115_state *st, unsigned int range, int *val, int *val2) { *val = ad74115_adc_gain_tbl[range][1] * AD74115_REF_RESISTOR_OHMS; *val2 = ad74115_adc_gain_tbl[range][0]; if (ad74115_adc_bipolar_tbl[range]) *val2 *= AD74115_ADC_CODE_HALF; else *val2 *= AD74115_ADC_CODE_MAX; return IIO_VAL_FRACTIONAL; } static int ad74115_get_adc_scale(struct ad74115_state *st, struct iio_chan_spec const *chan, int *val, int *val2) { unsigned int range; int ret; ret = ad74115_get_adc_range(st, chan->channel, &range); if (ret) return ret; if (chan->type == IIO_RESISTANCE) return ad74115_get_adc_resistance_scale(st, range, val, val2); *val = ad74115_adc_conv_mul_tbl[range]; *val2 = AD74115_ADC_CODE_MAX; if (chan->type == IIO_CURRENT) *val2 *= AD74115_SENSE_RESISTOR_OHMS; return IIO_VAL_FRACTIONAL; } static int ad74115_get_adc_resistance_offset(struct ad74115_state *st, unsigned int range, int *val, int *val2) { unsigned int d = 10 * AD74115_REF_RESISTOR_OHMS * ad74115_adc_gain_tbl[range][1]; *val = 5; if (ad74115_adc_bipolar_tbl[range]) *val -= AD74115_ADC_CODE_HALF; *val *= d; if (!st->rtd_mode_4_wire) { /* Add 0.2 Ohm to the final result for 3-wire RTD. 
*/ unsigned int v = 2 * ad74115_adc_gain_tbl[range][0]; if (ad74115_adc_bipolar_tbl[range]) v *= AD74115_ADC_CODE_HALF; else v *= AD74115_ADC_CODE_MAX; *val += v; } *val2 = d; return IIO_VAL_FRACTIONAL; } static int ad74115_get_adc_offset(struct ad74115_state *st, struct iio_chan_spec const *chan, int *val, int *val2) { unsigned int range; int ret; ret = ad74115_get_adc_range(st, chan->channel, &range); if (ret) return ret; if (chan->type == IIO_RESISTANCE) return ad74115_get_adc_resistance_offset(st, range, val, val2); if (ad74115_adc_bipolar_tbl[range]) *val = -AD74115_ADC_CODE_HALF; else if (range == AD74115_ADC_RANGE_2_5V_NEG) *val = -AD74115_ADC_CODE_MAX; else *val = 0; return IIO_VAL_INT; } static int ad74115_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long info) { struct ad74115_state *st = iio_priv(indio_dev); int ret; switch (info) { case IIO_CHAN_INFO_RAW: if (chan->output) return ad74115_get_dac_code(st, chan->channel, val); return ad74115_get_adc_code(indio_dev, chan->channel, val); case IIO_CHAN_INFO_PROCESSED: ret = ad74115_get_adc_code(indio_dev, chan->channel, val); if (ret) return ret; return ad74115_adc_code_to_resistance(*val, val, val2); case IIO_CHAN_INFO_SCALE: if (chan->output) return ad74115_get_dac_scale(st, chan, val, val2); return ad74115_get_adc_scale(st, chan, val, val2); case IIO_CHAN_INFO_OFFSET: if (chan->output) return ad74115_get_dac_offset(st, chan, val); return ad74115_get_adc_offset(st, chan, val, val2); case IIO_CHAN_INFO_SAMP_FREQ: if (chan->output) return ad74115_get_dac_rate(st, val); return ad74115_get_adc_rate(st, chan->channel, val); default: return -EINVAL; } } static int ad74115_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long info) { struct ad74115_state *st = iio_priv(indio_dev); switch (info) { case IIO_CHAN_INFO_RAW: if (!chan->output) return -EINVAL; return ad74115_set_dac_code(st, chan->channel, val); case IIO_CHAN_INFO_SAMP_FREQ: if (chan->output) return ad74115_set_dac_rate(st, val); return ad74115_set_adc_rate(st, chan->channel, val); default: return -EINVAL; } } static int ad74115_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, const int **vals, int *type, int *length, long info) { switch (info) { case IIO_CHAN_INFO_SAMP_FREQ: if (chan->output) { *vals = ad74115_dac_rate_tbl; *length = ARRAY_SIZE(ad74115_dac_rate_tbl); } else { *vals = ad74115_adc_conv_rate_tbl; *length = ARRAY_SIZE(ad74115_adc_conv_rate_tbl); } *type = IIO_VAL_INT; return IIO_AVAIL_LIST; default: return -EINVAL; } } static int ad74115_reg_access(struct iio_dev *indio_dev, unsigned int reg, unsigned int writeval, unsigned int *readval) { struct ad74115_state *st = iio_priv(indio_dev); if (readval) return regmap_read(st->regmap, reg, readval); return regmap_write(st->regmap, reg, writeval); } static const struct iio_info ad74115_info = { .read_raw = ad74115_read_raw, .write_raw = ad74115_write_raw, .read_avail = ad74115_read_avail, .update_scan_mode = ad74115_update_scan_mode, .debugfs_reg_access = ad74115_reg_access, }; #define AD74115_DAC_CHANNEL(_type, index) \ { \ .type = (_type), \ .channel = (index), \ .indexed = 1, \ .output = 1, \ .scan_index = -1, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \ | BIT(IIO_CHAN_INFO_SCALE) \ | BIT(IIO_CHAN_INFO_OFFSET), \ } #define _AD74115_ADC_CHANNEL(_type, index, extra_mask_separate) \ { \ .type = (_type), \ .channel = (index), \ .indexed = 1, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \ | 
BIT(IIO_CHAN_INFO_SAMP_FREQ) \ | (extra_mask_separate), \ .info_mask_separate_available = \ BIT(IIO_CHAN_INFO_SAMP_FREQ), \ .scan_index = index, \ .scan_type = { \ .sign = 'u', \ .realbits = 16, \ .storagebits = 32, \ .shift = 8, \ .endianness = IIO_BE, \ }, \ } #define AD74115_ADC_CHANNEL(_type, index) \ _AD74115_ADC_CHANNEL(_type, index, BIT(IIO_CHAN_INFO_SCALE) \ | BIT(IIO_CHAN_INFO_OFFSET)) static const struct iio_chan_spec ad74115_voltage_input_channels[] = { AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV1), AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), }; static const struct iio_chan_spec ad74115_voltage_output_channels[] = { AD74115_DAC_CHANNEL(IIO_VOLTAGE, AD74115_DAC_CH_MAIN), AD74115_ADC_CHANNEL(IIO_CURRENT, AD74115_ADC_CH_CONV1), AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), }; static const struct iio_chan_spec ad74115_current_input_channels[] = { AD74115_ADC_CHANNEL(IIO_CURRENT, AD74115_ADC_CH_CONV1), AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), }; static const struct iio_chan_spec ad74115_current_output_channels[] = { AD74115_DAC_CHANNEL(IIO_CURRENT, AD74115_DAC_CH_MAIN), AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV1), AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), }; static const struct iio_chan_spec ad74115_2_wire_resistance_input_channels[] = { _AD74115_ADC_CHANNEL(IIO_RESISTANCE, AD74115_ADC_CH_CONV1, BIT(IIO_CHAN_INFO_PROCESSED)), AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), }; static const struct iio_chan_spec ad74115_3_4_wire_resistance_input_channels[] = { AD74115_ADC_CHANNEL(IIO_RESISTANCE, AD74115_ADC_CH_CONV1), AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), }; static const struct iio_chan_spec ad74115_digital_input_logic_channels[] = { AD74115_DAC_CHANNEL(IIO_VOLTAGE, AD74115_DAC_CH_COMPARATOR), AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV1), AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), }; static const struct iio_chan_spec ad74115_digital_input_loop_channels[] = { AD74115_DAC_CHANNEL(IIO_CURRENT, AD74115_DAC_CH_MAIN), AD74115_DAC_CHANNEL(IIO_VOLTAGE, AD74115_DAC_CH_COMPARATOR), AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV1), AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), }; #define _AD74115_CHANNELS(_channels) \ { \ .channels = _channels, \ .num_channels = ARRAY_SIZE(_channels), \ } #define AD74115_CHANNELS(name) \ _AD74115_CHANNELS(ad74115_ ## name ## _channels) static const struct ad74115_channels ad74115_channels_map[AD74115_CH_FUNC_NUM] = { [AD74115_CH_FUNC_HIGH_IMPEDANCE] = AD74115_CHANNELS(voltage_input), [AD74115_CH_FUNC_VOLTAGE_INPUT] = AD74115_CHANNELS(voltage_input), [AD74115_CH_FUNC_VOLTAGE_OUTPUT] = AD74115_CHANNELS(voltage_output), [AD74115_CH_FUNC_CURRENT_INPUT_EXT_POWER] = AD74115_CHANNELS(current_input), [AD74115_CH_FUNC_CURRENT_INPUT_LOOP_POWER] = AD74115_CHANNELS(current_input), [AD74115_CH_FUNC_CURRENT_INPUT_EXT_POWER_HART] = AD74115_CHANNELS(current_input), [AD74115_CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART] = AD74115_CHANNELS(current_input), [AD74115_CH_FUNC_CURRENT_OUTPUT] = AD74115_CHANNELS(current_output), [AD74115_CH_FUNC_CURRENT_OUTPUT_HART] = AD74115_CHANNELS(current_output), [AD74115_CH_FUNC_2_WIRE_RESISTANCE_INPUT] = AD74115_CHANNELS(2_wire_resistance_input), [AD74115_CH_FUNC_3_4_WIRE_RESISTANCE_INPUT] = AD74115_CHANNELS(3_4_wire_resistance_input), [AD74115_CH_FUNC_DIGITAL_INPUT_LOGIC] = AD74115_CHANNELS(digital_input_logic), [AD74115_CH_FUNC_DIGITAL_INPUT_LOOP_POWER] = AD74115_CHANNELS(digital_input_loop), }; #define 
AD74115_GPIO_MODE_FW_PROP(i) \ { \ .name = "adi,gpio" __stringify(i) "-mode", \ .reg = AD74115_GPIO_CONFIG_X_REG(i), \ .mask = AD74115_GPIO_CONFIG_SELECT_MASK, \ .lookup_tbl = ad74115_gpio_mode_tbl, \ .lookup_tbl_len = ARRAY_SIZE(ad74115_gpio_mode_tbl), \ } static const struct ad74115_fw_prop ad74115_gpio_mode_fw_props[] = { AD74115_GPIO_MODE_FW_PROP(0), AD74115_GPIO_MODE_FW_PROP(1), AD74115_GPIO_MODE_FW_PROP(2), AD74115_GPIO_MODE_FW_PROP(3), }; static const struct ad74115_fw_prop ad74115_din_threshold_mode_fw_prop = AD74115_FW_PROP_BOOL("adi,digital-input-threshold-mode-fixed", AD74115_DIN_CONFIG2_REG, BIT(7)); static const struct ad74115_fw_prop ad74115_dac_bipolar_fw_prop = AD74115_FW_PROP_BOOL("adi,dac-bipolar", AD74115_OUTPUT_CONFIG_REG, BIT(7)); static const struct ad74115_fw_prop ad74115_ch_func_fw_prop = AD74115_FW_PROP("adi,ch-func", AD74115_CH_FUNC_MAX, AD74115_CH_FUNC_SETUP_REG, GENMASK(3, 0)); static const struct ad74115_fw_prop ad74115_rtd_mode_fw_prop = AD74115_FW_PROP_BOOL("adi,4-wire-rtd", AD74115_RTD3W4W_CONFIG_REG, BIT(3)); static const struct ad74115_fw_prop ad74115_din_range_fw_prop = AD74115_FW_PROP_BOOL("adi,digital-input-sink-range-high", AD74115_DIN_CONFIG1_REG, BIT(12)); static const struct ad74115_fw_prop ad74115_ext2_burnout_current_fw_prop = AD74115_FW_PROP_TBL("adi,ext2-burnout-current-nanoamp", ad74115_burnout_current_na_tbl, AD74115_BURNOUT_CONFIG_REG, GENMASK(14, 12)); static const struct ad74115_fw_prop ad74115_ext1_burnout_current_fw_prop = AD74115_FW_PROP_TBL("adi,ext1-burnout-current-nanoamp", ad74115_burnout_current_na_tbl, AD74115_BURNOUT_CONFIG_REG, GENMASK(9, 7)); static const struct ad74115_fw_prop ad74115_viout_burnout_current_fw_prop = AD74115_FW_PROP_TBL("adi,viout-burnout-current-nanoamp", ad74115_viout_burnout_current_na_tbl, AD74115_BURNOUT_CONFIG_REG, GENMASK(4, 2)); static const struct ad74115_fw_prop ad74115_fw_props[] = { AD74115_FW_PROP("adi,conv2-mux", 3, AD74115_ADC_CONFIG_REG, GENMASK(3, 2)), AD74115_FW_PROP_BOOL_NEG("adi,sense-agnd-buffer-low-power", AD74115_PWR_OPTIM_CONFIG_REG, BIT(4)), AD74115_FW_PROP_BOOL_NEG("adi,lf-buffer-low-power", AD74115_PWR_OPTIM_CONFIG_REG, BIT(3)), AD74115_FW_PROP_BOOL_NEG("adi,hf-buffer-low-power", AD74115_PWR_OPTIM_CONFIG_REG, BIT(2)), AD74115_FW_PROP_BOOL_NEG("adi,ext2-buffer-low-power", AD74115_PWR_OPTIM_CONFIG_REG, BIT(1)), AD74115_FW_PROP_BOOL_NEG("adi,ext1-buffer-low-power", AD74115_PWR_OPTIM_CONFIG_REG, BIT(0)), AD74115_FW_PROP_BOOL("adi,comparator-invert", AD74115_DIN_CONFIG1_REG, BIT(14)), AD74115_FW_PROP_BOOL("adi,digital-input-debounce-mode-counter-reset", AD74115_DIN_CONFIG1_REG, BIT(6)), AD74115_FW_PROP_BOOL("adi,digital-input-unbuffered", AD74115_DIN_CONFIG2_REG, BIT(10)), AD74115_FW_PROP_BOOL("adi,digital-input-short-circuit-detection", AD74115_DIN_CONFIG2_REG, BIT(9)), AD74115_FW_PROP_BOOL("adi,digital-input-open-circuit-detection", AD74115_DIN_CONFIG2_REG, BIT(8)), AD74115_FW_PROP_BOOL("adi,dac-current-limit-low", AD74115_OUTPUT_CONFIG_REG, BIT(0)), AD74115_FW_PROP_BOOL("adi,3-wire-rtd-excitation-swap", AD74115_RTD3W4W_CONFIG_REG, BIT(2)), AD74115_FW_PROP_TBL("adi,rtd-excitation-current-microamp", ad74115_rtd_excitation_current_ua_tbl, AD74115_RTD3W4W_CONFIG_REG, GENMASK(1, 0)), AD74115_FW_PROP_BOOL("adi,ext2-burnout-current-polarity-sourcing", AD74115_BURNOUT_CONFIG_REG, BIT(11)), AD74115_FW_PROP_BOOL("adi,ext1-burnout-current-polarity-sourcing", AD74115_BURNOUT_CONFIG_REG, BIT(6)), AD74115_FW_PROP_BOOL("adi,viout-burnout-current-polarity-sourcing", AD74115_BURNOUT_CONFIG_REG, BIT(1)), 
AD74115_FW_PROP_BOOL("adi,charge-pump", AD74115_CHARGE_PUMP_REG, BIT(0)), }; static int ad74115_apply_fw_prop(struct ad74115_state *st, const struct ad74115_fw_prop *prop, u32 *retval) { struct device *dev = &st->spi->dev; u32 val = 0; int ret; if (prop->is_boolean) { val = device_property_read_bool(dev, prop->name); } else { ret = device_property_read_u32(dev, prop->name, &val); if (ret && prop->lookup_tbl) val = prop->lookup_tbl[0]; } *retval = val; if (prop->negate) val = !val; if (prop->lookup_tbl) ret = _ad74115_find_tbl_index(prop->lookup_tbl, prop->lookup_tbl_len, val, &val); else if (prop->max && val > prop->max) ret = -EINVAL; else ret = 0; if (ret) return dev_err_probe(dev, -EINVAL, "Invalid value %u for prop %s\n", val, prop->name); WARN(!prop->mask, "Prop %s mask is empty\n", prop->name); val = (val << __ffs(prop->mask)) & prop->mask; return regmap_update_bits(st->regmap, prop->reg, prop->mask, val); } static int ad74115_setup_adc_conv2_range(struct ad74115_state *st) { unsigned int tbl_len = ARRAY_SIZE(ad74115_adc_range_tbl); const char *prop_name = "adi,conv2-range-microvolt"; s32 vals[2] = { ad74115_adc_range_tbl[0][0], ad74115_adc_range_tbl[0][1], }; struct device *dev = &st->spi->dev; unsigned int i; device_property_read_u32_array(dev, prop_name, vals, 2); for (i = 0; i < tbl_len; i++) if (vals[0] == ad74115_adc_range_tbl[i][0] && vals[1] == ad74115_adc_range_tbl[i][1]) break; if (i == tbl_len) return dev_err_probe(dev, -EINVAL, "Invalid value %d, %d for prop %s\n", vals[0], vals[1], prop_name); return regmap_update_bits(st->regmap, AD74115_ADC_CONFIG_REG, AD74115_ADC_CONFIG_CONV2_RANGE_MASK, FIELD_PREP(AD74115_ADC_CONFIG_CONV2_RANGE_MASK, i)); } static int ad74115_setup_iio_channels(struct iio_dev *indio_dev) { struct ad74115_state *st = iio_priv(indio_dev); struct device *dev = &st->spi->dev; struct iio_chan_spec *channels; channels = devm_kcalloc(dev, sizeof(*channels), indio_dev->num_channels, GFP_KERNEL); if (!channels) return -ENOMEM; indio_dev->channels = channels; memcpy(channels, ad74115_channels_map[st->ch_func].channels, sizeof(*channels) * ad74115_channels_map[st->ch_func].num_channels); if (channels[0].output && channels[0].channel == AD74115_DAC_CH_MAIN && channels[0].type == IIO_VOLTAGE && !st->dac_hart_slew) { channels[0].info_mask_separate |= BIT(IIO_CHAN_INFO_SAMP_FREQ); channels[0].info_mask_separate_available |= BIT(IIO_CHAN_INFO_SAMP_FREQ); } return 0; } static int ad74115_setup_gpio_chip(struct ad74115_state *st) { struct device *dev = &st->spi->dev; if (!st->gpio_valid_mask) return 0; st->gc = (struct gpio_chip) { .owner = THIS_MODULE, .label = AD74115_NAME, .base = -1, .ngpio = AD74115_GPIO_NUM, .parent = dev, .can_sleep = true, .init_valid_mask = ad74115_gpio_init_valid_mask, .get_direction = ad74115_gpio_get_direction, .direction_input = ad74115_gpio_direction_input, .direction_output = ad74115_gpio_direction_output, .get = ad74115_gpio_get, .set = ad74115_gpio_set, }; return devm_gpiochip_add_data(dev, &st->gc, st); } static int ad74115_setup_comp_gpio_chip(struct ad74115_state *st) { struct device *dev = &st->spi->dev; u32 val; int ret; ret = regmap_read(st->regmap, AD74115_DIN_CONFIG1_REG, &val); if (ret) return ret; if (!(val & AD74115_DIN_COMPARATOR_EN_MASK)) return 0; st->comp_gc = (struct gpio_chip) { .owner = THIS_MODULE, .label = AD74115_NAME, .base = -1, .ngpio = 1, .parent = dev, .can_sleep = true, .get_direction = ad74115_comp_gpio_get_direction, .get = ad74115_comp_gpio_get, .set_config = ad74115_comp_gpio_set_config, }; return 
devm_gpiochip_add_data(dev, &st->comp_gc, st); } static int ad74115_setup(struct iio_dev *indio_dev) { struct ad74115_state *st = iio_priv(indio_dev); struct device *dev = &st->spi->dev; u32 val, din_range_high; unsigned int i; int ret; ret = ad74115_apply_fw_prop(st, &ad74115_ch_func_fw_prop, &val); if (ret) return ret; indio_dev->num_channels += ad74115_channels_map[val].num_channels; st->ch_func = val; ret = ad74115_setup_adc_conv2_range(st); if (ret) return ret; val = device_property_read_bool(dev, "adi,dac-hart-slew"); if (val) { st->dac_hart_slew = val; ret = regmap_update_bits(st->regmap, AD74115_OUTPUT_CONFIG_REG, AD74115_OUTPUT_SLEW_EN_MASK, FIELD_PREP(AD74115_OUTPUT_SLEW_EN_MASK, AD74115_SLEW_MODE_HART)); if (ret) return ret; } ret = ad74115_apply_fw_prop(st, &ad74115_din_range_fw_prop, &din_range_high); if (ret) return ret; ret = device_property_read_u32(dev, "adi,digital-input-sink-microamp", &val); if (!ret) { if (din_range_high) val = DIV_ROUND_CLOSEST(val, AD74115_DIN_SINK_LOW_STEP); else val = DIV_ROUND_CLOSEST(val, AD74115_DIN_SINK_HIGH_STEP); if (val > AD74115_DIN_SINK_MAX) val = AD74115_DIN_SINK_MAX; ret = regmap_update_bits(st->regmap, AD74115_DIN_CONFIG1_REG, AD74115_DIN_SINK_MASK, FIELD_PREP(AD74115_DIN_SINK_MASK, val)); if (ret) return ret; } ret = ad74115_apply_fw_prop(st, &ad74115_din_threshold_mode_fw_prop, &val); if (ret) return ret; if (val == AD74115_DIN_THRESHOLD_MODE_AVDD && !st->avdd_mv) return dev_err_probe(dev, -EINVAL, "AVDD voltage is required for digital input threshold mode AVDD\n"); st->din_threshold_mode = val; ret = ad74115_apply_fw_prop(st, &ad74115_dac_bipolar_fw_prop, &val); if (ret) return ret; st->dac_bipolar = val; ret = ad74115_apply_fw_prop(st, &ad74115_rtd_mode_fw_prop, &val); if (ret) return ret; st->rtd_mode_4_wire = val; ret = ad74115_apply_fw_prop(st, &ad74115_ext2_burnout_current_fw_prop, &val); if (ret) return ret; if (val) { ret = regmap_update_bits(st->regmap, AD74115_BURNOUT_CONFIG_REG, AD74115_BURNOUT_EXT2_EN_MASK, FIELD_PREP(AD74115_BURNOUT_EXT2_EN_MASK, 1)); if (ret) return ret; } ret = ad74115_apply_fw_prop(st, &ad74115_ext1_burnout_current_fw_prop, &val); if (ret) return ret; if (val) { ret = regmap_update_bits(st->regmap, AD74115_BURNOUT_CONFIG_REG, AD74115_BURNOUT_EXT1_EN_MASK, FIELD_PREP(AD74115_BURNOUT_EXT1_EN_MASK, 1)); if (ret) return ret; } ret = ad74115_apply_fw_prop(st, &ad74115_viout_burnout_current_fw_prop, &val); if (ret) return ret; if (val) { ret = regmap_update_bits(st->regmap, AD74115_BURNOUT_CONFIG_REG, AD74115_BURNOUT_VIOUT_EN_MASK, FIELD_PREP(AD74115_BURNOUT_VIOUT_EN_MASK, 1)); if (ret) return ret; } for (i = 0; i < AD74115_GPIO_NUM; i++) { ret = ad74115_apply_fw_prop(st, &ad74115_gpio_mode_fw_props[i], &val); if (ret) return ret; if (val == AD74115_GPIO_MODE_LOGIC) st->gpio_valid_mask |= BIT(i); } for (i = 0; i < ARRAY_SIZE(ad74115_fw_props); i++) { ret = ad74115_apply_fw_prop(st, &ad74115_fw_props[i], &val); if (ret) return ret; } ret = ad74115_setup_gpio_chip(st); if (ret) return ret; ret = ad74115_setup_comp_gpio_chip(st); if (ret) return ret; return ad74115_setup_iio_channels(indio_dev); } static int ad74115_reset(struct ad74115_state *st) { struct device *dev = &st->spi->dev; struct gpio_desc *reset_gpio; int ret; reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(reset_gpio)) return dev_err_probe(dev, PTR_ERR(reset_gpio), "Failed to find reset GPIO\n"); if (reset_gpio) { fsleep(100); gpiod_set_value_cansleep(reset_gpio, 0); } else { ret = regmap_write(st->regmap, 
AD74115_CMD_KEY_REG, AD74115_CMD_KEY_RESET1); if (ret) return ret; ret = regmap_write(st->regmap, AD74115_CMD_KEY_REG, AD74115_CMD_KEY_RESET2); if (ret) return ret; } fsleep(1000); return 0; } static int ad74115_setup_trigger(struct iio_dev *indio_dev) { struct ad74115_state *st = iio_priv(indio_dev); struct device *dev = &st->spi->dev; int ret; st->irq = fwnode_irq_get_byname(dev_fwnode(dev), "adc_rdy"); if (st->irq == -EPROBE_DEFER) return -EPROBE_DEFER; if (st->irq < 0) { st->irq = 0; return 0; } ret = devm_request_irq(dev, st->irq, ad74115_adc_data_interrupt, 0, AD74115_NAME, indio_dev); if (ret) return ret; st->trig = devm_iio_trigger_alloc(dev, "%s-dev%d", AD74115_NAME, iio_device_id(indio_dev)); if (!st->trig) return -ENOMEM; st->trig->ops = &ad74115_trigger_ops; iio_trigger_set_drvdata(st->trig, st); ret = devm_iio_trigger_register(dev, st->trig); if (ret) return ret; indio_dev->trig = iio_trigger_get(st->trig); return 0; } static int ad74115_probe(struct spi_device *spi) { static const char * const regulator_names[] = { "avcc", "dvcc", "dovdd", "refin", }; struct device *dev = &spi->dev; struct ad74115_state *st; struct iio_dev *indio_dev; int ret; indio_dev = devm_iio_device_alloc(dev, sizeof(*st)); if (!indio_dev) return -ENOMEM; st = iio_priv(indio_dev); st->spi = spi; mutex_init(&st->lock); init_completion(&st->adc_data_completion); indio_dev->name = AD74115_NAME; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &ad74115_info; ret = devm_regulator_get_enable_read_voltage(dev, "avdd"); if (ret < 0) { /* * Since this is both a power supply and only optionally a * reference voltage, make sure to enable it even when the * voltage is not available. */ ret = devm_regulator_get_enable(dev, "avdd"); if (ret) return dev_err_probe(dev, ret, "failed to enable avdd\n"); } else { st->avdd_mv = ret / 1000; } ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names), regulator_names); if (ret) return ret; st->regmap = devm_regmap_init(dev, NULL, st, &ad74115_regmap_config); if (IS_ERR(st->regmap)) return PTR_ERR(st->regmap); ret = ad74115_reset(st); if (ret) return ret; ret = ad74115_setup(indio_dev); if (ret) return ret; ret = ad74115_setup_trigger(indio_dev); if (ret) return ret; ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, ad74115_trigger_handler, &ad74115_buffer_ops); if (ret) return ret; return devm_iio_device_register(dev, indio_dev); } static int ad74115_unregister_driver(struct spi_driver *spi) { spi_unregister_driver(spi); return 0; } static int __init ad74115_register_driver(struct spi_driver *spi) { crc8_populate_msb(ad74115_crc8_table, AD74115_CRC_POLYNOMIAL); return spi_register_driver(spi); } static const struct spi_device_id ad74115_spi_id[] = { { "ad74115h" }, { } }; MODULE_DEVICE_TABLE(spi, ad74115_spi_id); static const struct of_device_id ad74115_dt_id[] = { { .compatible = "adi,ad74115h" }, { } }; MODULE_DEVICE_TABLE(of, ad74115_dt_id); static struct spi_driver ad74115_driver = { .driver = { .name = "ad74115", .of_match_table = ad74115_dt_id, }, .probe = ad74115_probe, .id_table = ad74115_spi_id, }; module_driver(ad74115_driver, ad74115_register_driver, ad74115_unregister_driver); MODULE_AUTHOR("Cosmin Tanislav <[email protected]>"); MODULE_DESCRIPTION("Analog Devices AD74115 ADDAC"); MODULE_LICENSE("GPL");
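/*
 * Worked example of how ad74115_apply_fw_prop() above packs a property into
 * its register field (the index value is illustrative; the real lookup table
 * ad74115_burnout_current_na_tbl is defined earlier in this file):
 *
 *   prop = ad74115_ext2_burnout_current_fw_prop, mask = GENMASK(14, 12)
 *
 *   1. device_property_read_u32() reads "adi,ext2-burnout-current-nanoamp",
 *      falling back to lookup_tbl[0] when the property is absent.
 *   2. _ad74115_find_tbl_index() maps the nanoamp value to a table index,
 *      say 3.
 *   3. val = (3 << __ffs(mask)) & mask = (3 << 12) & 0x7000 = 0x3000
 *   4. regmap_update_bits(st->regmap, AD74115_BURNOUT_CONFIG_REG,
 *                         0x7000, 0x3000)
 *
 * Boolean properties (AD74115_FW_PROP_BOOL/_BOOL_NEG) skip the table lookup:
 * the value from device_property_read_bool(), optionally negated, lands in
 * its single BIT(n) mask the same way.
 */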
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for Microtune MT2131 "QAM/8VSB single chip tuner" * * Copyright (c) 2006 Steven Toth <[email protected]> */ #include <linux/module.h> #include <linux/delay.h> #include <linux/dvb/frontend.h> #include <linux/i2c.h> #include <linux/slab.h> #include <media/dvb_frontend.h> #include "mt2131.h" #include "mt2131_priv.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); #define dprintk(level,fmt, arg...) if (debug >= level) \ printk(KERN_INFO "%s: " fmt, "mt2131", ## arg) static u8 mt2131_config1[] = { 0x01, 0x50, 0x00, 0x50, 0x80, 0x00, 0x49, 0xfa, 0x88, 0x08, 0x77, 0x41, 0x04, 0x00, 0x00, 0x00, 0x32, 0x7f, 0xda, 0x4c, 0x00, 0x10, 0xaa, 0x78, 0x80, 0xff, 0x68, 0xa0, 0xff, 0xdd, 0x00, 0x00 }; static u8 mt2131_config2[] = { 0x10, 0x7f, 0xc8, 0x0a, 0x5f, 0x00, 0x04 }; static int mt2131_readreg(struct mt2131_priv *priv, u8 reg, u8 *val) { struct i2c_msg msg[2] = { { .addr = priv->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 }, { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val, .len = 1 }, }; if (i2c_transfer(priv->i2c, msg, 2) != 2) { printk(KERN_WARNING "mt2131 I2C read failed\n"); return -EREMOTEIO; } return 0; } static int mt2131_writereg(struct mt2131_priv *priv, u8 reg, u8 val) { u8 buf[2] = { reg, val }; struct i2c_msg msg = { .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2 }; if (i2c_transfer(priv->i2c, &msg, 1) != 1) { printk(KERN_WARNING "mt2131 I2C write failed\n"); return -EREMOTEIO; } return 0; } static int mt2131_writeregs(struct mt2131_priv *priv,u8 *buf, u8 len) { struct i2c_msg msg = { .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = len }; if (i2c_transfer(priv->i2c, &msg, 1) != 1) { printk(KERN_WARNING "mt2131 I2C write failed (len=%i)\n", (int)len); return -EREMOTEIO; } return 0; } static int mt2131_set_params(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct mt2131_priv *priv; int ret=0, i; u32 freq; u8 if_band_center; u32 f_lo1, f_lo2; u32 div1, num1, div2, num2; u8 b[8]; u8 lockval = 0; priv = fe->tuner_priv; freq = c->frequency / 1000; /* Hz -> kHz */ dprintk(1, "%s() freq=%d\n", __func__, freq); f_lo1 = freq + MT2131_IF1 * 1000; f_lo1 = (f_lo1 / 250) * 250; f_lo2 = f_lo1 - freq - MT2131_IF2; priv->frequency = (f_lo1 - f_lo2 - MT2131_IF2) * 1000; /* Frequency LO1 = 16MHz * (DIV1 + NUM1/8192 ) */ num1 = f_lo1 * 64 / (MT2131_FREF / 128); div1 = num1 / 8192; num1 &= 0x1fff; /* Frequency LO2 = 16MHz * (DIV2 + NUM2/8192 ) */ num2 = f_lo2 * 64 / (MT2131_FREF / 128); div2 = num2 / 8192; num2 &= 0x1fff; if (freq <= 82500) if_band_center = 0x00; else if (freq <= 137500) if_band_center = 0x01; else if (freq <= 192500) if_band_center = 0x02; else if (freq <= 247500) if_band_center = 0x03; else if (freq <= 302500) if_band_center = 0x04; else if (freq <= 357500) if_band_center = 0x05; else if (freq <= 412500) if_band_center = 0x06; else if (freq <= 467500) if_band_center = 0x07; else if (freq <= 522500) if_band_center = 0x08; else if (freq <= 577500) if_band_center = 0x09; else if (freq <= 632500) if_band_center = 0x0A; else if (freq <= 687500) if_band_center = 0x0B; else if (freq <= 742500) if_band_center = 0x0C; else if (freq <= 797500) if_band_center = 0x0D; else if (freq <= 852500) if_band_center = 0x0E; else if (freq <= 907500) if_band_center = 0x0F; else if (freq <= 962500) if_band_center = 0x10; else if (freq <= 1017500) if_band_center = 0x11; else if (freq <= 1072500) 
if_band_center = 0x12; else if_band_center = 0x13; b[0] = 1; b[1] = (num1 >> 5) & 0xFF; b[2] = (num1 & 0x1F); b[3] = div1; b[4] = (num2 >> 5) & 0xFF; b[5] = num2 & 0x1F; b[6] = div2; dprintk(1, "IF1: %dMHz IF2: %dMHz\n", MT2131_IF1, MT2131_IF2); dprintk(1, "PLL freq=%dkHz band=%d\n", (int)freq, (int)if_band_center); dprintk(1, "PLL f_lo1=%dkHz f_lo2=%dkHz\n", (int)f_lo1, (int)f_lo2); dprintk(1, "PLL div1=%d num1=%d div2=%d num2=%d\n", (int)div1, (int)num1, (int)div2, (int)num2); dprintk(1, "PLL [1..6]: %2x %2x %2x %2x %2x %2x\n", (int)b[1], (int)b[2], (int)b[3], (int)b[4], (int)b[5], (int)b[6]); ret = mt2131_writeregs(priv,b,7); if (ret < 0) return ret; mt2131_writereg(priv, 0x0b, if_band_center); /* Wait for lock */ i = 0; do { mt2131_readreg(priv, 0x08, &lockval); if ((lockval & 0x88) == 0x88) break; msleep(4); i++; } while (i < 10); return ret; } static int mt2131_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct mt2131_priv *priv = fe->tuner_priv; dprintk(1, "%s()\n", __func__); *frequency = priv->frequency; return 0; } static int mt2131_get_status(struct dvb_frontend *fe, u32 *status) { struct mt2131_priv *priv = fe->tuner_priv; u8 lock_status = 0; u8 afc_status = 0; *status = 0; mt2131_readreg(priv, 0x08, &lock_status); if ((lock_status & 0x88) == 0x88) *status = TUNER_STATUS_LOCKED; mt2131_readreg(priv, 0x09, &afc_status); dprintk(1, "%s() - LO Status = 0x%x, AFC Status = 0x%x\n", __func__, lock_status, afc_status); return 0; } static int mt2131_init(struct dvb_frontend *fe) { struct mt2131_priv *priv = fe->tuner_priv; int ret; dprintk(1, "%s()\n", __func__); if ((ret = mt2131_writeregs(priv, mt2131_config1, sizeof(mt2131_config1))) < 0) return ret; mt2131_writereg(priv, 0x0b, 0x09); mt2131_writereg(priv, 0x15, 0x47); mt2131_writereg(priv, 0x07, 0xf2); mt2131_writereg(priv, 0x0b, 0x01); if ((ret = mt2131_writeregs(priv, mt2131_config2, sizeof(mt2131_config2))) < 0) return ret; return ret; } static void mt2131_release(struct dvb_frontend *fe) { dprintk(1, "%s()\n", __func__); kfree(fe->tuner_priv); fe->tuner_priv = NULL; } static const struct dvb_tuner_ops mt2131_tuner_ops = { .info = { .name = "Microtune MT2131", .frequency_min_hz = 48 * MHz, .frequency_max_hz = 860 * MHz, .frequency_step_hz = 50 * kHz, }, .release = mt2131_release, .init = mt2131_init, .set_params = mt2131_set_params, .get_frequency = mt2131_get_frequency, .get_status = mt2131_get_status }; struct dvb_frontend * mt2131_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2131_config *cfg, u16 if1) { struct mt2131_priv *priv = NULL; u8 id = 0; dprintk(1, "%s()\n", __func__); priv = kzalloc(sizeof(struct mt2131_priv), GFP_KERNEL); if (priv == NULL) return NULL; priv->cfg = cfg; priv->i2c = i2c; if (mt2131_readreg(priv, 0, &id) != 0) { kfree(priv); return NULL; } if ( (id != 0x3E) && (id != 0x3F) ) { printk(KERN_ERR "MT2131: Device not found at addr 0x%02x\n", cfg->i2c_address); kfree(priv); return NULL; } printk(KERN_INFO "MT2131: successfully identified at address 0x%02x\n", cfg->i2c_address); memcpy(&fe->ops.tuner_ops, &mt2131_tuner_ops, sizeof(struct dvb_tuner_ops)); fe->tuner_priv = priv; return fe; } EXPORT_SYMBOL_GPL(mt2131_attach); MODULE_AUTHOR("Steven Toth"); MODULE_DESCRIPTION("Microtune MT2131 silicon tuner driver"); MODULE_LICENSE("GPL");
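/*
 * Worked example for the LO arithmetic in mt2131_set_params() above. The
 * constants live in mt2131_priv.h and are assumed here to be
 * MT2131_IF1 = 1220 (MHz), MT2131_IF2 = 44000 (kHz) and MT2131_FREF = 16000
 * (kHz); the numbers below only hold under that assumption.
 *
 * Tuning to 195 MHz (freq = 195000 kHz):
 *
 *   f_lo1 = 195000 + 1220 * 1000 = 1415000 kHz  (already on the 250 kHz grid)
 *   f_lo2 = 1415000 - 195000 - 44000 = 1176000 kHz
 *
 *   num1 = 1415000 * 64 / 125 = 724480 -> div1 = 88, num1 & 0x1fff = 3584
 *          LO1 = 16 MHz * (88 + 3584/8192) = 1415 MHz
 *   num2 = 1176000 * 64 / 125 = 602112 -> div2 = 73, num2 & 0x1fff = 4096
 *          LO2 = 16 MHz * (73 + 4096/8192) = 1176 MHz
 *
 *   if_band_center = 0x03  (192500 < freq <= 247500)
 *   priv->frequency = (1415000 - 1176000 - 44000) * 1000 = 195000000 Hz
 */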
// SPDX-License-Identifier: MIT /* * Copyright © 2022 Intel Corporation */ #include "xe_rtp.h" #include <kunit/visibility.h> #include <uapi/drm/xe_drm.h> #include "xe_gt.h" #include "xe_gt_topology.h" #include "xe_macros.h" #include "xe_reg_sr.h" #include "xe_sriov.h" /** * DOC: Register Table Processing * * Internal infrastructure to define how registers should be updated based on * rules and actions. This can be used to define tables with multiple entries * (one per register) that will be walked over at some point in time to apply * the values to the registers that have matching rules. */ static bool has_samedia(const struct xe_device *xe) { return xe->info.media_verx100 >= 1300; } static bool rule_matches(const struct xe_device *xe, struct xe_gt *gt, struct xe_hw_engine *hwe, const struct xe_rtp_rule *rules, unsigned int n_rules) { const struct xe_rtp_rule *r; unsigned int i, rcount = 0; bool match; for (r = rules, i = 0; i < n_rules; r = &rules[++i]) { switch (r->match_type) { case XE_RTP_MATCH_OR: /* * This is only reached if a complete set of * rules passed or none were evaluated. For both cases, * shortcut the other rules and return the proper value. */ goto done; case XE_RTP_MATCH_PLATFORM: match = xe->info.platform == r->platform; break; case XE_RTP_MATCH_SUBPLATFORM: match = xe->info.platform == r->platform && xe->info.subplatform == r->subplatform; break; case XE_RTP_MATCH_GRAPHICS_VERSION: match = xe->info.graphics_verx100 == r->ver_start && (!has_samedia(xe) || !xe_gt_is_media_type(gt)); break; case XE_RTP_MATCH_GRAPHICS_VERSION_RANGE: match = xe->info.graphics_verx100 >= r->ver_start && xe->info.graphics_verx100 <= r->ver_end && (!has_samedia(xe) || !xe_gt_is_media_type(gt)); break; case XE_RTP_MATCH_GRAPHICS_VERSION_ANY_GT: match = xe->info.graphics_verx100 == r->ver_start; break; case XE_RTP_MATCH_GRAPHICS_STEP: match = xe->info.step.graphics >= r->step_start && xe->info.step.graphics < r->step_end && (!has_samedia(xe) || !xe_gt_is_media_type(gt)); break; case XE_RTP_MATCH_MEDIA_VERSION: match = xe->info.media_verx100 == r->ver_start && (!has_samedia(xe) || xe_gt_is_media_type(gt)); break; case XE_RTP_MATCH_MEDIA_VERSION_RANGE: match = xe->info.media_verx100 >= r->ver_start && xe->info.media_verx100 <= r->ver_end && (!has_samedia(xe) || xe_gt_is_media_type(gt)); break; case XE_RTP_MATCH_MEDIA_STEP: match = xe->info.step.media >= r->step_start && xe->info.step.media < r->step_end && (!has_samedia(xe) || xe_gt_is_media_type(gt)); break; case XE_RTP_MATCH_MEDIA_VERSION_ANY_GT: match = xe->info.media_verx100 == r->ver_start; break; case XE_RTP_MATCH_INTEGRATED: match = !xe->info.is_dgfx; break; case XE_RTP_MATCH_DISCRETE: match = xe->info.is_dgfx; break; case XE_RTP_MATCH_ENGINE_CLASS: if (drm_WARN_ON(&xe->drm, !hwe)) return false; match = hwe->class == r->engine_class; break; case XE_RTP_MATCH_NOT_ENGINE_CLASS: if (drm_WARN_ON(&xe->drm, !hwe)) return false; match = hwe->class != r->engine_class; break; case XE_RTP_MATCH_FUNC: match = r->match_func(gt, hwe); break; default: drm_warn(&xe->drm, "Invalid RTP match %u\n", r->match_type); match = false; } if (!match) { /* * Advance rules until we find XE_RTP_MATCH_OR to check * if there's another set of conditions to check */ while (++i < n_rules && rules[i].match_type != XE_RTP_MATCH_OR) ; if (i >= n_rules) return false; rcount = 0; } else { rcount++; } } done: if (drm_WARN_ON(&xe->drm, !rcount)) return false; return true; } static void rtp_add_sr_entry(const struct xe_rtp_action *action, struct xe_gt *gt, u32 mmio_base, struct 
xe_reg_sr *sr) { struct xe_reg_sr_entry sr_entry = { .reg = action->reg, .clr_bits = action->clr_bits, .set_bits = action->set_bits, .read_mask = action->read_mask, }; sr_entry.reg.addr += mmio_base; xe_reg_sr_add(sr, &sr_entry, gt); } static bool rtp_process_one_sr(const struct xe_rtp_entry_sr *entry, struct xe_device *xe, struct xe_gt *gt, struct xe_hw_engine *hwe, struct xe_reg_sr *sr) { const struct xe_rtp_action *action; u32 mmio_base; unsigned int i; if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules)) return false; for (i = 0, action = &entry->actions[0]; i < entry->n_actions; action++, i++) { if ((entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) || (action->flags & XE_RTP_ACTION_FLAG_ENGINE_BASE)) mmio_base = hwe->mmio_base; else mmio_base = 0; rtp_add_sr_entry(action, gt, mmio_base, sr); } return true; } static void rtp_get_context(struct xe_rtp_process_ctx *ctx, struct xe_hw_engine **hwe, struct xe_gt **gt, struct xe_device **xe) { switch (ctx->type) { case XE_RTP_PROCESS_TYPE_GT: *hwe = NULL; *gt = ctx->gt; *xe = gt_to_xe(*gt); break; case XE_RTP_PROCESS_TYPE_ENGINE: *hwe = ctx->hwe; *gt = (*hwe)->gt; *xe = gt_to_xe(*gt); break; } } /** * xe_rtp_process_ctx_enable_active_tracking - Enable tracking of active entries * * Set additional metadata to track what entries are considered "active", i.e. * their rules match the condition. Bits are never cleared: entries with * matching rules set the corresponding bit in the bitmap. * * @ctx: The context for processing the table * @active_entries: bitmap to store the active entries * @n_entries: number of entries to be processed */ void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx, unsigned long *active_entries, size_t n_entries) { ctx->active_entries = active_entries; ctx->n_entries = n_entries; } EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_ctx_enable_active_tracking); static void rtp_mark_active(struct xe_device *xe, struct xe_rtp_process_ctx *ctx, unsigned int idx) { if (!ctx->active_entries) return; if (drm_WARN_ON(&xe->drm, idx >= ctx->n_entries)) return; bitmap_set(ctx->active_entries, idx, 1); } /** * xe_rtp_process_to_sr - Process all rtp @entries, adding the matching ones to * the save-restore argument. * @ctx: The context for processing the table, with one of device, gt or hwe * @entries: Table with RTP definitions * @sr: Save-restore struct where matching rules execute the action. This can be * viewed as the "coalesced view" of multiple the tables. The bits for each * register set are expected not to collide with previously added entries * * Walk the table pointed by @entries (with an empty sentinel) and add all * entries with matching rules to @sr. 
If @hwe is not NULL, its mmio_base is * used to calculate the right register offset */ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx, const struct xe_rtp_entry_sr *entries, struct xe_reg_sr *sr) { const struct xe_rtp_entry_sr *entry; struct xe_hw_engine *hwe = NULL; struct xe_gt *gt = NULL; struct xe_device *xe = NULL; rtp_get_context(ctx, &hwe, &gt, &xe); if (IS_SRIOV_VF(xe)) return; for (entry = entries; entry && entry->name; entry++) { bool match = false; if (entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) { struct xe_hw_engine *each_hwe; enum xe_hw_engine_id id; for_each_hw_engine(each_hwe, gt, id) match |= rtp_process_one_sr(entry, xe, gt, each_hwe, sr); } else { match = rtp_process_one_sr(entry, xe, gt, hwe, sr); } if (match) rtp_mark_active(xe, ctx, entry - entries); } } EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr); /** * xe_rtp_process - Process all rtp @entries, without running any action * @ctx: The context for processing the table, with one of device, gt or hwe * @entries: Table with RTP definitions * * Walk the table pointed by @entries (with an empty sentinel), executing the * rules. One difference from xe_rtp_process_to_sr(): there is no action * associated with each entry since this uses struct xe_rtp_entry. Its main use * is for marking active workarounds via * xe_rtp_process_ctx_enable_active_tracking(). */ void xe_rtp_process(struct xe_rtp_process_ctx *ctx, const struct xe_rtp_entry *entries) { const struct xe_rtp_entry *entry; struct xe_hw_engine *hwe; struct xe_gt *gt; struct xe_device *xe; rtp_get_context(ctx, &hwe, &gt, &xe); for (entry = entries; entry && entry->rules; entry++) { if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules)) continue; rtp_mark_active(xe, ctx, entry - entries); } } EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process); bool xe_rtp_match_even_instance(const struct xe_gt *gt, const struct xe_hw_engine *hwe) { return hwe->instance % 2 == 0; } bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt, const struct xe_hw_engine *hwe) { u64 render_compute_mask = gt->info.engine_mask & (XE_HW_ENGINE_CCS_MASK | XE_HW_ENGINE_RCS_MASK); return render_compute_mask && hwe->engine_id == __ffs(render_compute_mask); } bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt, const struct xe_hw_engine *hwe) { unsigned int dss_per_gslice = 4; unsigned int dss; if (drm_WARN(&gt_to_xe(gt)->drm, xe_dss_mask_empty(gt->fuse_topo.g_dss_mask), "Checking gslice for platform without geometry pipeline\n")) return false; dss = xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0); return dss >= dss_per_gslice; }
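/*
 * Note on rule evaluation in rule_matches() above (schematic only; the real
 * table syntax is built from the XE_RTP_* macros in xe_rtp.h and is not
 * reproduced here):
 *
 *   rules = { MEDIA_VERSION(1300), ENGINE_CLASS(VIDEO_DECODE),
 *             OR,
 *             PLATFORM(P) }
 *
 * Rules are walked left to right and AND-ed within a group. Reaching
 * XE_RTP_MATCH_OR after every rule of the current group has matched
 * short-circuits to "entry matches"; a failed rule instead skips ahead to the
 * next OR and evaluation restarts with the following group. The sketch above
 * therefore matches either (media 13.00 and a video-decode engine) or
 * (platform P), and an entry with no fully matching group does not match.
 */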
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LED_H #define LED_H #define LED7 0x80 /* top (or furthest right) LED */ #define LED6 0x40 #define LED5 0x20 #define LED4 0x10 #define LED3 0x08 #define LED2 0x04 #define LED1 0x02 #define LED0 0x01 /* bottom (or furthest left) LED */ #define LED_LAN_RCV LED0 /* for LAN receive activity */ #define LED_LAN_TX LED1 /* for LAN transmit activity */ #define LED_DISK_IO LED2 /* for disk activity */ #define LED_HEARTBEAT LED3 /* heartbeat */ /* values for pdc_chassis_lcd_info_ret_block.model: */ #define DISPLAY_MODEL_LCD 0 /* KittyHawk LED or LCD */ #define DISPLAY_MODEL_NONE 1 /* no LED or LCD */ #define DISPLAY_MODEL_LASI 2 /* LASI style 8 bit LED */ #define DISPLAY_MODEL_OLD_ASP 0x7F /* faked: ASP style 8 x 1 bit LED (only very old ASP versions) */ #define LED_CMD_REG_NONE 0 /* NULL == no addr for the cmd register */ /* register_led_driver() */ int register_led_driver(int model, unsigned long cmd_reg, unsigned long data_reg); #ifdef CONFIG_CHASSIS_LCD_LED /* writes a string to the LCD display (if possible on this h/w) */ void lcd_print(const char *str); #else #define lcd_print(str) do { } while (0) #endif #endif /* LED_H */
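/*
 * Usage sketch (the data register below is a placeholder; real callers pass
 * the LED/LCD address discovered from firmware or fixed platform data):
 *
 *   register_led_driver(DISPLAY_MODEL_LASI, LED_CMD_REG_NONE, lasi_led_reg);
 *   lcd_print("Linux");   // a no-op unless CONFIG_CHASSIS_LCD_LED is set
 *
 * LED_LAN_RCV, LED_LAN_TX, LED_DISK_IO and LED_HEARTBEAT name which of
 * LED0..LED7 a given kind of activity is mapped to by the LED driver.
 */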
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell MMC/SD/SDIO driver * * Authors: Maen Suleiman, Nicolas Pitre * Copyright (C) 2008-2009 Marvell Ltd. */ #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/mbus.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <linux/irq.h> #include <linux/clk.h> #include <linux/of_irq.h> #include <linux/mmc/host.h> #include <linux/mmc/slot-gpio.h> #include <linux/sizes.h> #include <linux/unaligned.h> #include "mvsdio.h" #define DRIVER_NAME "mvsdio" static int maxfreq; static int nodma; struct mvsd_host { void __iomem *base; struct mmc_request *mrq; spinlock_t lock; unsigned int xfer_mode; unsigned int intr_en; unsigned int ctrl; unsigned int pio_size; void *pio_ptr; unsigned int sg_frags; unsigned int ns_per_clk; unsigned int clock; unsigned int base_clock; struct timer_list timer; struct mmc_host *mmc; struct device *dev; struct clk *clk; }; #define mvsd_write(offs, val) writel(val, iobase + (offs)) #define mvsd_read(offs) readl(iobase + (offs)) static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data) { void __iomem *iobase = host->base; unsigned int tmout; int tmout_index; /* * Hardware weirdness. The FIFO_EMPTY bit of the HW_STATE * register is sometimes not set before a while when some * "unusual" data block sizes are used (such as with the SWITCH * command), even despite the fact that the XFER_DONE interrupt * was raised. And if another data transfer starts before * this bit comes to good sense (which eventually happens by * itself) then the new transfer simply fails with a timeout. */ if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) { unsigned long t = jiffies + HZ; unsigned int hw_state, count = 0; do { hw_state = mvsd_read(MVSD_HW_STATE); if (time_after(jiffies, t)) { dev_warn(host->dev, "FIFO_EMPTY bit missing\n"); break; } count++; } while (!(hw_state & (1 << 13))); dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit " "(hw=0x%04x, count=%d, jiffies=%ld)\n", hw_state, count, jiffies - (t - HZ)); } /* If timeout=0 then maximum timeout index is used. */ tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk); tmout += data->timeout_clks; tmout_index = fls(tmout - 1) - 12; if (tmout_index < 0) tmout_index = 0; if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX) tmout_index = MVSD_HOST_CTRL_TMOUT_MAX; dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u (%d)\n", (data->flags & MMC_DATA_READ) ? "read" : "write", (u32)sg_virt(data->sg), data->blocks, data->blksz, tmout, tmout_index); host->ctrl &= ~MVSD_HOST_CTRL_TMOUT_MASK; host->ctrl |= MVSD_HOST_CTRL_TMOUT(tmout_index); mvsd_write(MVSD_HOST_CTRL, host->ctrl); mvsd_write(MVSD_BLK_COUNT, data->blocks); mvsd_write(MVSD_BLK_SIZE, data->blksz); if (nodma || (data->blksz | data->sg->offset) & 3 || ((!(data->flags & MMC_DATA_READ) && data->sg->offset & 0x3f))) { /* * We cannot do DMA on a buffer which offset or size * is not aligned on a 4-byte boundary. * * It also appears the host to card DMA can corrupt * data when the buffer is not aligned on a 64 byte * boundary. 
*/ host->pio_size = data->blocks * data->blksz; host->pio_ptr = sg_virt(data->sg); if (!nodma) dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n", host->pio_ptr, host->pio_size); return 1; } else { dma_addr_t phys_addr; host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, mmc_get_dma_dir(data)); phys_addr = sg_dma_address(data->sg); mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff); mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16); return 0; } } static void mvsd_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct mvsd_host *host = mmc_priv(mmc); void __iomem *iobase = host->base; struct mmc_command *cmd = mrq->cmd; u32 cmdreg = 0, xfer = 0, intr = 0; unsigned long flags; unsigned int timeout; BUG_ON(host->mrq != NULL); host->mrq = mrq; dev_dbg(host->dev, "cmd %d (hw state 0x%04x)\n", cmd->opcode, mvsd_read(MVSD_HW_STATE)); cmdreg = MVSD_CMD_INDEX(cmd->opcode); if (cmd->flags & MMC_RSP_BUSY) cmdreg |= MVSD_CMD_RSP_48BUSY; else if (cmd->flags & MMC_RSP_136) cmdreg |= MVSD_CMD_RSP_136; else if (cmd->flags & MMC_RSP_PRESENT) cmdreg |= MVSD_CMD_RSP_48; else cmdreg |= MVSD_CMD_RSP_NONE; if (cmd->flags & MMC_RSP_CRC) cmdreg |= MVSD_CMD_CHECK_CMDCRC; if (cmd->flags & MMC_RSP_OPCODE) cmdreg |= MVSD_CMD_INDX_CHECK; if (cmd->flags & MMC_RSP_PRESENT) { cmdreg |= MVSD_UNEXPECTED_RESP; intr |= MVSD_NOR_UNEXP_RSP; } if (mrq->data) { struct mmc_data *data = mrq->data; int pio; cmdreg |= MVSD_CMD_DATA_PRESENT | MVSD_CMD_CHECK_DATACRC16; xfer |= MVSD_XFER_MODE_HW_WR_DATA_EN; if (data->flags & MMC_DATA_READ) xfer |= MVSD_XFER_MODE_TO_HOST; pio = mvsd_setup_data(host, data); if (pio) { xfer |= MVSD_XFER_MODE_PIO; /* PIO section of mvsd_irq has comments on those bits */ if (data->flags & MMC_DATA_WRITE) intr |= MVSD_NOR_TX_AVAIL; else if (host->pio_size > 32) intr |= MVSD_NOR_RX_FIFO_8W; else intr |= MVSD_NOR_RX_READY; } if (data->stop) { struct mmc_command *stop = data->stop; u32 cmd12reg = 0; mvsd_write(MVSD_AUTOCMD12_ARG_LOW, stop->arg & 0xffff); mvsd_write(MVSD_AUTOCMD12_ARG_HI, stop->arg >> 16); if (stop->flags & MMC_RSP_BUSY) cmd12reg |= MVSD_AUTOCMD12_BUSY; if (stop->flags & MMC_RSP_OPCODE) cmd12reg |= MVSD_AUTOCMD12_INDX_CHECK; cmd12reg |= MVSD_AUTOCMD12_INDEX(stop->opcode); mvsd_write(MVSD_AUTOCMD12_CMD, cmd12reg); xfer |= MVSD_XFER_MODE_AUTO_CMD12; intr |= MVSD_NOR_AUTOCMD12_DONE; } else { intr |= MVSD_NOR_XFER_DONE; } } else { intr |= MVSD_NOR_CMD_DONE; } mvsd_write(MVSD_ARG_LOW, cmd->arg & 0xffff); mvsd_write(MVSD_ARG_HI, cmd->arg >> 16); spin_lock_irqsave(&host->lock, flags); host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN; host->xfer_mode |= xfer; mvsd_write(MVSD_XFER_MODE, host->xfer_mode); mvsd_write(MVSD_NOR_INTR_STATUS, ~MVSD_NOR_CARD_INT); mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); mvsd_write(MVSD_CMD, cmdreg); host->intr_en &= MVSD_NOR_CARD_INT; host->intr_en |= intr | MVSD_NOR_ERROR; mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_write(MVSD_ERR_INTR_EN, 0xffff); timeout = cmd->busy_timeout ? 
cmd->busy_timeout : 5000; mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout)); spin_unlock_irqrestore(&host->lock, flags); } static u32 mvsd_finish_cmd(struct mvsd_host *host, struct mmc_command *cmd, u32 err_status) { void __iomem *iobase = host->base; if (cmd->flags & MMC_RSP_136) { unsigned int response[8], i; for (i = 0; i < 8; i++) response[i] = mvsd_read(MVSD_RSP(i)); cmd->resp[0] = ((response[0] & 0x03ff) << 22) | ((response[1] & 0xffff) << 6) | ((response[2] & 0xfc00) >> 10); cmd->resp[1] = ((response[2] & 0x03ff) << 22) | ((response[3] & 0xffff) << 6) | ((response[4] & 0xfc00) >> 10); cmd->resp[2] = ((response[4] & 0x03ff) << 22) | ((response[5] & 0xffff) << 6) | ((response[6] & 0xfc00) >> 10); cmd->resp[3] = ((response[6] & 0x03ff) << 22) | ((response[7] & 0x3fff) << 8); } else if (cmd->flags & MMC_RSP_PRESENT) { unsigned int response[3], i; for (i = 0; i < 3; i++) response[i] = mvsd_read(MVSD_RSP(i)); cmd->resp[0] = ((response[2] & 0x003f) << (8 - 8)) | ((response[1] & 0xffff) << (14 - 8)) | ((response[0] & 0x03ff) << (30 - 8)); cmd->resp[1] = ((response[0] & 0xfc00) >> 10); cmd->resp[2] = 0; cmd->resp[3] = 0; } if (err_status & MVSD_ERR_CMD_TIMEOUT) { cmd->error = -ETIMEDOUT; } else if (err_status & (MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT)) { cmd->error = -EILSEQ; } err_status &= ~(MVSD_ERR_CMD_TIMEOUT | MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT); return err_status; } static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data, u32 err_status) { void __iomem *iobase = host->base; if (host->pio_ptr) { host->pio_ptr = NULL; host->pio_size = 0; } else { dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags, mmc_get_dma_dir(data)); } if (err_status & MVSD_ERR_DATA_TIMEOUT) data->error = -ETIMEDOUT; else if (err_status & (MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT)) data->error = -EILSEQ; else if (err_status & MVSD_ERR_XFER_SIZE) data->error = -EBADE; err_status &= ~(MVSD_ERR_DATA_TIMEOUT | MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT | MVSD_ERR_XFER_SIZE); dev_dbg(host->dev, "data done: blocks_left=%d, bytes_left=%d\n", mvsd_read(MVSD_CURR_BLK_LEFT), mvsd_read(MVSD_CURR_BYTE_LEFT)); data->bytes_xfered = (data->blocks - mvsd_read(MVSD_CURR_BLK_LEFT)) * data->blksz; /* We can't be sure about the last block when errors are detected */ if (data->bytes_xfered && data->error) data->bytes_xfered -= data->blksz; /* Handle Auto cmd 12 response */ if (data->stop) { unsigned int response[3], i; for (i = 0; i < 3; i++) response[i] = mvsd_read(MVSD_AUTO_RSP(i)); data->stop->resp[0] = ((response[2] & 0x003f) << (8 - 8)) | ((response[1] & 0xffff) << (14 - 8)) | ((response[0] & 0x03ff) << (30 - 8)); data->stop->resp[1] = ((response[0] & 0xfc00) >> 10); data->stop->resp[2] = 0; data->stop->resp[3] = 0; if (err_status & MVSD_ERR_AUTOCMD12) { u32 err_cmd12 = mvsd_read(MVSD_AUTOCMD12_ERR_STATUS); dev_dbg(host->dev, "c12err 0x%04x\n", err_cmd12); if (err_cmd12 & MVSD_AUTOCMD12_ERR_NOTEXE) data->stop->error = -ENOEXEC; else if (err_cmd12 & MVSD_AUTOCMD12_ERR_TIMEOUT) data->stop->error = -ETIMEDOUT; else if (err_cmd12) data->stop->error = -EILSEQ; err_status &= ~MVSD_ERR_AUTOCMD12; } } return err_status; } static irqreturn_t mvsd_irq(int irq, void *dev) { struct mvsd_host *host = dev; void __iomem *iobase = host->base; u32 intr_status, intr_done_mask; int irq_handled = 0; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n", intr_status, 
mvsd_read(MVSD_NOR_INTR_EN), mvsd_read(MVSD_HW_STATE)); /* * It looks like, SDIO IP can issue one late, spurious irq * although all irqs should be disabled. To work around this, * bail out early, if we didn't expect any irqs to occur. */ if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) { dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n", mvsd_read(MVSD_NOR_INTR_STATUS), mvsd_read(MVSD_NOR_INTR_EN), mvsd_read(MVSD_ERR_INTR_STATUS), mvsd_read(MVSD_ERR_INTR_EN)); return IRQ_HANDLED; } spin_lock(&host->lock); /* PIO handling, if needed. Messy business... */ if (host->pio_size && (intr_status & host->intr_en & (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) { u16 *p = host->pio_ptr; int s = host->pio_size; while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) { readsw(iobase + MVSD_FIFO, p, 16); p += 16; s -= 32; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } /* * Normally we'd use < 32 here, but the RX_FIFO_8W bit * doesn't appear to assert when there is exactly 32 bytes * (8 words) left to fetch in a transfer. */ if (s <= 32) { while (s >= 4 && (intr_status & MVSD_NOR_RX_READY)) { put_unaligned(mvsd_read(MVSD_FIFO), p++); put_unaligned(mvsd_read(MVSD_FIFO), p++); s -= 4; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) { u16 val[2] = {0, 0}; val[0] = mvsd_read(MVSD_FIFO); val[1] = mvsd_read(MVSD_FIFO); memcpy(p, ((void *)&val) + 4 - s, s); s = 0; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s == 0) { host->intr_en &= ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W); mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); } else if (host->intr_en & MVSD_NOR_RX_FIFO_8W) { host->intr_en &= ~MVSD_NOR_RX_FIFO_8W; host->intr_en |= MVSD_NOR_RX_READY; mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); } } dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n", s, intr_status, mvsd_read(MVSD_HW_STATE)); host->pio_ptr = p; host->pio_size = s; irq_handled = 1; } else if (host->pio_size && (intr_status & host->intr_en & (MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) { u16 *p = host->pio_ptr; int s = host->pio_size; /* * The TX_FIFO_8W bit is unreliable. When set, bursting * 16 halfwords all at once in the FIFO drops data. Actually * TX_AVAIL does go off after only one word is pushed even if * TX_FIFO_8W remains set. 
*/ while (s >= 4 && (intr_status & MVSD_NOR_TX_AVAIL)) { mvsd_write(MVSD_FIFO, get_unaligned(p++)); mvsd_write(MVSD_FIFO, get_unaligned(p++)); s -= 4; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s < 4) { if (s && (intr_status & MVSD_NOR_TX_AVAIL)) { u16 val[2] = {0, 0}; memcpy(((void *)&val) + 4 - s, p, s); mvsd_write(MVSD_FIFO, val[0]); mvsd_write(MVSD_FIFO, val[1]); s = 0; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s == 0) { host->intr_en &= ~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W); mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); } } dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n", s, intr_status, mvsd_read(MVSD_HW_STATE)); host->pio_ptr = p; host->pio_size = s; irq_handled = 1; } mvsd_write(MVSD_NOR_INTR_STATUS, intr_status); intr_done_mask = MVSD_NOR_CARD_INT | MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W | MVSD_NOR_TX_FIFO_8W; if (intr_status & host->intr_en & ~intr_done_mask) { struct mmc_request *mrq = host->mrq; struct mmc_command *cmd = mrq->cmd; u32 err_status = 0; del_timer(&host->timer); host->mrq = NULL; host->intr_en &= MVSD_NOR_CARD_INT; mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_write(MVSD_ERR_INTR_EN, 0); spin_unlock(&host->lock); if (intr_status & MVSD_NOR_UNEXP_RSP) { cmd->error = -EPROTO; } else if (intr_status & MVSD_NOR_ERROR) { err_status = mvsd_read(MVSD_ERR_INTR_STATUS); dev_dbg(host->dev, "err 0x%04x\n", err_status); } err_status = mvsd_finish_cmd(host, cmd, err_status); if (mrq->data) err_status = mvsd_finish_data(host, mrq->data, err_status); if (err_status) { dev_err(host->dev, "unhandled error status %#04x\n", err_status); cmd->error = -ENOMSG; } mmc_request_done(host->mmc, mrq); irq_handled = 1; } else spin_unlock(&host->lock); if (intr_status & MVSD_NOR_CARD_INT) { mmc_signal_sdio_irq(host->mmc); irq_handled = 1; } if (irq_handled) return IRQ_HANDLED; dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n", intr_status, host->intr_en, host->pio_size); return IRQ_NONE; } static void mvsd_timeout_timer(struct timer_list *t) { struct mvsd_host *host = from_timer(host, t, timer); void __iomem *iobase = host->base; struct mmc_request *mrq; unsigned long flags; spin_lock_irqsave(&host->lock, flags); mrq = host->mrq; if (mrq) { dev_err(host->dev, "Timeout waiting for hardware interrupt.\n"); dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n", mvsd_read(MVSD_HW_STATE), mvsd_read(MVSD_NOR_INTR_STATUS), mvsd_read(MVSD_NOR_INTR_EN)); host->mrq = NULL; mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW); host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN; mvsd_write(MVSD_XFER_MODE, host->xfer_mode); host->intr_en &= MVSD_NOR_CARD_INT; mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_write(MVSD_ERR_INTR_EN, 0); mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); mrq->cmd->error = -ETIMEDOUT; mvsd_finish_cmd(host, mrq->cmd, 0); if (mrq->data) { mrq->data->error = -ETIMEDOUT; mvsd_finish_data(host, mrq->data, 0); } } spin_unlock_irqrestore(&host->lock, flags); if (mrq) mmc_request_done(host->mmc, mrq); } static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct mvsd_host *host = mmc_priv(mmc); void __iomem *iobase = host->base; unsigned long flags; spin_lock_irqsave(&host->lock, flags); if (enable) { host->xfer_mode |= MVSD_XFER_MODE_INT_CHK_EN; host->intr_en |= MVSD_NOR_CARD_INT; } else { host->xfer_mode &= ~MVSD_XFER_MODE_INT_CHK_EN; host->intr_en &= ~MVSD_NOR_CARD_INT; } mvsd_write(MVSD_XFER_MODE, host->xfer_mode); mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); spin_unlock_irqrestore(&host->lock, flags); } 
static void mvsd_power_up(struct mvsd_host *host) { void __iomem *iobase = host->base; dev_dbg(host->dev, "power up\n"); mvsd_write(MVSD_NOR_INTR_EN, 0); mvsd_write(MVSD_ERR_INTR_EN, 0); mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW); mvsd_write(MVSD_XFER_MODE, 0); mvsd_write(MVSD_NOR_STATUS_EN, 0xffff); mvsd_write(MVSD_ERR_STATUS_EN, 0xffff); mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff); mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); } static void mvsd_power_down(struct mvsd_host *host) { void __iomem *iobase = host->base; dev_dbg(host->dev, "power down\n"); mvsd_write(MVSD_NOR_INTR_EN, 0); mvsd_write(MVSD_ERR_INTR_EN, 0); mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW); mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK); mvsd_write(MVSD_NOR_STATUS_EN, 0); mvsd_write(MVSD_ERR_STATUS_EN, 0); mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff); mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); } static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct mvsd_host *host = mmc_priv(mmc); void __iomem *iobase = host->base; u32 ctrl_reg = 0; if (ios->power_mode == MMC_POWER_UP) mvsd_power_up(host); if (ios->clock == 0) { mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK); mvsd_write(MVSD_CLK_DIV, MVSD_BASE_DIV_MAX); host->clock = 0; dev_dbg(host->dev, "clock off\n"); } else if (ios->clock != host->clock) { u32 m = DIV_ROUND_UP(host->base_clock, ios->clock) - 1; if (m > MVSD_BASE_DIV_MAX) m = MVSD_BASE_DIV_MAX; mvsd_write(MVSD_CLK_DIV, m); host->clock = ios->clock; host->ns_per_clk = 1000000000 / (host->base_clock / (m+1)); dev_dbg(host->dev, "clock=%d (%d), div=0x%04x\n", ios->clock, host->base_clock / (m+1), m); } /* default transfer mode */ ctrl_reg |= MVSD_HOST_CTRL_BIG_ENDIAN; ctrl_reg &= ~MVSD_HOST_CTRL_LSB_FIRST; /* default to maximum timeout */ ctrl_reg |= MVSD_HOST_CTRL_TMOUT_MASK; ctrl_reg |= MVSD_HOST_CTRL_TMOUT_EN; if (ios->bus_mode == MMC_BUSMODE_PUSHPULL) ctrl_reg |= MVSD_HOST_CTRL_PUSH_PULL_EN; if (ios->bus_width == MMC_BUS_WIDTH_4) ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS; /* * The HI_SPEED_EN bit is causing trouble with many (but not all) * high speed SD, SDHC and SDIO cards. Not enabling that bit * makes all cards work. So let's just ignore that bit for now * and revisit this issue if problems for not enabling this bit * are ever reported. */ #if 0 if (ios->timing == MMC_TIMING_MMC_HS || ios->timing == MMC_TIMING_SD_HS) ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN; #endif host->ctrl = ctrl_reg; mvsd_write(MVSD_HOST_CTRL, ctrl_reg); dev_dbg(host->dev, "ctrl 0x%04x: %s %s %s\n", ctrl_reg, (ctrl_reg & MVSD_HOST_CTRL_PUSH_PULL_EN) ? "push-pull" : "open-drain", (ctrl_reg & MVSD_HOST_CTRL_DATA_WIDTH_4_BITS) ? "4bit-width" : "1bit-width", (ctrl_reg & MVSD_HOST_CTRL_HI_SPEED_EN) ? 
"high-speed" : ""); if (ios->power_mode == MMC_POWER_OFF) mvsd_power_down(host); } static const struct mmc_host_ops mvsd_ops = { .request = mvsd_request, .get_ro = mmc_gpio_get_ro, .set_ios = mvsd_set_ios, .enable_sdio_irq = mvsd_enable_sdio_irq, }; static void mv_conf_mbus_windows(struct mvsd_host *host, const struct mbus_dram_target_info *dram) { void __iomem *iobase = host->base; int i; for (i = 0; i < 4; i++) { writel(0, iobase + MVSD_WINDOW_CTRL(i)); writel(0, iobase + MVSD_WINDOW_BASE(i)); } for (i = 0; i < dram->num_cs; i++) { const struct mbus_dram_window *cs = dram->cs + i; writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) | (dram->mbus_dram_target_id << 4) | 1, iobase + MVSD_WINDOW_CTRL(i)); writel(cs->base, iobase + MVSD_WINDOW_BASE(i)); } } static int mvsd_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mmc_host *mmc = NULL; struct mvsd_host *host = NULL; const struct mbus_dram_target_info *dram; int ret, irq; if (!np) { dev_err(&pdev->dev, "no DT node\n"); return -ENODEV; } irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto out; } host = mmc_priv(mmc); host->mmc = mmc; host->dev = &pdev->dev; /* * Some non-DT platforms do not pass a clock, and the clock * frequency is passed through platform_data. On DT platforms, * a clock must always be passed, even if there is no gatable * clock associated to the SDIO interface (it can simply be a * fixed rate clock). */ host->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(host->clk)) { dev_err(&pdev->dev, "no clock associated\n"); ret = -EINVAL; goto out; } clk_prepare_enable(host->clk); mmc->ops = &mvsd_ops; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX); mmc->f_max = MVSD_CLOCKRATE_MAX; mmc->max_blk_size = 2048; mmc->max_blk_count = 65535; mmc->max_segs = 1; mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; host->base_clock = clk_get_rate(host->clk) / 2; ret = mmc_of_parse(mmc); if (ret < 0) goto out; if (maxfreq) mmc->f_max = maxfreq; spin_lock_init(&host->lock); host->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto out; } /* (Re-)program MBUS remapping windows if we are asked to. 
*/ dram = mv_mbus_dram_info(); if (dram) mv_conf_mbus_windows(host, dram); mvsd_power_down(host); ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host); if (ret) { dev_err(&pdev->dev, "cannot assign irq %d\n", irq); goto out; } timer_setup(&host->timer, mvsd_timeout_timer, 0); platform_set_drvdata(pdev, mmc); ret = mmc_add_host(mmc); if (ret) goto out; if (!(mmc->caps & MMC_CAP_NEEDS_POLL)) dev_dbg(&pdev->dev, "using GPIO for card detection\n"); else dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n"); return 0; out: if (mmc) { if (!IS_ERR(host->clk)) clk_disable_unprepare(host->clk); mmc_free_host(mmc); } return ret; } static void mvsd_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct mvsd_host *host = mmc_priv(mmc); mmc_remove_host(mmc); del_timer_sync(&host->timer); mvsd_power_down(host); if (!IS_ERR(host->clk)) clk_disable_unprepare(host->clk); mmc_free_host(mmc); } static const struct of_device_id mvsdio_dt_ids[] = { { .compatible = "marvell,orion-sdio" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mvsdio_dt_ids); static struct platform_driver mvsd_driver = { .probe = mvsd_probe, .remove = mvsd_remove, .driver = { .name = DRIVER_NAME, .probe_type = PROBE_PREFER_ASYNCHRONOUS, .of_match_table = mvsdio_dt_ids, }, }; module_platform_driver(mvsd_driver); /* maximum card clock frequency (default 50MHz) */ module_param(maxfreq, int, 0); /* force PIO transfers all the time */ module_param(nodma, int, 0); MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre"); MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:mvsdio");
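/*
 * Two small worked examples for the arithmetic above (numbers are
 * illustrative; MVSD_HOST_CTRL_TMOUT_MAX and MVSD_BASE_DIV_MAX come from
 * mvsdio.h and are not repeated here):
 *
 * mvsd_setup_data() timeout index: with a 50 MHz card clock,
 * host->ns_per_clk = 20, so a 100 ms data timeout gives
 *   tmout       = 100000000 / 20 = 5000000 clocks (+ data->timeout_clks)
 *   tmout_index = fls(5000000 - 1) - 12 = 23 - 12 = 11
 * i.e. the controller is programmed for the next power-of-two window
 * (2^23 clocks) that covers the requested timeout.
 *
 * mvsd_set_ios() clock divider: with base_clock = 100 MHz (the controller
 * clock divided by 2 in probe) and ios->clock = 25 MHz,
 *   m = DIV_ROUND_UP(100000000, 25000000) - 1 = 3
 * and the resulting card clock is base_clock / (m + 1) = 25 MHz.
 */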
// SPDX-License-Identifier: GPL-2.0-or-later /* * STV0680 USB Camera Driver * * Copyright (C) 2009 Hans de Goede <[email protected]> * * This module is adapted from the in kernel v4l1 stv680 driver: * * STV0680 USB Camera Driver, by Kevin Sisson ([email protected]) * * Thanks to STMicroelectronics for information on the usb commands, and * to Steve Miller at STM for his help and encouragement while I was * writing this driver. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "stv0680" #include "gspca.h" MODULE_AUTHOR("Hans de Goede <[email protected]>"); MODULE_DESCRIPTION("STV0680 USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ struct v4l2_pix_format mode; u8 orig_mode; u8 video_mode; u8 current_mode; }; static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val, int size) { int ret; u8 req_type = 0; unsigned int pipe = 0; switch (set) { case 0: /* 0xc1 */ req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT; pipe = usb_rcvctrlpipe(gspca_dev->dev, 0); break; case 1: /* 0x41 */ req_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT; pipe = usb_sndctrlpipe(gspca_dev->dev, 0); break; case 2: /* 0x80 */ req_type = USB_DIR_IN | USB_RECIP_DEVICE; pipe = usb_rcvctrlpipe(gspca_dev->dev, 0); break; case 3: /* 0x40 */ req_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE; pipe = usb_sndctrlpipe(gspca_dev->dev, 0); break; } ret = usb_control_msg(gspca_dev->dev, pipe, req, req_type, val, 0, gspca_dev->usb_buf, size, 500); if ((ret < 0) && (req != 0x0a)) pr_err("usb_control_msg error %i, request = 0x%x, error = %i\n", set, req, ret); return ret; } static int stv0680_handle_error(struct gspca_dev *gspca_dev, int ret) { stv_sndctrl(gspca_dev, 0, 0x80, 0, 0x02); /* Get Last Error */ gspca_err(gspca_dev, "last error: %i, command = 0x%x\n", gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]); return ret; } static int stv0680_get_video_mode(struct gspca_dev *gspca_dev) { /* Note not sure if this init of usb_buf is really necessary */ memset(gspca_dev->usb_buf, 0, 8); gspca_dev->usb_buf[0] = 0x0f; if (stv_sndctrl(gspca_dev, 0, 0x87, 0, 0x08) != 0x08) { gspca_err(gspca_dev, "Get_Camera_Mode failed\n"); return stv0680_handle_error(gspca_dev, -EIO); } return gspca_dev->usb_buf[0]; /* 01 = VGA, 03 = QVGA, 00 = CIF */ } static int stv0680_set_video_mode(struct gspca_dev *gspca_dev, u8 mode) { struct sd *sd = (struct sd *) gspca_dev; if (sd->current_mode == mode) return 0; memset(gspca_dev->usb_buf, 0, 8); gspca_dev->usb_buf[0] = mode; if (stv_sndctrl(gspca_dev, 3, 0x07, 0x0100, 0x08) != 0x08) { gspca_err(gspca_dev, "Set_Camera_Mode failed\n"); return stv0680_handle_error(gspca_dev, -EIO); } /* Verify we got what we've asked for */ if (stv0680_get_video_mode(gspca_dev) != mode) { gspca_err(gspca_dev, "Error setting camera video mode!\n"); return -EIO; } sd->current_mode = mode; return 0; } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { int ret; struct sd *sd = (struct sd *) gspca_dev; struct cam *cam = &gspca_dev->cam; /* Give the camera some time to settle, otherwise initialization will fail on hotplug, and yes it really needs a full second. 
*/ msleep(1000); /* ping camera to be sure STV0680 is present */ if (stv_sndctrl(gspca_dev, 0, 0x88, 0x5678, 0x02) != 0x02 || gspca_dev->usb_buf[0] != 0x56 || gspca_dev->usb_buf[1] != 0x78) { gspca_err(gspca_dev, "STV(e): camera ping failed!!\n"); return stv0680_handle_error(gspca_dev, -ENODEV); } /* get camera descriptor */ if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0200, 0x09) != 0x09) return stv0680_handle_error(gspca_dev, -ENODEV); if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0200, 0x22) != 0x22 || gspca_dev->usb_buf[7] != 0xa0 || gspca_dev->usb_buf[8] != 0x23) { gspca_err(gspca_dev, "Could not get descriptor 0200\n"); return stv0680_handle_error(gspca_dev, -ENODEV); } if (stv_sndctrl(gspca_dev, 0, 0x8a, 0, 0x02) != 0x02) return stv0680_handle_error(gspca_dev, -ENODEV); if (stv_sndctrl(gspca_dev, 0, 0x8b, 0, 0x24) != 0x24) return stv0680_handle_error(gspca_dev, -ENODEV); if (stv_sndctrl(gspca_dev, 0, 0x85, 0, 0x10) != 0x10) return stv0680_handle_error(gspca_dev, -ENODEV); if (!(gspca_dev->usb_buf[7] & 0x09)) { gspca_err(gspca_dev, "Camera supports neither CIF nor QVGA mode\n"); return -ENODEV; } if (gspca_dev->usb_buf[7] & 0x01) gspca_dbg(gspca_dev, D_PROBE, "Camera supports CIF mode\n"); if (gspca_dev->usb_buf[7] & 0x02) gspca_dbg(gspca_dev, D_PROBE, "Camera supports VGA mode\n"); if (gspca_dev->usb_buf[7] & 0x04) gspca_dbg(gspca_dev, D_PROBE, "Camera supports QCIF mode\n"); if (gspca_dev->usb_buf[7] & 0x08) gspca_dbg(gspca_dev, D_PROBE, "Camera supports QVGA mode\n"); if (gspca_dev->usb_buf[7] & 0x01) sd->video_mode = 0x00; /* CIF */ else sd->video_mode = 0x03; /* QVGA */ /* FW rev, ASIC rev, sensor ID */ gspca_dbg(gspca_dev, D_PROBE, "Firmware rev is %i.%i\n", gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]); gspca_dbg(gspca_dev, D_PROBE, "ASIC rev is %i.%i", gspca_dev->usb_buf[2], gspca_dev->usb_buf[3]); gspca_dbg(gspca_dev, D_PROBE, "Sensor ID is %i", (gspca_dev->usb_buf[4]*16) + (gspca_dev->usb_buf[5]>>4)); ret = stv0680_get_video_mode(gspca_dev); if (ret < 0) return ret; sd->current_mode = sd->orig_mode = ret; ret = stv0680_set_video_mode(gspca_dev, sd->video_mode); if (ret < 0) return ret; /* Get mode details */ if (stv_sndctrl(gspca_dev, 0, 0x8f, 0, 0x10) != 0x10) return stv0680_handle_error(gspca_dev, -EIO); cam->bulk = 1; cam->bulk_nurbs = 1; /* The cam cannot handle more */ cam->bulk_size = (gspca_dev->usb_buf[0] << 24) | (gspca_dev->usb_buf[1] << 16) | (gspca_dev->usb_buf[2] << 8) | (gspca_dev->usb_buf[3]); sd->mode.width = (gspca_dev->usb_buf[4] << 8) | (gspca_dev->usb_buf[5]); /* 322, 356, 644 */ sd->mode.height = (gspca_dev->usb_buf[6] << 8) | (gspca_dev->usb_buf[7]); /* 242, 292, 484 */ sd->mode.pixelformat = V4L2_PIX_FMT_STV0680; sd->mode.field = V4L2_FIELD_NONE; sd->mode.bytesperline = sd->mode.width; sd->mode.sizeimage = cam->bulk_size; sd->mode.colorspace = V4L2_COLORSPACE_SRGB; /* origGain = gspca_dev->usb_buf[12]; */ cam->cam_mode = &sd->mode; cam->nmodes = 1; ret = stv0680_set_video_mode(gspca_dev, sd->orig_mode); if (ret < 0) return ret; if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0100, 0x12) != 0x12 || gspca_dev->usb_buf[8] != 0x53 || gspca_dev->usb_buf[9] != 0x05) { pr_err("Could not get descriptor 0100\n"); return stv0680_handle_error(gspca_dev, -EIO); } return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return 0; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { int ret; struct sd *sd = (struct sd *) gspca_dev; ret = stv0680_set_video_mode(gspca_dev, sd->video_mode); if (ret < 0) 
return ret; if (stv_sndctrl(gspca_dev, 0, 0x85, 0, 0x10) != 0x10) return stv0680_handle_error(gspca_dev, -EIO); /* Start stream at: 0x0000 = CIF (352x288) 0x0100 = VGA (640x480) 0x0300 = QVGA (320x240) */ if (stv_sndctrl(gspca_dev, 1, 0x09, sd->video_mode << 8, 0x0) != 0x0) return stv0680_handle_error(gspca_dev, -EIO); return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { /* This is a high priority command; it stops all lower order cmds */ if (stv_sndctrl(gspca_dev, 1, 0x04, 0x0000, 0x0) != 0x0) stv0680_handle_error(gspca_dev, -EIO); } static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (!sd->gspca_dev.present) return; stv0680_set_video_mode(gspca_dev, sd->orig_mode); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; /* Every now and then the camera sends a 16 byte packet, no idea what it contains, but it is not image data, when this happens the frame received before this packet is corrupt, so discard it. */ if (len != sd->mode.sizeimage) { gspca_dev->last_packet_type = DISCARD_PACKET; return; } /* Finish the previous frame, we do this upon reception of the next packet, even though it is already complete so that the strange 16 byte packets send after a corrupt frame can discard it. */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); /* Store the just received frame */ gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0553, 0x0202)}, {USB_DEVICE(0x041e, 0x4007)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* RxRPC kernel service interface definitions * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #ifndef _NET_RXRPC_H #define _NET_RXRPC_H #include <linux/rxrpc.h> #include <linux/ktime.h> struct key; struct sock; struct socket; struct rxrpc_call; struct rxrpc_peer; enum rxrpc_abort_reason; enum rxrpc_interruptibility { RXRPC_INTERRUPTIBLE, /* Call is interruptible */ RXRPC_PREINTERRUPTIBLE, /* Call can be cancelled whilst waiting for a slot */ RXRPC_UNINTERRUPTIBLE, /* Call should not be interruptible at all */ }; /* * Debug ID counter for tracing. */ extern atomic_t rxrpc_debug_id; typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *, unsigned long); typedef void (*rxrpc_notify_end_tx_t)(struct sock *, struct rxrpc_call *, unsigned long); typedef void (*rxrpc_notify_new_call_t)(struct sock *, struct rxrpc_call *, unsigned long); typedef void (*rxrpc_discard_new_call_t)(struct rxrpc_call *, unsigned long); typedef void (*rxrpc_user_attach_call_t)(struct rxrpc_call *, unsigned long); void rxrpc_kernel_new_call_notification(struct socket *, rxrpc_notify_new_call_t, rxrpc_discard_new_call_t); struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, struct rxrpc_peer *peer, struct key *key, unsigned long user_call_ID, s64 tx_total_len, u32 hard_timeout, gfp_t gfp, rxrpc_notify_rx_t notify_rx, u16 service_id, bool upgrade, enum rxrpc_interruptibility interruptibility, unsigned int debug_id); int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *, struct msghdr *, size_t, rxrpc_notify_end_tx_t); int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *, struct iov_iter *, size_t *, bool, u32 *, u16 *); bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, u32, int, enum rxrpc_abort_reason); void rxrpc_kernel_shutdown_call(struct socket *sock, struct rxrpc_call *call); void rxrpc_kernel_put_call(struct socket *sock, struct rxrpc_call *call); struct rxrpc_peer *rxrpc_kernel_lookup_peer(struct socket *sock, struct sockaddr_rxrpc *srx, gfp_t gfp); void rxrpc_kernel_put_peer(struct rxrpc_peer *peer); struct rxrpc_peer *rxrpc_kernel_get_peer(struct rxrpc_peer *peer); struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call); const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer); const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer); unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *); int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, rxrpc_user_attach_call_t, unsigned long, gfp_t, unsigned int); void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); void rxrpc_kernel_set_max_life(struct socket *, struct rxrpc_call *, unsigned long); int rxrpc_sock_set_min_security_level(struct sock *sk, unsigned int val); int rxrpc_sock_set_security_keyring(struct sock *, struct key *); #endif /* _NET_RXRPC_H */
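/*
 * Sketch of how a kernel service might drive a single client call with the
 * API above (abridged and hypothetical: AF_RXRPC socket setup, the
 * sockaddr_rxrpc contents, error handling and the notify_rx callback are all
 * omitted; see fs/afs for a real in-tree user):
 *
 *   peer = rxrpc_kernel_lookup_peer(sock, &srx, GFP_KERNEL);
 *   call = rxrpc_kernel_begin_call(sock, peer, key, user_call_ID,
 *                                  tx_total_len, 0, GFP_KERNEL, notify_rx,
 *                                  service_id, false, RXRPC_INTERRUPTIBLE,
 *                                  atomic_inc_return(&rxrpc_debug_id));
 *   rxrpc_kernel_send_data(sock, call, &msg, len, NULL);
 *   rxrpc_kernel_recv_data(sock, call, &iter, &count, true, &abort_code,
 *                          &service);
 *   rxrpc_kernel_shutdown_call(sock, call);
 *   rxrpc_kernel_put_call(sock, call);
 *   rxrpc_kernel_put_peer(peer);
 */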
// SPDX-License-Identifier: GPL-2.0 /* * Renesas RPC-IF core driver * * Copyright (C) 2018-2019 Renesas Solutions Corp. * Copyright (C) 2019 Macronix International Co., Ltd. * Copyright (C) 2019-2020 Cogent Embedded, Inc. */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/regmap.h> #include <linux/reset.h> #include <memory/renesas-rpc-if.h> #define RPCIF_CMNCR 0x0000 /* R/W */ #define RPCIF_CMNCR_MD BIT(31) #define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22) #define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20) #define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18) #define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16) #define RPCIF_CMNCR_MOIIO(val) (RPCIF_CMNCR_MOIIO0(val) | RPCIF_CMNCR_MOIIO1(val) | \ RPCIF_CMNCR_MOIIO2(val) | RPCIF_CMNCR_MOIIO3(val)) #define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* documented for RZ/G2L */ #define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* documented for RZ/G2L */ #define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8) #define RPCIF_CMNCR_IOFV(val) (RPCIF_CMNCR_IO0FV(val) | RPCIF_CMNCR_IO2FV(val) | \ RPCIF_CMNCR_IO3FV(val)) #define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0) #define RPCIF_SSLDR 0x0004 /* R/W */ #define RPCIF_SSLDR_SPNDL(d) (((d) & 0x7) << 16) #define RPCIF_SSLDR_SLNDL(d) (((d) & 0x7) << 8) #define RPCIF_SSLDR_SCKDL(d) (((d) & 0x7) << 0) #define RPCIF_DRCR 0x000C /* R/W */ #define RPCIF_DRCR_SSLN BIT(24) #define RPCIF_DRCR_RBURST(v) ((((v) - 1) & 0x1F) << 16) #define RPCIF_DRCR_RCF BIT(9) #define RPCIF_DRCR_RBE BIT(8) #define RPCIF_DRCR_SSLE BIT(0) #define RPCIF_DRCMR 0x0010 /* R/W */ #define RPCIF_DRCMR_CMD(c) (((c) & 0xFF) << 16) #define RPCIF_DRCMR_OCMD(c) (((c) & 0xFF) << 0) #define RPCIF_DREAR 0x0014 /* R/W */ #define RPCIF_DREAR_EAV(c) (((c) & 0xF) << 16) #define RPCIF_DREAR_EAC(c) (((c) & 0x7) << 0) #define RPCIF_DROPR 0x0018 /* R/W */ #define RPCIF_DRENR 0x001C /* R/W */ #define RPCIF_DRENR_CDB(o) (u32)((((o) & 0x3) << 30)) #define RPCIF_DRENR_OCDB(o) (((o) & 0x3) << 28) #define RPCIF_DRENR_ADB(o) (((o) & 0x3) << 24) #define RPCIF_DRENR_OPDB(o) (((o) & 0x3) << 20) #define RPCIF_DRENR_DRDB(o) (((o) & 0x3) << 16) #define RPCIF_DRENR_DME BIT(15) #define RPCIF_DRENR_CDE BIT(14) #define RPCIF_DRENR_OCDE BIT(12) #define RPCIF_DRENR_ADE(v) (((v) & 0xF) << 8) #define RPCIF_DRENR_OPDE(v) (((v) & 0xF) << 4) #define RPCIF_SMCR 0x0020 /* R/W */ #define RPCIF_SMCR_SSLKP BIT(8) #define RPCIF_SMCR_SPIRE BIT(2) #define RPCIF_SMCR_SPIWE BIT(1) #define RPCIF_SMCR_SPIE BIT(0) #define RPCIF_SMCMR 0x0024 /* R/W */ #define RPCIF_SMCMR_CMD(c) (((c) & 0xFF) << 16) #define RPCIF_SMCMR_OCMD(c) (((c) & 0xFF) << 0) #define RPCIF_SMADR 0x0028 /* R/W */ #define RPCIF_SMOPR 0x002C /* R/W */ #define RPCIF_SMOPR_OPD3(o) (((o) & 0xFF) << 24) #define RPCIF_SMOPR_OPD2(o) (((o) & 0xFF) << 16) #define RPCIF_SMOPR_OPD1(o) (((o) & 0xFF) << 8) #define RPCIF_SMOPR_OPD0(o) (((o) & 0xFF) << 0) #define RPCIF_SMENR 0x0030 /* R/W */ #define RPCIF_SMENR_CDB(o) (((o) & 0x3) << 30) #define RPCIF_SMENR_OCDB(o) (((o) & 0x3) << 28) #define RPCIF_SMENR_ADB(o) (((o) & 0x3) << 24) #define RPCIF_SMENR_OPDB(o) (((o) & 0x3) << 20) #define RPCIF_SMENR_SPIDB(o) (((o) & 0x3) << 16) #define RPCIF_SMENR_DME BIT(15) #define RPCIF_SMENR_CDE BIT(14) #define RPCIF_SMENR_OCDE BIT(12) #define RPCIF_SMENR_ADE(v) (((v) & 0xF) << 8) #define RPCIF_SMENR_OPDE(v) (((v) & 0xF) << 4) #define RPCIF_SMENR_SPIDE(v) (((v) & 0xF) << 0) #define RPCIF_SMRDR0 0x0038 /* R */ #define 
RPCIF_SMRDR1 0x003C /* R */ #define RPCIF_SMWDR0 0x0040 /* W */ #define RPCIF_SMWDR1 0x0044 /* W */ #define RPCIF_CMNSR 0x0048 /* R */ #define RPCIF_CMNSR_SSLF BIT(1) #define RPCIF_CMNSR_TEND BIT(0) #define RPCIF_DRDMCR 0x0058 /* R/W */ #define RPCIF_DMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0) #define RPCIF_DRDRENR 0x005C /* R/W */ #define RPCIF_DRDRENR_HYPE(v) (((v) & 0x7) << 12) #define RPCIF_DRDRENR_ADDRE BIT(8) #define RPCIF_DRDRENR_OPDRE BIT(4) #define RPCIF_DRDRENR_DRDRE BIT(0) #define RPCIF_SMDMCR 0x0060 /* R/W */ #define RPCIF_SMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0) #define RPCIF_SMDRENR 0x0064 /* R/W */ #define RPCIF_SMDRENR_HYPE(v) (((v) & 0x7) << 12) #define RPCIF_SMDRENR_ADDRE BIT(8) #define RPCIF_SMDRENR_OPDRE BIT(4) #define RPCIF_SMDRENR_SPIDRE BIT(0) #define RPCIF_PHYADD 0x0070 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */ #define RPCIF_PHYWR 0x0074 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */ #define RPCIF_PHYCNT 0x007C /* R/W */ #define RPCIF_PHYCNT_CAL BIT(31) #define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22) #define RPCIF_PHYCNT_EXDS BIT(21) #define RPCIF_PHYCNT_OCT BIT(20) #define RPCIF_PHYCNT_DDRCAL BIT(19) #define RPCIF_PHYCNT_HS BIT(18) #define RPCIF_PHYCNT_CKSEL(v) (((v) & 0x3) << 16) /* valid only for RZ/G2L */ #define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15 | ((v) & 0x8) << 24) /* valid for R-Car and RZ/G2{E,H,M,N} */ #define RPCIF_PHYCNT_WBUF2 BIT(4) #define RPCIF_PHYCNT_WBUF BIT(2) #define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0) #define RPCIF_PHYCNT_PHYMEM_MASK GENMASK(1, 0) #define RPCIF_PHYOFFSET1 0x0080 /* R/W */ #define RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28) #define RPCIF_PHYOFFSET2 0x0084 /* R/W */ #define RPCIF_PHYOFFSET2_OCTTMG(v) (((v) & 0x7) << 8) #define RPCIF_PHYINT 0x0088 /* R/W */ #define RPCIF_PHYINT_WPVAL BIT(1) static const struct regmap_range rpcif_volatile_ranges[] = { regmap_reg_range(RPCIF_SMRDR0, RPCIF_SMRDR1), regmap_reg_range(RPCIF_SMWDR0, RPCIF_SMWDR1), regmap_reg_range(RPCIF_CMNSR, RPCIF_CMNSR), }; static const struct regmap_access_table rpcif_volatile_table = { .yes_ranges = rpcif_volatile_ranges, .n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges), }; struct rpcif_info { enum rpcif_type type; u8 strtim; }; struct rpcif_priv { struct device *dev; void __iomem *base; void __iomem *dirmap; struct regmap *regmap; struct reset_control *rstc; struct platform_device *vdev; size_t size; const struct rpcif_info *info; enum rpcif_data_dir dir; u8 bus_size; u8 xfer_size; void *buffer; u32 xferlen; u32 smcr; u32 smadr; u32 command; /* DRCMR or SMCMR */ u32 option; /* DROPR or SMOPR */ u32 enable; /* DRENR or SMENR */ u32 dummy; /* DRDMCR or SMDMCR */ u32 ddr; /* DRDRENR or SMDRENR */ }; static const struct rpcif_info rpcif_info_r8a7796 = { .type = RPCIF_RCAR_GEN3, .strtim = 6, }; static const struct rpcif_info rpcif_info_gen3 = { .type = RPCIF_RCAR_GEN3, .strtim = 7, }; static const struct rpcif_info rpcif_info_rz_g2l = { .type = RPCIF_RZ_G2L, .strtim = 7, }; static const struct rpcif_info rpcif_info_gen4 = { .type = RPCIF_RCAR_GEN4, .strtim = 15, }; /* * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with * proper width. Requires rpcif_priv.xfer_size to be correctly set before! 
*/ static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val) { struct rpcif_priv *rpc = context; switch (reg) { case RPCIF_SMRDR0: case RPCIF_SMWDR0: switch (rpc->xfer_size) { case 1: *val = readb(rpc->base + reg); return 0; case 2: *val = readw(rpc->base + reg); return 0; case 4: case 8: *val = readl(rpc->base + reg); return 0; default: return -EILSEQ; } case RPCIF_SMRDR1: case RPCIF_SMWDR1: if (rpc->xfer_size != 8) return -EILSEQ; break; } *val = readl(rpc->base + reg); return 0; } static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val) { struct rpcif_priv *rpc = context; switch (reg) { case RPCIF_SMWDR0: switch (rpc->xfer_size) { case 1: writeb(val, rpc->base + reg); return 0; case 2: writew(val, rpc->base + reg); return 0; case 4: case 8: writel(val, rpc->base + reg); return 0; default: return -EILSEQ; } case RPCIF_SMWDR1: if (rpc->xfer_size != 8) return -EILSEQ; break; case RPCIF_SMRDR0: case RPCIF_SMRDR1: return -EPERM; } writel(val, rpc->base + reg); return 0; } static const struct regmap_config rpcif_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, .reg_read = rpcif_reg_read, .reg_write = rpcif_reg_write, .fast_io = true, .max_register = RPCIF_PHYINT, .volatile_table = &rpcif_volatile_table, }; int rpcif_sw_init(struct rpcif *rpcif, struct device *dev) { struct rpcif_priv *rpc = dev_get_drvdata(dev); rpcif->dev = dev; rpcif->dirmap = rpc->dirmap; rpcif->size = rpc->size; return 0; } EXPORT_SYMBOL(rpcif_sw_init); static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif_priv *rpc) { regmap_write(rpc->regmap, RPCIF_PHYWR, 0xa5390000); regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000000); regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080); regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000022); regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080); regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000024); regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_CKSEL(3), RPCIF_PHYCNT_CKSEL(3)); regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00000030); regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032); } int rpcif_hw_init(struct device *dev, bool hyperflash) { struct rpcif_priv *rpc = dev_get_drvdata(dev); u32 dummy; int ret; ret = pm_runtime_resume_and_get(dev); if (ret) return ret; if (rpc->info->type == RPCIF_RZ_G2L) { ret = reset_control_reset(rpc->rstc); if (ret) return ret; usleep_range(200, 300); rpcif_rzg2l_timing_adjust_sdr(rpc); } regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_PHYMEM_MASK, RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0)); /* DMA Transfer is not supported */ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_HS, 0); regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, /* create mask with all affected bits set */ RPCIF_PHYCNT_STRTIM(BIT(fls(rpc->info->strtim)) - 1), RPCIF_PHYCNT_STRTIM(rpc->info->strtim)); regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET1, RPCIF_PHYOFFSET1_DDRTMG(3), RPCIF_PHYOFFSET1_DDRTMG(3)); regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET2, RPCIF_PHYOFFSET2_OCTTMG(7), RPCIF_PHYOFFSET2_OCTTMG(4)); if (hyperflash) regmap_update_bits(rpc->regmap, RPCIF_PHYINT, RPCIF_PHYINT_WPVAL, 0); if (rpc->info->type == RPCIF_RZ_G2L) regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_IOFV(3) | RPCIF_CMNCR_BSZ(3), RPCIF_CMNCR_MOIIO(1) | RPCIF_CMNCR_IOFV(3) | RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0)); else regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_BSZ(3), RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_BSZ(hyperflash ? 
1 : 0)); /* Set RCF after BSZ update */ regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF); /* Dummy read according to spec */ regmap_read(rpc->regmap, RPCIF_DRCR, &dummy); regmap_write(rpc->regmap, RPCIF_SSLDR, RPCIF_SSLDR_SPNDL(7) | RPCIF_SSLDR_SLNDL(7) | RPCIF_SSLDR_SCKDL(7)); pm_runtime_put(dev); rpc->bus_size = hyperflash ? 2 : 1; return 0; } EXPORT_SYMBOL(rpcif_hw_init); static int wait_msg_xfer_end(struct rpcif_priv *rpc) { u32 sts; return regmap_read_poll_timeout(rpc->regmap, RPCIF_CMNSR, sts, sts & RPCIF_CMNSR_TEND, 0, USEC_PER_SEC); } static u8 rpcif_bits_set(struct rpcif_priv *rpc, u32 nbytes) { if (rpc->bus_size == 2) nbytes /= 2; nbytes = clamp(nbytes, 1U, 4U); return GENMASK(3, 4 - nbytes); } static u8 rpcif_bit_size(u8 buswidth) { return buswidth > 4 ? 2 : ilog2(buswidth); } void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs, size_t *len) { struct rpcif_priv *rpc = dev_get_drvdata(dev); rpc->smcr = 0; rpc->smadr = 0; rpc->enable = 0; rpc->command = 0; rpc->option = 0; rpc->dummy = 0; rpc->ddr = 0; rpc->xferlen = 0; if (op->cmd.buswidth) { rpc->enable = RPCIF_SMENR_CDE | RPCIF_SMENR_CDB(rpcif_bit_size(op->cmd.buswidth)); rpc->command = RPCIF_SMCMR_CMD(op->cmd.opcode); if (op->cmd.ddr) rpc->ddr = RPCIF_SMDRENR_HYPE(0x5); } if (op->ocmd.buswidth) { rpc->enable |= RPCIF_SMENR_OCDE | RPCIF_SMENR_OCDB(rpcif_bit_size(op->ocmd.buswidth)); rpc->command |= RPCIF_SMCMR_OCMD(op->ocmd.opcode); } if (op->addr.buswidth) { rpc->enable |= RPCIF_SMENR_ADB(rpcif_bit_size(op->addr.buswidth)); if (op->addr.nbytes == 4) rpc->enable |= RPCIF_SMENR_ADE(0xF); else rpc->enable |= RPCIF_SMENR_ADE(GENMASK( 2, 3 - op->addr.nbytes)); if (op->addr.ddr) rpc->ddr |= RPCIF_SMDRENR_ADDRE; if (offs && len) rpc->smadr = *offs; else rpc->smadr = op->addr.val; } if (op->dummy.buswidth) { rpc->enable |= RPCIF_SMENR_DME; rpc->dummy = RPCIF_SMDMCR_DMCYC(op->dummy.ncycles); } if (op->option.buswidth) { rpc->enable |= RPCIF_SMENR_OPDE( rpcif_bits_set(rpc, op->option.nbytes)) | RPCIF_SMENR_OPDB(rpcif_bit_size(op->option.buswidth)); if (op->option.ddr) rpc->ddr |= RPCIF_SMDRENR_OPDRE; rpc->option = op->option.val; } rpc->dir = op->data.dir; if (op->data.buswidth) { u32 nbytes; rpc->buffer = op->data.buf.in; switch (op->data.dir) { case RPCIF_DATA_IN: rpc->smcr = RPCIF_SMCR_SPIRE; break; case RPCIF_DATA_OUT: rpc->smcr = RPCIF_SMCR_SPIWE; break; default: break; } if (op->data.ddr) rpc->ddr |= RPCIF_SMDRENR_SPIDRE; if (offs && len) nbytes = *len; else nbytes = op->data.nbytes; rpc->xferlen = nbytes; rpc->enable |= RPCIF_SMENR_SPIDB(rpcif_bit_size(op->data.buswidth)); } } EXPORT_SYMBOL(rpcif_prepare); int rpcif_manual_xfer(struct device *dev) { struct rpcif_priv *rpc = dev_get_drvdata(dev); u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 
8 : 4; int ret = 0; ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_CAL, RPCIF_PHYCNT_CAL); regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MD, RPCIF_CMNCR_MD); regmap_write(rpc->regmap, RPCIF_SMCMR, rpc->command); regmap_write(rpc->regmap, RPCIF_SMOPR, rpc->option); regmap_write(rpc->regmap, RPCIF_SMDMCR, rpc->dummy); regmap_write(rpc->regmap, RPCIF_SMDRENR, rpc->ddr); regmap_write(rpc->regmap, RPCIF_SMADR, rpc->smadr); smenr = rpc->enable; switch (rpc->dir) { case RPCIF_DATA_OUT: while (pos < rpc->xferlen) { u32 bytes_left = rpc->xferlen - pos; u32 nbytes, data[2], *p = data; smcr = rpc->smcr | RPCIF_SMCR_SPIE; /* nbytes may only be 1, 2, 4, or 8 */ nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left)); if (bytes_left > nbytes) smcr |= RPCIF_SMCR_SSLKP; smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes)); regmap_write(rpc->regmap, RPCIF_SMENR, smenr); rpc->xfer_size = nbytes; memcpy(data, rpc->buffer + pos, nbytes); if (nbytes == 8) regmap_write(rpc->regmap, RPCIF_SMWDR1, *p++); regmap_write(rpc->regmap, RPCIF_SMWDR0, *p); regmap_write(rpc->regmap, RPCIF_SMCR, smcr); ret = wait_msg_xfer_end(rpc); if (ret) goto err_out; pos += nbytes; smenr = rpc->enable & ~RPCIF_SMENR_CDE & ~RPCIF_SMENR_ADE(0xF); } break; case RPCIF_DATA_IN: /* * RPC-IF spoils the data for the commands without an address * phase (like RDID) in the manual mode, so we'll have to work * around this issue by using the external address space read * mode instead. */ if (!(smenr & RPCIF_SMENR_ADE(0xF)) && rpc->dirmap) { u32 dummy; regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MD, 0); regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RBURST(32) | RPCIF_DRCR_RBE); regmap_write(rpc->regmap, RPCIF_DRCMR, rpc->command); regmap_write(rpc->regmap, RPCIF_DREAR, RPCIF_DREAR_EAC(1)); regmap_write(rpc->regmap, RPCIF_DROPR, rpc->option); regmap_write(rpc->regmap, RPCIF_DRENR, smenr & ~RPCIF_SMENR_SPIDE(0xF)); regmap_write(rpc->regmap, RPCIF_DRDMCR, rpc->dummy); regmap_write(rpc->regmap, RPCIF_DRDRENR, rpc->ddr); memcpy_fromio(rpc->buffer, rpc->dirmap, rpc->xferlen); regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF); /* Dummy read according to spec */ regmap_read(rpc->regmap, RPCIF_DRCR, &dummy); break; } while (pos < rpc->xferlen) { u32 bytes_left = rpc->xferlen - pos; u32 nbytes, data[2], *p = data; /* nbytes may only be 1, 2, 4, or 8 */ nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left)); regmap_write(rpc->regmap, RPCIF_SMADR, rpc->smadr + pos); smenr &= ~RPCIF_SMENR_SPIDE(0xF); smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes)); regmap_write(rpc->regmap, RPCIF_SMENR, smenr); regmap_write(rpc->regmap, RPCIF_SMCR, rpc->smcr | RPCIF_SMCR_SPIE); rpc->xfer_size = nbytes; ret = wait_msg_xfer_end(rpc); if (ret) goto err_out; if (nbytes == 8) regmap_read(rpc->regmap, RPCIF_SMRDR1, p++); regmap_read(rpc->regmap, RPCIF_SMRDR0, p); memcpy(rpc->buffer + pos, data, nbytes); pos += nbytes; } break; default: regmap_write(rpc->regmap, RPCIF_SMENR, rpc->enable); regmap_write(rpc->regmap, RPCIF_SMCR, rpc->smcr | RPCIF_SMCR_SPIE); ret = wait_msg_xfer_end(rpc); if (ret) goto err_out; } exit: pm_runtime_put(dev); return ret; err_out: if (reset_control_reset(rpc->rstc)) dev_err(dev, "Failed to reset HW\n"); rpcif_hw_init(dev, rpc->bus_size == 2); goto exit; } EXPORT_SYMBOL(rpcif_manual_xfer); static void memcpy_fromio_readw(void *to, const void __iomem *from, size_t count) { const int maxw = (IS_ENABLED(CONFIG_64BIT)) ? 
8 : 4; u8 buf[2]; if (count && ((unsigned long)from & 1)) { *(u16 *)buf = __raw_readw((void __iomem *)((unsigned long)from & ~1)); *(u8 *)to = buf[1]; from++; to++; count--; } while (count >= 2 && !IS_ALIGNED((unsigned long)from, maxw)) { *(u16 *)to = __raw_readw(from); from += 2; to += 2; count -= 2; } while (count >= maxw) { #ifdef CONFIG_64BIT *(u64 *)to = __raw_readq(from); #else *(u32 *)to = __raw_readl(from); #endif from += maxw; to += maxw; count -= maxw; } while (count >= 2) { *(u16 *)to = __raw_readw(from); from += 2; to += 2; count -= 2; } if (count) { *(u16 *)buf = __raw_readw(from); *(u8 *)to = buf[0]; } } ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf) { struct rpcif_priv *rpc = dev_get_drvdata(dev); loff_t from = offs & (rpc->size - 1); size_t size = rpc->size - from; int ret; if (len > size) len = size; ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MD, 0); regmap_write(rpc->regmap, RPCIF_DRCR, 0); regmap_write(rpc->regmap, RPCIF_DRCMR, rpc->command); regmap_write(rpc->regmap, RPCIF_DREAR, RPCIF_DREAR_EAV(offs >> 25) | RPCIF_DREAR_EAC(1)); regmap_write(rpc->regmap, RPCIF_DROPR, rpc->option); regmap_write(rpc->regmap, RPCIF_DRENR, rpc->enable & ~RPCIF_SMENR_SPIDE(0xF)); regmap_write(rpc->regmap, RPCIF_DRDMCR, rpc->dummy); regmap_write(rpc->regmap, RPCIF_DRDRENR, rpc->ddr); if (rpc->bus_size == 2) memcpy_fromio_readw(buf, rpc->dirmap + from, len); else memcpy_fromio(buf, rpc->dirmap + from, len); pm_runtime_put(dev); return len; } EXPORT_SYMBOL(rpcif_dirmap_read); static int rpcif_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct platform_device *vdev; struct device_node *flash; struct rpcif_priv *rpc; struct resource *res; const char *name; int ret; flash = of_get_next_child(dev->of_node, NULL); if (!flash) { dev_warn(dev, "no flash node found\n"); return -ENODEV; } if (of_device_is_compatible(flash, "jedec,spi-nor")) { name = "rpc-if-spi"; } else if (of_device_is_compatible(flash, "cfi-flash")) { name = "rpc-if-hyperflash"; } else { of_node_put(flash); dev_warn(dev, "unknown flash type\n"); return -ENODEV; } of_node_put(flash); rpc = devm_kzalloc(dev, sizeof(*rpc), GFP_KERNEL); if (!rpc) return -ENOMEM; rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs"); if (IS_ERR(rpc->base)) return PTR_ERR(rpc->base); rpc->regmap = devm_regmap_init(dev, NULL, rpc, &rpcif_regmap_config); if (IS_ERR(rpc->regmap)) { dev_err(dev, "failed to init regmap for rpcif, error %ld\n", PTR_ERR(rpc->regmap)); return PTR_ERR(rpc->regmap); } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap"); rpc->dirmap = devm_ioremap_resource(dev, res); if (IS_ERR(rpc->dirmap)) return PTR_ERR(rpc->dirmap); rpc->size = resource_size(res); rpc->info = of_device_get_match_data(dev); rpc->rstc = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(rpc->rstc)) return PTR_ERR(rpc->rstc); vdev = platform_device_alloc(name, pdev->id); if (!vdev) return -ENOMEM; vdev->dev.parent = dev; rpc->dev = dev; rpc->vdev = vdev; platform_set_drvdata(pdev, rpc); ret = platform_device_add(vdev); if (ret) { platform_device_put(vdev); return ret; } return 0; } static void rpcif_remove(struct platform_device *pdev) { struct rpcif_priv *rpc = platform_get_drvdata(pdev); platform_device_unregister(rpc->vdev); } static const struct of_device_id rpcif_of_match[] = { { .compatible = "renesas,r8a7796-rpc-if", .data = &rpcif_info_r8a7796 }, { .compatible = "renesas,rcar-gen3-rpc-if", .data 
= &rpcif_info_gen3 }, { .compatible = "renesas,rcar-gen4-rpc-if", .data = &rpcif_info_gen4 }, { .compatible = "renesas,rzg2l-rpc-if", .data = &rpcif_info_rz_g2l }, {}, }; MODULE_DEVICE_TABLE(of, rpcif_of_match); static struct platform_driver rpcif_driver = { .probe = rpcif_probe, .remove = rpcif_remove, .driver = { .name = "rpc-if", .of_match_table = rpcif_of_match, }, }; module_platform_driver(rpcif_driver); MODULE_DESCRIPTION("Renesas RPC-IF core driver"); MODULE_LICENSE("GPL v2");
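/*
 * Editorial usage sketch (not part of the driver): how a client such as a
 * SPI-NOR backend might describe a plain 0x03 READ before handing it to
 * rpcif_prepare() and rpcif_manual_xfer().  The struct rpcif_op field names
 * follow the accesses made in rpcif_prepare() above and the definitions in
 * memory/renesas-rpc-if.h; the opcode, address and buffer values are purely
 * illustrative.
 */
static int example_spi_nor_read(struct device *rpc_dev, void *buf, size_t len)
{
	struct rpcif_op op = { };

	op.cmd.buswidth = 1;
	op.cmd.opcode = 0x03;		/* normal read */
	op.addr.buswidth = 1;
	op.addr.nbytes = 3;
	op.addr.val = 0;		/* flash offset 0 */
	op.data.buswidth = 1;
	op.data.dir = RPCIF_DATA_IN;
	op.data.nbytes = len;
	op.data.buf.in = buf;

	rpcif_prepare(rpc_dev, &op, NULL, NULL);
	return rpcif_manual_xfer(rpc_dev);
}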
// SPDX-License-Identifier: GPL-2.0-or-later /* * "LAPB via ethernet" driver release 001 * * This code REQUIRES 2.1.15 or higher/ NET3.038 * * This is a "pseudo" network driver to allow LAPB over Ethernet. * * This driver can use any ethernet destination address, and can be * limited to accept frames from one dedicated ethernet card only. * * History * LAPBETH 001 Jonathan Naylor Cloned from bpqether.c * 2000-10-29 Henner Eisen lapb_data_indication() return status. * 2000-11-14 Henner Eisen dev_hold/put, NETDEV_GOING_DOWN support */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/uaccess.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/stat.h> #include <linux/module.h> #include <linux/lapb.h> #include <linux/init.h> #include <net/x25device.h> static const u8 bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; /* If this number is made larger, check that the temporary string buffer * in lapbeth_new_device is large enough to store the probe device name. */ #define MAXLAPBDEV 100 struct lapbethdev { struct list_head node; struct net_device *ethdev; /* link to ethernet device */ struct net_device *axdev; /* lapbeth device (lapb#) */ bool up; spinlock_t up_lock; /* Protects "up" */ struct sk_buff_head rx_queue; struct napi_struct napi; }; static LIST_HEAD(lapbeth_devices); static void lapbeth_connected(struct net_device *dev, int reason); static void lapbeth_disconnected(struct net_device *dev, int reason); /* ------------------------------------------------------------------------ */ /* Get the LAPB device for the ethernet device */ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev) { struct lapbethdev *lapbeth; list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) { if (lapbeth->ethdev == dev) return lapbeth; } return NULL; } static __inline__ int dev_is_ethdev(struct net_device *dev) { return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5); } /* ------------------------------------------------------------------------ */ static int lapbeth_napi_poll(struct napi_struct *napi, int budget) { struct lapbethdev *lapbeth = container_of(napi, struct lapbethdev, napi); struct sk_buff *skb; int processed = 0; for (; processed < budget; ++processed) { skb = skb_dequeue(&lapbeth->rx_queue); if (!skb) break; netif_receive_skb_core(skb); } if (processed < budget) napi_complete(napi); return processed; } /* Receive a LAPB frame via an ethernet interface. 
*/ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { int len, err; struct lapbethdev *lapbeth; if (dev_net(dev) != &init_net) goto drop; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) return NET_RX_DROP; if (!pskb_may_pull(skb, 2)) goto drop; rcu_read_lock(); lapbeth = lapbeth_get_x25_dev(dev); if (!lapbeth) goto drop_unlock_rcu; spin_lock_bh(&lapbeth->up_lock); if (!lapbeth->up) goto drop_unlock; len = skb->data[0] + skb->data[1] * 256; dev->stats.rx_packets++; dev->stats.rx_bytes += len; skb_pull(skb, 2); /* Remove the length bytes */ skb_trim(skb, len); /* Set the length of the data */ err = lapb_data_received(lapbeth->axdev, skb); if (err != LAPB_OK) { printk(KERN_DEBUG "lapbether: lapb_data_received err - %d\n", err); goto drop_unlock; } out: spin_unlock_bh(&lapbeth->up_lock); rcu_read_unlock(); return 0; drop_unlock: kfree_skb(skb); goto out; drop_unlock_rcu: rcu_read_unlock(); drop: kfree_skb(skb); return 0; } static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) { struct lapbethdev *lapbeth = netdev_priv(dev); unsigned char *ptr; if (skb_cow(skb, 1)) { kfree_skb(skb); return NET_RX_DROP; } skb_push(skb, 1); ptr = skb->data; *ptr = X25_IFACE_DATA; skb->protocol = x25_type_trans(skb, dev); skb_queue_tail(&lapbeth->rx_queue, skb); napi_schedule(&lapbeth->napi); return NET_RX_SUCCESS; } /* Send a LAPB frame via an ethernet interface */ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, struct net_device *dev) { struct lapbethdev *lapbeth = netdev_priv(dev); int err; spin_lock_bh(&lapbeth->up_lock); if (!lapbeth->up) goto drop; /* There should be a pseudo header of 1 byte added by upper layers. * Check to make sure it is there before reading it. 
*/ if (skb->len < 1) goto drop; switch (skb->data[0]) { case X25_IFACE_DATA: break; case X25_IFACE_CONNECT: err = lapb_connect_request(dev); if (err == LAPB_CONNECTED) lapbeth_connected(dev, LAPB_OK); else if (err != LAPB_OK) pr_err("lapb_connect_request error: %d\n", err); goto drop; case X25_IFACE_DISCONNECT: err = lapb_disconnect_request(dev); if (err == LAPB_NOTCONNECTED) lapbeth_disconnected(dev, LAPB_OK); else if (err != LAPB_OK) pr_err("lapb_disconnect_request err: %d\n", err); fallthrough; default: goto drop; } skb_pull(skb, 1); err = lapb_data_request(dev, skb); if (err != LAPB_OK) { pr_err("lapb_data_request error - %d\n", err); goto drop; } out: spin_unlock_bh(&lapbeth->up_lock); return NETDEV_TX_OK; drop: kfree_skb(skb); goto out; } static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) { struct lapbethdev *lapbeth = netdev_priv(ndev); unsigned char *ptr; struct net_device *dev; int size = skb->len; ptr = skb_push(skb, 2); *ptr++ = size % 256; *ptr++ = size / 256; ndev->stats.tx_packets++; ndev->stats.tx_bytes += size; skb->dev = dev = lapbeth->ethdev; skb->protocol = htons(ETH_P_DEC); skb_reset_network_header(skb); dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); dev_queue_xmit(skb); } static void lapbeth_connected(struct net_device *dev, int reason) { struct lapbethdev *lapbeth = netdev_priv(dev); unsigned char *ptr; struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC); if (!skb) return; ptr = skb_put(skb, 1); *ptr = X25_IFACE_CONNECT; skb->protocol = x25_type_trans(skb, dev); skb_queue_tail(&lapbeth->rx_queue, skb); napi_schedule(&lapbeth->napi); } static void lapbeth_disconnected(struct net_device *dev, int reason) { struct lapbethdev *lapbeth = netdev_priv(dev); unsigned char *ptr; struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC); if (!skb) return; ptr = skb_put(skb, 1); *ptr = X25_IFACE_DISCONNECT; skb->protocol = x25_type_trans(skb, dev); skb_queue_tail(&lapbeth->rx_queue, skb); napi_schedule(&lapbeth->napi); } /* Set AX.25 callsign */ static int lapbeth_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = addr; dev_addr_set(dev, sa->sa_data); return 0; } static const struct lapb_register_struct lapbeth_callbacks = { .connect_confirmation = lapbeth_connected, .connect_indication = lapbeth_connected, .disconnect_confirmation = lapbeth_disconnected, .disconnect_indication = lapbeth_disconnected, .data_indication = lapbeth_data_indication, .data_transmit = lapbeth_data_transmit, }; /* open/close a device */ static int lapbeth_open(struct net_device *dev) { struct lapbethdev *lapbeth = netdev_priv(dev); int err; napi_enable(&lapbeth->napi); err = lapb_register(dev, &lapbeth_callbacks); if (err != LAPB_OK) { napi_disable(&lapbeth->napi); pr_err("lapb_register error: %d\n", err); return -ENODEV; } spin_lock_bh(&lapbeth->up_lock); lapbeth->up = true; spin_unlock_bh(&lapbeth->up_lock); return 0; } static int lapbeth_close(struct net_device *dev) { struct lapbethdev *lapbeth = netdev_priv(dev); int err; spin_lock_bh(&lapbeth->up_lock); lapbeth->up = false; spin_unlock_bh(&lapbeth->up_lock); err = lapb_unregister(dev); if (err != LAPB_OK) pr_err("lapb_unregister error: %d\n", err); napi_disable(&lapbeth->napi); return 0; } /* ------------------------------------------------------------------------ */ static const struct net_device_ops lapbeth_netdev_ops = { .ndo_open = lapbeth_open, .ndo_stop = lapbeth_close, .ndo_start_xmit = lapbeth_xmit, .ndo_set_mac_address = 
lapbeth_set_mac_address, }; static void lapbeth_setup(struct net_device *dev) { dev->netdev_ops = &lapbeth_netdev_ops; dev->needs_free_netdev = true; dev->type = ARPHRD_X25; dev->hard_header_len = 0; dev->mtu = 1000; dev->addr_len = 0; } /* Setup a new device. */ static int lapbeth_new_device(struct net_device *dev) { struct net_device *ndev; struct lapbethdev *lapbeth; int rc = -ENOMEM; ASSERT_RTNL(); if (dev->type != ARPHRD_ETHER) return -EINVAL; ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN, lapbeth_setup); if (!ndev) goto out; /* When transmitting data: * first this driver removes a pseudo header of 1 byte, * then the lapb module prepends an LAPB header of at most 3 bytes, * then this driver prepends a length field of 2 bytes, * then the underlying Ethernet device prepends its own header. */ ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len + dev->needed_headroom; ndev->needed_tailroom = dev->needed_tailroom; lapbeth = netdev_priv(ndev); lapbeth->axdev = ndev; dev_hold(dev); lapbeth->ethdev = dev; lapbeth->up = false; spin_lock_init(&lapbeth->up_lock); skb_queue_head_init(&lapbeth->rx_queue); netif_napi_add_weight(ndev, &lapbeth->napi, lapbeth_napi_poll, 16); rc = -EIO; if (register_netdevice(ndev)) goto fail; list_add_rcu(&lapbeth->node, &lapbeth_devices); rc = 0; out: return rc; fail: dev_put(dev); free_netdev(ndev); goto out; } /* Free a lapb network device. */ static void lapbeth_free_device(struct lapbethdev *lapbeth) { dev_put(lapbeth->ethdev); list_del_rcu(&lapbeth->node); unregister_netdevice(lapbeth->axdev); } /* Handle device status changes. * * Called from notifier with RTNL held. */ static int lapbeth_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct lapbethdev *lapbeth; struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (dev_net(dev) != &init_net) return NOTIFY_DONE; if (!dev_is_ethdev(dev) && !lapbeth_get_x25_dev(dev)) return NOTIFY_DONE; switch (event) { case NETDEV_UP: /* New ethernet device -> new LAPB interface */ if (!lapbeth_get_x25_dev(dev)) lapbeth_new_device(dev); break; case NETDEV_GOING_DOWN: /* ethernet device closes -> close LAPB interface */ lapbeth = lapbeth_get_x25_dev(dev); if (lapbeth) dev_close(lapbeth->axdev); break; case NETDEV_UNREGISTER: /* ethernet device disappears -> remove LAPB interface */ lapbeth = lapbeth_get_x25_dev(dev); if (lapbeth) lapbeth_free_device(lapbeth); break; } return NOTIFY_DONE; } /* ------------------------------------------------------------------------ */ static struct packet_type lapbeth_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_DEC), .func = lapbeth_rcv, }; static struct notifier_block lapbeth_dev_notifier = { .notifier_call = lapbeth_device_event, }; static const char banner[] __initconst = KERN_INFO "LAPB Ethernet driver version 0.02\n"; static int __init lapbeth_init_driver(void) { dev_add_pack(&lapbeth_packet_type); register_netdevice_notifier(&lapbeth_dev_notifier); printk(banner); return 0; } module_init(lapbeth_init_driver); static void __exit lapbeth_cleanup_driver(void) { struct lapbethdev *lapbeth; struct list_head *entry, *tmp; dev_remove_pack(&lapbeth_packet_type); unregister_netdevice_notifier(&lapbeth_dev_notifier); rtnl_lock(); list_for_each_safe(entry, tmp, &lapbeth_devices) { lapbeth = list_entry(entry, struct lapbethdev, node); dev_put(lapbeth->ethdev); unregister_netdevice(lapbeth->axdev); } rtnl_unlock(); } module_exit(lapbeth_cleanup_driver); MODULE_AUTHOR("Jonathan Naylor <[email protected]>"); MODULE_DESCRIPTION("The 
unofficial LAPB over Ethernet driver"); MODULE_LICENSE("GPL");
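/*
 * Editorial worked example (not part of the driver): lapbeth_data_transmit()
 * prepends a two-byte little-endian length field -- size % 256 followed by
 * size / 256 -- and lapbeth_rcv() decodes it as data[0] + data[1] * 256.
 * A 300-byte LAPB frame is therefore framed as 0x2c 0x01 followed by the
 * frame itself (0x2c + 0x01 * 256 == 300).
 */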
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2020-21 Intel Corporation. */ #include "iosm_ipc_pcie.h" #include "iosm_ipc_protocol.h" static void ipc_write_dbell_reg(struct iosm_pcie *ipc_pcie, int irq_n, u32 data) { void __iomem *write_reg; /* Select the first doorbell register, which is only currently needed * by CP. */ write_reg = (void __iomem *)((u8 __iomem *)ipc_pcie->ipc_regs + ipc_pcie->doorbell_write + (irq_n * ipc_pcie->doorbell_reg_offset)); /* Fire the doorbell irq by writing data on the doorbell write pointer * register. */ iowrite32(data, write_reg); } void ipc_doorbell_fire(struct iosm_pcie *ipc_pcie, int irq_n, u32 data) { ipc_write_dbell_reg(ipc_pcie, irq_n, data); } /* Threaded Interrupt handler for MSI interrupts */ static irqreturn_t ipc_msi_interrupt(int irq, void *dev_id) { struct iosm_pcie *ipc_pcie = dev_id; int instance = irq - ipc_pcie->pci->irq; /* Shift the MSI irq actions to the IPC tasklet. IRQ_NONE means the * irq was not from the IPC device or could not be served. */ if (instance >= ipc_pcie->nvec) return IRQ_NONE; if (!test_bit(0, &ipc_pcie->suspend)) ipc_imem_irq_process(ipc_pcie->imem, instance); return IRQ_HANDLED; } void ipc_release_irq(struct iosm_pcie *ipc_pcie) { struct pci_dev *pdev = ipc_pcie->pci; if (pdev->msi_enabled) { while (--ipc_pcie->nvec >= 0) free_irq(pdev->irq + ipc_pcie->nvec, ipc_pcie); } pci_free_irq_vectors(pdev); } int ipc_acquire_irq(struct iosm_pcie *ipc_pcie) { struct pci_dev *pdev = ipc_pcie->pci; int i, rc = -EINVAL; ipc_pcie->nvec = pci_alloc_irq_vectors(pdev, IPC_MSI_VECTORS, IPC_MSI_VECTORS, PCI_IRQ_MSI); if (ipc_pcie->nvec < 0) { rc = ipc_pcie->nvec; goto error; } if (!pdev->msi_enabled) goto error; for (i = 0; i < ipc_pcie->nvec; ++i) { rc = request_threaded_irq(pdev->irq + i, NULL, ipc_msi_interrupt, IRQF_ONESHOT, KBUILD_MODNAME, ipc_pcie); if (rc) { dev_err(ipc_pcie->dev, "unable to grab IRQ, rc=%d", rc); ipc_pcie->nvec = i; ipc_release_irq(ipc_pcie); goto error; } } error: return rc; }
// SPDX-License-Identifier: GPL-2.0-or-later /* * GE watchdog userspace interface * * Author: Martyn Welch <[email protected]> * * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. * * Based on: mv64x60_wdt.c (MV64X60 watchdog userspace interface) * Author: James Chapman <[email protected]> */ /* TODO: * This driver does not provide support for the hardwares capability of sending * an interrupt at a programmable threshold. * * This driver currently can only support 1 watchdog - there are 2 in the * hardware that this driver supports. Thus one could be configured as a * process-based watchdog (via /dev/watchdog), the second (using the interrupt * capabilities) a kernel-based watchdog. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/compiler.h> #include <linux/init.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/fs.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/uaccess.h> #include <sysdev/fsl_soc.h> /* * The watchdog configuration register contains a pair of 2-bit fields, * 1. a reload field, bits 27-26, which triggers a reload of * the countdown register, and * 2. an enable field, bits 25-24, which toggles between * enabling and disabling the watchdog timer. * Bit 31 is a read-only field which indicates whether the * watchdog timer is currently enabled. * * The low 24 bits contain the timer reload value. */ #define GEF_WDC_ENABLE_SHIFT 24 #define GEF_WDC_SERVICE_SHIFT 26 #define GEF_WDC_ENABLED_SHIFT 31 #define GEF_WDC_ENABLED_TRUE 1 #define GEF_WDC_ENABLED_FALSE 0 /* Flags bits */ #define GEF_WDOG_FLAG_OPENED 0 static unsigned long wdt_flags; static int wdt_status; static void __iomem *gef_wdt_regs; static int gef_wdt_timeout; static int gef_wdt_count; static unsigned int bus_clk; static char expect_close; static DEFINE_SPINLOCK(gef_wdt_spinlock); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); static int gef_wdt_toggle_wdc(int enabled_predicate, int field_shift) { u32 data; u32 enabled; int ret = 0; spin_lock(&gef_wdt_spinlock); data = ioread32be(gef_wdt_regs); enabled = (data >> GEF_WDC_ENABLED_SHIFT) & 1; /* only toggle the requested field if enabled state matches predicate */ if ((enabled ^ enabled_predicate) == 0) { /* We write a 1, then a 2 -- to the appropriate field */ data = (1 << field_shift) | gef_wdt_count; iowrite32be(data, gef_wdt_regs); data = (2 << field_shift) | gef_wdt_count; iowrite32be(data, gef_wdt_regs); ret = 1; } spin_unlock(&gef_wdt_spinlock); return ret; } static void gef_wdt_service(void) { gef_wdt_toggle_wdc(GEF_WDC_ENABLED_TRUE, GEF_WDC_SERVICE_SHIFT); } static void gef_wdt_handler_enable(void) { if (gef_wdt_toggle_wdc(GEF_WDC_ENABLED_FALSE, GEF_WDC_ENABLE_SHIFT)) { gef_wdt_service(); pr_notice("watchdog activated\n"); } } static void gef_wdt_handler_disable(void) { if (gef_wdt_toggle_wdc(GEF_WDC_ENABLED_TRUE, GEF_WDC_ENABLE_SHIFT)) pr_notice("watchdog deactivated\n"); } static void gef_wdt_set_timeout(unsigned int timeout) { /* maximum bus cycle count is 0xFFFFFFFF */ if (timeout > 0xFFFFFFFF / bus_clk) timeout = 0xFFFFFFFF / bus_clk; /* Register only holds upper 24 bits, bit shifted into lower 24 */ gef_wdt_count = (timeout * bus_clk) >> 8; gef_wdt_timeout = timeout; } static ssize_t gef_wdt_write(struct file *file, const char 
__user *data, size_t len, loff_t *ppos) { if (len) { if (!nowayout) { size_t i; expect_close = 0; for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } gef_wdt_service(); } return len; } static long gef_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int timeout; int options; void __user *argp = (void __user *)arg; static const struct watchdog_info info = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .firmware_version = 0, .identity = "GE watchdog", }; switch (cmd) { case WDIOC_GETSUPPORT: if (copy_to_user(argp, &info, sizeof(info))) return -EFAULT; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: if (put_user(wdt_status, (int __user *)argp)) return -EFAULT; wdt_status &= ~WDIOF_KEEPALIVEPING; break; case WDIOC_SETOPTIONS: if (get_user(options, (int __user *)argp)) return -EFAULT; if (options & WDIOS_DISABLECARD) gef_wdt_handler_disable(); if (options & WDIOS_ENABLECARD) gef_wdt_handler_enable(); break; case WDIOC_KEEPALIVE: gef_wdt_service(); wdt_status |= WDIOF_KEEPALIVEPING; break; case WDIOC_SETTIMEOUT: if (get_user(timeout, (int __user *)argp)) return -EFAULT; gef_wdt_set_timeout(timeout); fallthrough; case WDIOC_GETTIMEOUT: if (put_user(gef_wdt_timeout, (int __user *)argp)) return -EFAULT; break; default: return -ENOTTY; } return 0; } static int gef_wdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(GEF_WDOG_FLAG_OPENED, &wdt_flags)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); gef_wdt_handler_enable(); return stream_open(inode, file); } static int gef_wdt_release(struct inode *inode, struct file *file) { if (expect_close == 42) gef_wdt_handler_disable(); else { pr_crit("unexpected close, not stopping timer!\n"); gef_wdt_service(); } expect_close = 0; clear_bit(GEF_WDOG_FLAG_OPENED, &wdt_flags); return 0; } static const struct file_operations gef_wdt_fops = { .owner = THIS_MODULE, .write = gef_wdt_write, .unlocked_ioctl = gef_wdt_ioctl, .compat_ioctl = compat_ptr_ioctl, .open = gef_wdt_open, .release = gef_wdt_release, }; static struct miscdevice gef_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &gef_wdt_fops, }; static int gef_wdt_probe(struct platform_device *dev) { int timeout = 10; u32 freq; bus_clk = 133; /* in MHz */ freq = fsl_get_sys_freq(); if (freq != -1) bus_clk = freq; /* Map devices registers into memory */ gef_wdt_regs = of_iomap(dev->dev.of_node, 0); if (gef_wdt_regs == NULL) return -ENOMEM; gef_wdt_set_timeout(timeout); gef_wdt_handler_disable(); /* in case timer was already running */ return misc_register(&gef_wdt_miscdev); } static void gef_wdt_remove(struct platform_device *dev) { misc_deregister(&gef_wdt_miscdev); gef_wdt_handler_disable(); iounmap(gef_wdt_regs); } static const struct of_device_id gef_wdt_ids[] = { { .compatible = "gef,fpga-wdt", }, {}, }; MODULE_DEVICE_TABLE(of, gef_wdt_ids); static struct platform_driver gef_wdt_driver = { .driver = { .name = "gef_wdt", .of_match_table = gef_wdt_ids, }, .probe = gef_wdt_probe, .remove = gef_wdt_remove, }; static int __init gef_wdt_init(void) { pr_info("GE watchdog driver\n"); return platform_driver_register(&gef_wdt_driver); } static void __exit gef_wdt_exit(void) { platform_driver_unregister(&gef_wdt_driver); } module_init(gef_wdt_init); module_exit(gef_wdt_exit); MODULE_AUTHOR("Martyn Welch <[email protected]>"); MODULE_DESCRIPTION("GE watchdog driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:gef_wdt");
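/*
 * Editorial worked example (not part of the driver): gef_wdt_toggle_wdc()
 * implements the "write a 1, then a 2" sequence described in the register
 * comment above.  Enabling the timer (field_shift == GEF_WDC_ENABLE_SHIFT,
 * i.e. 24) writes 0x01000000 | gef_wdt_count followed by
 * 0x02000000 | gef_wdt_count; servicing it (shift 26) writes
 * 0x04000000 | gef_wdt_count followed by 0x08000000 | gef_wdt_count.
 */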
/* SPDX-License-Identifier: GPL-2.0 */ /* * cxd2880_integ.h * Sony CXD2880 DVB-T2/T tuner + demodulator driver * integration layer common interface * * Copyright (C) 2016, 2017, 2018 Sony Semiconductor Solutions Corporation */ #ifndef CXD2880_INTEG_H #define CXD2880_INTEG_H #include "cxd2880_tnrdmd.h" #define CXD2880_TNRDMD_WAIT_INIT_TIMEOUT 500 #define CXD2880_TNRDMD_WAIT_INIT_INTVL 10 #define CXD2880_TNRDMD_WAIT_AGC_STABLE 100 int cxd2880_integ_init(struct cxd2880_tnrdmd *tnr_dmd); int cxd2880_integ_cancel(struct cxd2880_tnrdmd *tnr_dmd); int cxd2880_integ_check_cancellation(struct cxd2880_tnrdmd *tnr_dmd); #endif
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Header for Bestcomm General Buffer Descriptor tasks driver * * Copyright (C) 2007 Sylvain Munaut <[email protected]> * Copyright (C) 2006 AppSpec Computer Technologies Corp. * Jeff Gibbons <[email protected]> */ #ifndef __BESTCOMM_GEN_BD_H__ #define __BESTCOMM_GEN_BD_H__ struct bcom_gen_bd { u32 status; u32 buf_pa; }; extern struct bcom_task * bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo, int initiator, int ipr, int maxbufsize); extern int bcom_gen_bd_rx_reset(struct bcom_task *tsk); extern void bcom_gen_bd_rx_release(struct bcom_task *tsk); extern struct bcom_task * bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo, int initiator, int ipr); extern int bcom_gen_bd_tx_reset(struct bcom_task *tsk); extern void bcom_gen_bd_tx_release(struct bcom_task *tsk); /* PSC support utility wrappers */ struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len, phys_addr_t fifo, int maxbufsize); struct bcom_task * bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, phys_addr_t fifo); #endif /* __BESTCOMM_GEN_BD_H__ */
// SPDX-License-Identifier: GPL-2.0 #include "qcom-msm8974pro-sony-xperia-shinano-common.dtsi" / { model = "Sony Xperia Z3"; compatible = "sony,xperia-leo", "qcom,msm8974pro", "qcom,msm8974"; chassis-type = "handset"; gpio-keys { key-camera-snapshot { label = "camera_snapshot"; gpios = <&pm8941_gpios 3 GPIO_ACTIVE_LOW>; linux,code = <KEY_CAMERA>; debounce-interval = <15>; }; key-camera-focus { label = "camera_focus"; gpios = <&pm8941_gpios 4 GPIO_ACTIVE_LOW>; linux,code = <KEY_CAMERA_FOCUS>; debounce-interval = <15>; }; }; }; &gpio_keys_pin_a { pins = "gpio2", "gpio3", "gpio4", "gpio5"; }; &smbb { usb-charge-current-limit = <1500000>; qcom,fast-charge-safe-current = <3000000>; qcom,fast-charge-current-limit = <2150000>; qcom,fast-charge-safe-voltage = <4400000>; qcom,fast-charge-high-threshold-voltage = <4350000>; qcom,auto-recharge-threshold-voltage = <4280000>; qcom,minimum-input-voltage = <4200000>; status = "okay"; }; &synaptics_touchscreen { vio-supply = <&pm8941_s3>; };
// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>

/* Like a regular read(2), but allows specifying a timeout in microseconds.
 * Returns -EAGAIN on timeout.
 */
int read_with_timeout(int fd, char *buf, size_t count, long usec);
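/*
 * Editorial sketch (not necessarily the original implementation): one way the
 * helper declared above could be implemented with select(2).  Error handling
 * is minimal and the -errno return on select() failure is an assumption.
 */
#include <errno.h>
#include <sys/select.h>

int read_with_timeout(int fd, char *buf, size_t count, long usec)
{
	struct timeval tv = {
		.tv_sec = usec / 1000000,
		.tv_usec = usec % 1000000,
	};
	fd_set rfds;
	int ret;

	FD_ZERO(&rfds);
	FD_SET(fd, &rfds);

	ret = select(fd + 1, &rfds, NULL, NULL, &tv);
	if (ret < 0)
		return -errno;
	if (ret == 0)
		return -EAGAIN;	/* timed out */
	return read(fd, buf, count);
}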
// SPDX-License-Identifier: GPL-2.0-or-later /* * The ASB.1/BER parsing code is derived from ip_nat_snmp_basic.c which was in * turn derived from the gxsnmp package by Gregory McLean & Jochen Friedrich * * Copyright (c) 2000 RP Internet (www.rpi.net.au). */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/oid_registry.h> #include "glob.h" #include "asn1.h" #include "connection.h" #include "auth.h" #include "ksmbd_spnego_negtokeninit.asn1.h" #include "ksmbd_spnego_negtokentarg.asn1.h" #define NTLMSSP_OID_LEN 10 static char NTLMSSP_OID_STR[NTLMSSP_OID_LEN] = { 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x02, 0x02, 0x0a }; int ksmbd_decode_negTokenInit(unsigned char *security_blob, int length, struct ksmbd_conn *conn) { return asn1_ber_decoder(&ksmbd_spnego_negtokeninit_decoder, conn, security_blob, length); } int ksmbd_decode_negTokenTarg(unsigned char *security_blob, int length, struct ksmbd_conn *conn) { return asn1_ber_decoder(&ksmbd_spnego_negtokentarg_decoder, conn, security_blob, length); } static int compute_asn_hdr_len_bytes(int len) { if (len > 0xFFFFFF) return 4; else if (len > 0xFFFF) return 3; else if (len > 0xFF) return 2; else if (len > 0x7F) return 1; else return 0; } static void encode_asn_tag(char *buf, unsigned int *ofs, char tag, char seq, int length) { int i; int index = *ofs; char hdr_len = compute_asn_hdr_len_bytes(length); int len = length + 2 + hdr_len; /* insert tag */ buf[index++] = tag; if (!hdr_len) { buf[index++] = len; } else { buf[index++] = 0x80 | hdr_len; for (i = hdr_len - 1; i >= 0; i--) buf[index++] = (len >> (i * 8)) & 0xFF; } /* insert seq */ len = len - (index - *ofs); buf[index++] = seq; if (!hdr_len) { buf[index++] = len; } else { buf[index++] = 0x80 | hdr_len; for (i = hdr_len - 1; i >= 0; i--) buf[index++] = (len >> (i * 8)) & 0xFF; } *ofs += (index - *ofs); } int build_spnego_ntlmssp_neg_blob(unsigned char **pbuffer, u16 *buflen, char *ntlm_blob, int ntlm_blob_len) { char *buf; unsigned int ofs = 0; int neg_result_len = 4 + compute_asn_hdr_len_bytes(1) * 2 + 1; int oid_len = 4 + compute_asn_hdr_len_bytes(NTLMSSP_OID_LEN) * 2 + NTLMSSP_OID_LEN; int ntlmssp_len = 4 + compute_asn_hdr_len_bytes(ntlm_blob_len) * 2 + ntlm_blob_len; int total_len = 4 + compute_asn_hdr_len_bytes(neg_result_len + oid_len + ntlmssp_len) * 2 + neg_result_len + oid_len + ntlmssp_len; buf = kmalloc(total_len, KSMBD_DEFAULT_GFP); if (!buf) return -ENOMEM; /* insert main gss header */ encode_asn_tag(buf, &ofs, 0xa1, 0x30, neg_result_len + oid_len + ntlmssp_len); /* insert neg result */ encode_asn_tag(buf, &ofs, 0xa0, 0x0a, 1); buf[ofs++] = 1; /* insert oid */ encode_asn_tag(buf, &ofs, 0xa1, 0x06, NTLMSSP_OID_LEN); memcpy(buf + ofs, NTLMSSP_OID_STR, NTLMSSP_OID_LEN); ofs += NTLMSSP_OID_LEN; /* insert response token - ntlmssp blob */ encode_asn_tag(buf, &ofs, 0xa2, 0x04, ntlm_blob_len); memcpy(buf + ofs, ntlm_blob, ntlm_blob_len); ofs += ntlm_blob_len; *pbuffer = buf; *buflen = total_len; return 0; } int build_spnego_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen, int neg_result) { char *buf; unsigned int ofs = 0; int neg_result_len = 4 + compute_asn_hdr_len_bytes(1) * 2 + 1; int total_len = 4 + compute_asn_hdr_len_bytes(neg_result_len) * 2 + neg_result_len; buf = kmalloc(total_len, KSMBD_DEFAULT_GFP); if (!buf) return -ENOMEM; /* insert main gss header */ encode_asn_tag(buf, &ofs, 0xa1, 0x30, neg_result_len); /* insert neg result */ encode_asn_tag(buf, &ofs, 0xa0, 0x0a, 1); if 
(neg_result) buf[ofs++] = 2; else buf[ofs++] = 0; *pbuffer = buf; *buflen = total_len; return 0; } int ksmbd_gssapi_this_mech(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { enum OID oid; oid = look_up_OID(value, vlen); if (oid != OID_spnego) { char buf[50]; sprint_oid(value, vlen, buf, sizeof(buf)); ksmbd_debug(AUTH, "Unexpected OID: %s\n", buf); return -EBADMSG; } return 0; } int ksmbd_neg_token_init_mech_type(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct ksmbd_conn *conn = context; enum OID oid; int mech_type; oid = look_up_OID(value, vlen); if (oid == OID_ntlmssp) { mech_type = KSMBD_AUTH_NTLMSSP; } else if (oid == OID_mskrb5) { mech_type = KSMBD_AUTH_MSKRB5; } else if (oid == OID_krb5) { mech_type = KSMBD_AUTH_KRB5; } else if (oid == OID_krb5u2u) { mech_type = KSMBD_AUTH_KRB5U2U; } else { char buf[50]; sprint_oid(value, vlen, buf, sizeof(buf)); ksmbd_debug(AUTH, "Unexpected OID: %s\n", buf); return -EBADMSG; } conn->auth_mechs |= mech_type; if (conn->preferred_auth_mech == 0) conn->preferred_auth_mech = mech_type; return 0; } static int ksmbd_neg_token_alloc(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct ksmbd_conn *conn = context; if (!vlen) return -EINVAL; conn->mechToken = kmemdup_nul(value, vlen, KSMBD_DEFAULT_GFP); if (!conn->mechToken) return -ENOMEM; conn->mechTokenLen = (unsigned int)vlen; return 0; } int ksmbd_neg_token_init_mech_token(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { return ksmbd_neg_token_alloc(context, hdrlen, tag, value, vlen); } int ksmbd_neg_token_targ_resp_token(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { return ksmbd_neg_token_alloc(context, hdrlen, tag, value, vlen); }
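/*
 * Editorial worked example (not part of the original source): for an NTLMSSP
 * blob of 200 bytes, compute_asn_hdr_len_bytes(200) == 1, so encode_asn_tag()
 * emits six header octets -- a2 81 cb 04 81 c8 -- followed by the 200-byte
 * blob; that is, ntlmssp_len == 4 + 1 * 2 + 200 == 206, matching the size
 * computed in build_spnego_ntlmssp_neg_blob().
 */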
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2009 ST-Ericsson SA * * Author: Srinidhi KASAGAR <[email protected]> */ #include <linux/types.h> #include <linux/init.h> #include <linux/device.h> #include <linux/amba/bus.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqchip/arm-gic.h> #include <linux/mfd/dbx500-prcmu.h> #include <linux/platform_data/arm-ux500-pm.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/regulator/machine.h> #include <asm/outercache.h> #include <asm/hardware/cache-l2x0.h> #include <asm/mach/map.h> #include <asm/mach/arch.h> static int __init ux500_l2x0_unlock(void) { int i; struct device_node *np; void __iomem *l2x0_base; np = of_find_compatible_node(NULL, NULL, "arm,pl310-cache"); l2x0_base = of_iomap(np, 0); of_node_put(np); if (!l2x0_base) return -ENODEV; /* * Unlock Data and Instruction Lock if locked. Ux500 U-Boot versions * apparently locks both caches before jumping to the kernel. The * l2x0 core will not touch the unlock registers if the l2x0 is * already enabled, so we do it right here instead. The PL310 has * 8 sets of registers, one per possible CPU. */ for (i = 0; i < 8; i++) { writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + i * L2X0_LOCKDOWN_STRIDE); writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE + i * L2X0_LOCKDOWN_STRIDE); } iounmap(l2x0_base); return 0; } static void ux500_l2c310_write_sec(unsigned long val, unsigned reg) { /* * We can't write to secure registers as we are in non-secure * mode, until we have some SMI service available. */ } /* * FIXME: Should we set up the GPIO domain here? * * The problem is that we cannot put the interrupt resources into the platform * device until the irqdomain has been added. Right now, we set the GIC interrupt * domain from init_irq(), then load the gpio driver from * core_initcall(nmk_gpio_init) and add the platform devices from * arch_initcall(customize_machine). * * This feels fragile because it depends on the gpio device getting probed * _before_ any device uses the gpio interrupts. */ static void __init ux500_init_irq(void) { struct device_node *np; struct resource r; irqchip_init(); prcmu_early_init(); np = of_find_compatible_node(NULL, NULL, "stericsson,db8500-prcmu"); of_address_to_resource(np, 0, &r); of_node_put(np); if (!r.start) { pr_err("could not find PRCMU base resource\n"); return; } ux500_pm_init(r.start, r.end-r.start); /* Unlock before init */ ux500_l2x0_unlock(); outer_cache.write_sec = ux500_l2c310_write_sec; } static void ux500_restart(enum reboot_mode mode, const char *cmd) { local_irq_disable(); local_fiq_disable(); prcmu_system_reset(0); } static const struct of_device_id u8500_local_bus_nodes[] = { /* only create devices below soc node */ { .compatible = "stericsson,db8500", }, { .compatible = "simple-bus"}, { }, }; static void __init u8500_init_machine(void) { of_platform_populate(NULL, u8500_local_bus_nodes, NULL, NULL); } static const char * stericsson_dt_platform_compat[] = { "st-ericsson,u8500", "st-ericsson,u9500", NULL, }; DT_MACHINE_START(U8500_DT, "ST-Ericsson Ux5x0 platform (Device Tree Support)") .l2c_aux_val = 0, .l2c_aux_mask = ~0, .init_irq = ux500_init_irq, .init_machine = u8500_init_machine, .dt_compat = stericsson_dt_platform_compat, .restart = ux500_restart, MACHINE_END
// SPDX-License-Identifier: GPL-2.0+ /* Applied Micro X-Gene SoC MDIO Driver * * Copyright (c) 2016, Applied Micro Circuits Corporation * Author: Iyappan Subramanian <[email protected]> */ #include <linux/acpi.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/efi.h> #include <linux/if_vlan.h> #include <linux/io.h> #include <linux/mdio/mdio-xgene.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/prefetch.h> #include <linux/property.h> #include <net/ip.h> u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr) { void __iomem *addr, *rd, *cmd, *cmd_done; u32 done, rd_data = BUSY_MASK; u8 wait = 10; addr = pdata->mac_csr_addr + MAC_ADDR_REG_OFFSET; rd = pdata->mac_csr_addr + MAC_READ_REG_OFFSET; cmd = pdata->mac_csr_addr + MAC_COMMAND_REG_OFFSET; cmd_done = pdata->mac_csr_addr + MAC_COMMAND_DONE_REG_OFFSET; spin_lock(&pdata->mac_lock); iowrite32(rd_addr, addr); iowrite32(XGENE_ENET_RD_CMD, cmd); while (!(done = ioread32(cmd_done)) && wait--) udelay(1); if (done) rd_data = ioread32(rd); iowrite32(0, cmd); spin_unlock(&pdata->mac_lock); return rd_data; } EXPORT_SYMBOL(xgene_mdio_rd_mac); void xgene_mdio_wr_mac(struct xgene_mdio_pdata *pdata, u32 wr_addr, u32 data) { void __iomem *addr, *wr, *cmd, *cmd_done; u8 wait = 10; u32 done; addr = pdata->mac_csr_addr + MAC_ADDR_REG_OFFSET; wr = pdata->mac_csr_addr + MAC_WRITE_REG_OFFSET; cmd = pdata->mac_csr_addr + MAC_COMMAND_REG_OFFSET; cmd_done = pdata->mac_csr_addr + MAC_COMMAND_DONE_REG_OFFSET; spin_lock(&pdata->mac_lock); iowrite32(wr_addr, addr); iowrite32(data, wr); iowrite32(XGENE_ENET_WR_CMD, cmd); while (!(done = ioread32(cmd_done)) && wait--) udelay(1); if (!done) pr_err("MCX mac write failed, addr: 0x%04x\n", wr_addr); iowrite32(0, cmd); spin_unlock(&pdata->mac_lock); } EXPORT_SYMBOL(xgene_mdio_wr_mac); int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg) { struct xgene_mdio_pdata *pdata = bus->priv; u32 data, done; u8 wait = 10; data = SET_VAL(PHY_ADDR, phy_id) | SET_VAL(REG_ADDR, reg); xgene_mdio_wr_mac(pdata, MII_MGMT_ADDRESS_ADDR, data); xgene_mdio_wr_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK); do { usleep_range(5, 10); done = xgene_mdio_rd_mac(pdata, MII_MGMT_INDICATORS_ADDR); } while ((done & BUSY_MASK) && wait--); if (done & BUSY_MASK) { dev_err(&bus->dev, "MII_MGMT read failed\n"); return -EBUSY; } data = xgene_mdio_rd_mac(pdata, MII_MGMT_STATUS_ADDR); xgene_mdio_wr_mac(pdata, MII_MGMT_COMMAND_ADDR, 0); return data; } EXPORT_SYMBOL(xgene_mdio_rgmii_read); int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data) { struct xgene_mdio_pdata *pdata = bus->priv; u32 val, done; u8 wait = 10; val = SET_VAL(PHY_ADDR, phy_id) | SET_VAL(REG_ADDR, reg); xgene_mdio_wr_mac(pdata, MII_MGMT_ADDRESS_ADDR, val); xgene_mdio_wr_mac(pdata, MII_MGMT_CONTROL_ADDR, data); do { usleep_range(5, 10); done = xgene_mdio_rd_mac(pdata, MII_MGMT_INDICATORS_ADDR); } while ((done & BUSY_MASK) && wait--); if (done & BUSY_MASK) { dev_err(&bus->dev, "MII_MGMT write failed\n"); return -EBUSY; } return 0; } EXPORT_SYMBOL(xgene_mdio_rgmii_write); static u32 xgene_menet_rd_diag_csr(struct xgene_mdio_pdata *pdata, u32 offset) { return ioread32(pdata->diag_csr_addr + offset); } static void xgene_menet_wr_diag_csr(struct xgene_mdio_pdata *pdata, u32 offset, u32 val) { iowrite32(val, pdata->diag_csr_addr + offset); } static int xgene_enet_ecc_init(struct xgene_mdio_pdata *pdata) { 
u32 data; u8 wait = 10; xgene_menet_wr_diag_csr(pdata, MENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0); do { usleep_range(100, 110); data = xgene_menet_rd_diag_csr(pdata, MENET_BLOCK_MEM_RDY_ADDR); } while ((data != 0xffffffff) && wait--); if (data != 0xffffffff) { dev_err(pdata->dev, "Failed to release memory from shutdown\n"); return -ENODEV; } return 0; } static void xgene_gmac_reset(struct xgene_mdio_pdata *pdata) { xgene_mdio_wr_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET); xgene_mdio_wr_mac(pdata, MAC_CONFIG_1_ADDR, 0); } static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata) { int ret; if (pdata->dev->of_node) { clk_prepare_enable(pdata->clk); udelay(5); clk_disable_unprepare(pdata->clk); udelay(5); clk_prepare_enable(pdata->clk); udelay(5); } else { #ifdef CONFIG_ACPI acpi_evaluate_object(ACPI_HANDLE(pdata->dev), "_RST", NULL, NULL); #endif } ret = xgene_enet_ecc_init(pdata); if (ret) { if (pdata->dev->of_node) clk_disable_unprepare(pdata->clk); return ret; } xgene_gmac_reset(pdata); return 0; } static void xgene_enet_rd_mdio_csr(void __iomem *base_addr, u32 offset, u32 *val) { void __iomem *addr = base_addr + offset; *val = ioread32(addr); } static void xgene_enet_wr_mdio_csr(void __iomem *base_addr, u32 offset, u32 val) { void __iomem *addr = base_addr + offset; iowrite32(val, addr); } static int xgene_xfi_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data) { void __iomem *addr = (void __iomem *)bus->priv; int timeout = 100; u32 status, val; val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg) | SET_VAL(HSTMIIMWRDAT, data); xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, val); val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_WRITE); xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val); do { usleep_range(5, 10); xgene_enet_rd_mdio_csr(addr, MIIM_INDICATOR_ADDR, &status); } while ((status & BUSY_MASK) && timeout--); xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, 0); return 0; } static int xgene_xfi_mdio_read(struct mii_bus *bus, int phy_id, int reg) { void __iomem *addr = (void __iomem *)bus->priv; u32 data, status, val; int timeout = 100; val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg); xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, val); val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_READ); xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val); do { usleep_range(5, 10); xgene_enet_rd_mdio_csr(addr, MIIM_INDICATOR_ADDR, &status); } while ((status & BUSY_MASK) && timeout--); if (status & BUSY_MASK) { pr_err("XGENET_MII_MGMT write failed\n"); return -EBUSY; } xgene_enet_rd_mdio_csr(addr, MIIMRD_FIELD_ADDR, &data); xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, 0); return data; } struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr) { struct phy_device *phy_dev; phy_dev = get_phy_device(bus, phy_addr, false); if (!phy_dev || IS_ERR(phy_dev)) return NULL; if (phy_device_register(phy_dev)) phy_device_free(phy_dev); return phy_dev; } EXPORT_SYMBOL(xgene_enet_phy_register); #ifdef CONFIG_ACPI static acpi_status acpi_register_phy(acpi_handle handle, u32 lvl, void *context, void **ret) { struct mii_bus *mdio = context; struct acpi_device *adev; struct phy_device *phy_dev; const union acpi_object *obj; u32 phy_addr; adev = acpi_fetch_acpi_dev(handle); if (!adev) return AE_OK; if (acpi_dev_get_property(adev, "phy-channel", ACPI_TYPE_INTEGER, &obj)) return AE_OK; phy_addr = obj->integer.value; phy_dev = xgene_enet_phy_register(mdio, phy_addr); adev->driver_data = phy_dev; return AE_OK; } #endif static const struct of_device_id 
xgene_mdio_of_match[] = { { .compatible = "apm,xgene-mdio-rgmii", .data = (void *)XGENE_MDIO_RGMII }, { .compatible = "apm,xgene-mdio-xfi", .data = (void *)XGENE_MDIO_XFI }, {}, }; MODULE_DEVICE_TABLE(of, xgene_mdio_of_match); #ifdef CONFIG_ACPI static const struct acpi_device_id xgene_mdio_acpi_match[] = { { "APMC0D65", XGENE_MDIO_RGMII }, { "APMC0D66", XGENE_MDIO_XFI }, { } }; MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match); #endif static int xgene_mdio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mii_bus *mdio_bus; struct xgene_mdio_pdata *pdata; void __iomem *csr_base; int mdio_id = 0, ret = 0; mdio_id = (uintptr_t)device_get_match_data(&pdev->dev); if (!mdio_id) return -ENODEV; pdata = devm_kzalloc(dev, sizeof(struct xgene_mdio_pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; pdata->mdio_id = mdio_id; pdata->dev = dev; csr_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(csr_base)) return PTR_ERR(csr_base); pdata->mac_csr_addr = csr_base; pdata->mdio_csr_addr = csr_base + BLOCK_XG_MDIO_CSR_OFFSET; pdata->diag_csr_addr = csr_base + BLOCK_DIAG_CSR_OFFSET; if (mdio_id == XGENE_MDIO_RGMII) spin_lock_init(&pdata->mac_lock); if (dev->of_node) { pdata->clk = devm_clk_get(dev, NULL); if (IS_ERR(pdata->clk)) { dev_err(dev, "Unable to retrieve clk\n"); return PTR_ERR(pdata->clk); } } ret = xgene_mdio_reset(pdata); if (ret) return ret; mdio_bus = mdiobus_alloc(); if (!mdio_bus) { ret = -ENOMEM; goto out_clk; } mdio_bus->name = "APM X-Gene MDIO bus"; if (mdio_id == XGENE_MDIO_RGMII) { mdio_bus->read = xgene_mdio_rgmii_read; mdio_bus->write = xgene_mdio_rgmii_write; mdio_bus->priv = (void __force *)pdata; snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s", "xgene-mii-rgmii"); } else { mdio_bus->read = xgene_xfi_mdio_read; mdio_bus->write = xgene_xfi_mdio_write; mdio_bus->priv = (void __force *)pdata->mdio_csr_addr; snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s", "xgene-mii-xfi"); } mdio_bus->parent = dev; platform_set_drvdata(pdev, pdata); if (dev->of_node) { ret = of_mdiobus_register(mdio_bus, dev->of_node); } else { #ifdef CONFIG_ACPI /* Mask out all PHYs from auto probing. */ mdio_bus->phy_mask = ~0; ret = mdiobus_register(mdio_bus); if (ret) goto out_mdiobus; acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1, acpi_register_phy, NULL, mdio_bus, NULL); #endif } if (ret) goto out_mdiobus; pdata->mdio_bus = mdio_bus; return 0; out_mdiobus: mdiobus_free(mdio_bus); out_clk: if (dev->of_node) clk_disable_unprepare(pdata->clk); return ret; } static void xgene_mdio_remove(struct platform_device *pdev) { struct xgene_mdio_pdata *pdata = platform_get_drvdata(pdev); struct mii_bus *mdio_bus = pdata->mdio_bus; struct device *dev = &pdev->dev; mdiobus_unregister(mdio_bus); mdiobus_free(mdio_bus); if (dev->of_node) clk_disable_unprepare(pdata->clk); } static struct platform_driver xgene_mdio_driver = { .driver = { .name = "xgene-mdio", .of_match_table = xgene_mdio_of_match, .acpi_match_table = ACPI_PTR(xgene_mdio_acpi_match), }, .probe = xgene_mdio_probe, .remove = xgene_mdio_remove, }; module_platform_driver(xgene_mdio_driver); MODULE_DESCRIPTION("APM X-Gene SoC MDIO driver"); MODULE_AUTHOR("Iyappan Subramanian <[email protected]>"); MODULE_LICENSE("GPL");
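/*
 * Usage sketch (illustrative only, not part of this driver): a consumer such
 * as an Ethernet driver can go through the mii_bus registered above.  The
 * helper name below and the use of MII_BMSR/BMSR_LSTATUS from <linux/mii.h>
 * are assumptions for the example, not APIs defined in this file.
 */
#if 0
#include <linux/mii.h>

static bool example_xgene_link_up(struct mii_bus *bus, int phy_addr)
{
	/* xgene_mdio_rgmii_read() returns the register value, or -EBUSY on timeout */
	int bmsr = xgene_mdio_rgmii_read(bus, phy_addr, MII_BMSR);

	return bmsr >= 0 && (bmsr & BMSR_LSTATUS);
}
#endif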
/* * Instruction formats for the sequencer program downloaded to * Aic7xxx SCSI host adapters * * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_insformat.h#12 $ * * $FreeBSD$ */ #include <asm/byteorder.h> /* 8bit ALU logic operations */ struct ins_format1 { #ifdef __LITTLE_ENDIAN uint32_t immediate : 8, source : 9, destination : 9, ret : 1, opcode : 4, parity : 1; #else uint32_t parity : 1, opcode : 4, ret : 1, destination : 9, source : 9, immediate : 8; #endif }; /* 8bit ALU shift/rotate operations */ struct ins_format2 { #ifdef __LITTLE_ENDIAN uint32_t shift_control : 8, source : 9, destination : 9, ret : 1, opcode : 4, parity : 1; #else uint32_t parity : 1, opcode : 4, ret : 1, destination : 9, source : 9, shift_control : 8; #endif }; /* 8bit branch control operations */ struct ins_format3 { #ifdef __LITTLE_ENDIAN uint32_t immediate : 8, source : 9, address : 10, opcode : 4, parity : 1; #else uint32_t parity : 1, opcode : 4, address : 10, source : 9, immediate : 8; #endif }; /* 16bit ALU logic operations */ struct ins_format4 { #ifdef __LITTLE_ENDIAN uint32_t opcode_ext : 8, source : 9, destination : 9, ret : 1, opcode : 4, parity : 1; #else uint32_t parity : 1, opcode : 4, ret : 1, destination : 9, source : 9, opcode_ext : 8; #endif }; /* 16bit branch control operations */ struct ins_format5 { #ifdef __LITTLE_ENDIAN uint32_t opcode_ext : 8, source : 9, address : 10, opcode : 4, parity : 1; #else uint32_t parity : 1, opcode : 4, address : 10, source : 9, opcode_ext : 8; #endif }; /* Far branch operations */ struct ins_format6 { #ifdef __LITTLE_ENDIAN uint32_t page : 3, opcode_ext : 5, source : 9, address : 10, opcode : 4, parity : 1; #else uint32_t parity : 1, opcode : 4, address : 10, source : 9, opcode_ext : 5, page : 3; #endif }; union ins_formats { struct ins_format1 format1; struct ins_format2 format2; struct ins_format3 format3; struct ins_format4 format4; struct ins_format5 format5; struct ins_format6 format6; uint8_t bytes[4]; uint32_t integer; }; struct instruction { union ins_formats format; u_int srcline; struct symbol *patch_label; STAILQ_ENTRY(instruction) links; }; #define AIC_OP_OR 0x0 #define AIC_OP_AND 0x1 #define AIC_OP_XOR 0x2 #define AIC_OP_ADD 0x3 #define AIC_OP_ADC 0x4 #define AIC_OP_ROL 0x5 #define AIC_OP_BMOV 0x6 #define AIC_OP_MVI16 0x7 #define AIC_OP_JMP 0x8 #define AIC_OP_JC 0x9 #define AIC_OP_JNC 0xa #define AIC_OP_CALL 0xb #define AIC_OP_JNE 0xc #define AIC_OP_JNZ 0xd #define AIC_OP_JE 0xe #define AIC_OP_JZ 0xf /* Pseudo Ops */ #define AIC_OP_SHL 0x10 #define AIC_OP_SHR 0x20 #define AIC_OP_ROR 0x30 /* 16bit Ops. Low byte main opcode. High byte extended opcode. */ #define AIC_OP_OR16 0x8005 #define AIC_OP_AND16 0x8105 #define AIC_OP_XOR16 0x8205 #define AIC_OP_ADD16 0x8305 #define AIC_OP_ADC16 0x8405 #define AIC_OP_JNE16 0x8805 #define AIC_OP_JNZ16 0x8905 #define AIC_OP_JE16 0x8C05 #define AIC_OP_JZ16 0x8B05 #define AIC_OP_JMP16 0x9005 #define AIC_OP_JC16 0x9105 #define AIC_OP_JNC16 0x9205 #define AIC_OP_CALL16 0x9305 /* Page extension is low three bits of second opcode byte. */ #define AIC_OP_JMPF 0xA005 #define AIC_OP_CALLF 0xB005 #define AIC_OP_JCF 0xC005 #define AIC_OP_JNCF 0xD005 #define AIC_OP_CMPXCHG 0xE005
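/*
 * Illustrative sketch (not part of this header): how one 32-bit sequencer
 * word is packed through ins_format1.  The opcode and operand values below
 * are arbitrary example inputs; the assembler computes the parity bit itself.
 */
#if 0
static inline uint32_t example_pack_format1(uint8_t immediate,
					    uint16_t source,
					    uint16_t destination)
{
	union ins_formats instr = { .integer = 0 };

	instr.format1.opcode = AIC_OP_OR;	 /* 8-bit ALU OR operation */
	instr.format1.immediate = immediate;
	instr.format1.source = source;		 /* 9-bit source address */
	instr.format1.destination = destination; /* 9-bit destination address */
	instr.format1.ret = 0;			 /* no return after this op */
	return instr.integer;
}
#endif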
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #ifndef __DC_MCIF_WB_DCN20_H__ #define __DC_MCIF_WB_DCN20_H__ #define TO_DCN20_MMHUBBUB(mcif_wb_base) \ container_of(mcif_wb_base, struct dcn20_mmhubbub, base) #define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \ SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\ SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\ SRI(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_PITCH, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_1_STATUS, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_1_STATUS2, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_2_STATUS, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_2_STATUS2, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_3_STATUS, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_3_STATUS2, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_4_STATUS, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_4_STATUS2, MCIF_WB, inst),\ SRI(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB, inst),\ SRI(MCIF_WB_SCLK_CHANGE, MCIF_WB, inst),\ SRI(MCIF_WB_TEST_DEBUG_INDEX, MCIF_WB, inst),\ SRI(MCIF_WB_TEST_DEBUG_DATA, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_1_ADDR_Y_OFFSET, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_1_ADDR_C, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_1_ADDR_C_OFFSET, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_2_ADDR_Y_OFFSET, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_2_ADDR_C, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_2_ADDR_C_OFFSET, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_3_ADDR_Y_OFFSET, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_3_ADDR_C, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_3_ADDR_C_OFFSET, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_4_ADDR_Y_OFFSET, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_4_ADDR_C, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_4_ADDR_C_OFFSET, MCIF_WB, inst),\ SRI(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB, inst),\ SRI(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, MCIF_WB, inst),\ SRI(MCIF_WB_NB_PSTATE_CONTROL, MCIF_WB, inst),\ SRI(MCIF_WB_WATERMARK, MCIF_WB, inst),\ SRI(MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB, inst),\ SRI(MCIF_WB_WARM_UP_CNTL, MCIF_WB, inst),\ SRI(MCIF_WB_SELF_REFRESH_CONTROL, MCIF_WB, inst),\ SRI(MULTI_LEVEL_QOS_CTRL, MCIF_WB, inst),\ SRI(MCIF_WB_SECURITY_LEVEL, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB, 
inst),\ SRI(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_1_RESOLUTION, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_2_RESOLUTION, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_3_RESOLUTION, MCIF_WB, inst),\ SRI(MCIF_WB_BUF_4_RESOLUTION, MCIF_WB, inst),\ SRI(SMU_WM_CONTROL, WBIF, inst) #define MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(mask_sh) \ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_EN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_ACK, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_SLICE_INT_EN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_P_VMID, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB_BUFMGR_CUR_LINE_R, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_VCE_INT_STATUS, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_INT_STATUS, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_BUF, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_BUFTAG, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_LINE_L, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_NEXT_BUF, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_PITCH, MCIF_WB_BUF_CHROMA_PITCH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_ACTIVE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_SW_LOCKED, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_VCE_LOCKED, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_OVERFLOW, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_DISABLE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_MODE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_BUFTAG, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_NXT_BUF, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_FIELD, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_CUR_LINE_L, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_LONG_LINE_ERROR, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_SHORT_LINE_ERROR, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_FRAME_LENGTH_ERROR, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_CUR_LINE_R, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_NEW_CONTENT, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_COLOR_DEPTH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_TMZ_BLACK_PIXEL, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_TMZ, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_Y_OVERRUN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_C_OVERRUN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_ACTIVE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_SW_LOCKED, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_VCE_LOCKED, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_OVERFLOW, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_DISABLE, mask_sh),\ 
SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_MODE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_BUFTAG, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_NXT_BUF, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_FIELD, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_CUR_LINE_L, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_LONG_LINE_ERROR, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_SHORT_LINE_ERROR, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_FRAME_LENGTH_ERROR, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_CUR_LINE_R, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_NEW_CONTENT, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_COLOR_DEPTH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_TMZ_BLACK_PIXEL, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_TMZ, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_Y_OVERRUN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_C_OVERRUN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_ACTIVE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_SW_LOCKED, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_VCE_LOCKED, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_OVERFLOW, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_DISABLE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_MODE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_BUFTAG, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_NXT_BUF, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_FIELD, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_CUR_LINE_L, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_LONG_LINE_ERROR, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_SHORT_LINE_ERROR, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_FRAME_LENGTH_ERROR, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_CUR_LINE_R, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_NEW_CONTENT, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_COLOR_DEPTH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_TMZ_BLACK_PIXEL, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_TMZ, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_Y_OVERRUN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_C_OVERRUN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_ACTIVE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_SW_LOCKED, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_VCE_LOCKED, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_OVERFLOW, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_DISABLE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_MODE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_BUFTAG, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_NXT_BUF, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_FIELD, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_CUR_LINE_L, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_LONG_LINE_ERROR, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_SHORT_LINE_ERROR, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_FRAME_LENGTH_ERROR, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_CUR_LINE_R, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_NEW_CONTENT, mask_sh),\ 
SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_COLOR_DEPTH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_TMZ_BLACK_PIXEL, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_TMZ, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_Y_OVERRUN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_C_OVERRUN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, mask_sh),\ SF(MCIF_WB0_MCIF_WB_SCLK_CHANGE, WM_CHANGE_ACK_FORCE_ON, mask_sh),\ SF(MCIF_WB0_MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, mask_sh),\ SF(MCIF_WB0_MCIF_WB_TEST_DEBUG_INDEX, MCIF_WB_TEST_DEBUG_INDEX, mask_sh),\ SF(MCIF_WB0_MCIF_WB_TEST_DEBUG_DATA, MCIF_WB_TEST_DEBUG_DATA, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_OFFSET, MCIF_WB_BUF_1_ADDR_Y_OFFSET, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_OFFSET, MCIF_WB_BUF_1_ADDR_C_OFFSET, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_OFFSET, MCIF_WB_BUF_2_ADDR_Y_OFFSET, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_OFFSET, MCIF_WB_BUF_2_ADDR_C_OFFSET, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_OFFSET, MCIF_WB_BUF_3_ADDR_Y_OFFSET, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_OFFSET, MCIF_WB_BUF_3_ADDR_C_OFFSET, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_OFFSET, MCIF_WB_BUF_4_ADDR_Y_OFFSET, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_OFFSET, MCIF_WB_BUF_4_ADDR_C_OFFSET, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\ SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\ SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_FORCE_ON, mask_sh),\ SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_ALLOW_FOR_URGENT, mask_sh),\ SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\ SF(MCIF_WB0_MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, mask_sh),\ SF(MCIF_WB0_MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB_CLI_CLOCK_GATER_OVERRIDE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_WARM_UP_CNTL, MCIF_WB_PITCH_SIZE_WARMUP, mask_sh),\ SF(MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL, DIS_REFRESH_UNDER_NBPREQ, mask_sh),\ SF(MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL, PERFRAME_SELF_REFRESH, mask_sh),\ SF(MCIF_WB0_MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, mask_sh),\ SF(MCIF_WB0_MCIF_WB_SECURITY_LEVEL, MCIF_WB_SECURITY_LEVEL, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, 
mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION, MCIF_WB_BUF_1_RESOLUTION_WIDTH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION, MCIF_WB_BUF_1_RESOLUTION_HEIGHT, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION, MCIF_WB_BUF_2_RESOLUTION_WIDTH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION, MCIF_WB_BUF_2_RESOLUTION_HEIGHT, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION, MCIF_WB_BUF_3_RESOLUTION_WIDTH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION, MCIF_WB_BUF_3_RESOLUTION_HEIGHT, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION, MCIF_WB_BUF_4_RESOLUTION_WIDTH, mask_sh),\ SF(MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION, MCIF_WB_BUF_4_RESOLUTION_HEIGHT, mask_sh),\ SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_SEL, mask_sh),\ SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_REQ, mask_sh),\ SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_DIS, mask_sh),\ SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_STATUS, mask_sh) #define MCIF_WB_REG_FIELD_LIST_DCN2_0(type) \ type MCIF_WB_BUFMGR_ENABLE;\ type MCIF_WB_BUFMGR_SW_INT_EN;\ type MCIF_WB_BUFMGR_SW_INT_ACK;\ type MCIF_WB_BUFMGR_SW_SLICE_INT_EN;\ type MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN;\ type MCIF_WB_BUFMGR_SW_LOCK;\ type MCIF_WB_P_VMID;\ type MCIF_WB_BUF_ADDR_FENCE_EN;\ type MCIF_WB_BUFMGR_CUR_LINE_R;\ type MCIF_WB_BUFMGR_VCE_INT_STATUS;\ type MCIF_WB_BUFMGR_SW_INT_STATUS;\ type MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS;\ type MCIF_WB_BUFMGR_CUR_BUF;\ type MCIF_WB_BUFMGR_BUFTAG;\ type MCIF_WB_BUFMGR_CUR_LINE_L;\ type MCIF_WB_BUFMGR_NEXT_BUF;\ type MCIF_WB_BUF_LUMA_PITCH;\ type MCIF_WB_BUF_CHROMA_PITCH;\ type MCIF_WB_BUF_1_ACTIVE;\ type MCIF_WB_BUF_1_SW_LOCKED;\ type MCIF_WB_BUF_1_VCE_LOCKED;\ type MCIF_WB_BUF_1_OVERFLOW;\ type MCIF_WB_BUF_1_DISABLE;\ type MCIF_WB_BUF_1_MODE;\ type MCIF_WB_BUF_1_BUFTAG;\ type MCIF_WB_BUF_1_NXT_BUF;\ type MCIF_WB_BUF_1_FIELD;\ type MCIF_WB_BUF_1_CUR_LINE_L;\ type MCIF_WB_BUF_1_LONG_LINE_ERROR;\ type MCIF_WB_BUF_1_SHORT_LINE_ERROR;\ type MCIF_WB_BUF_1_FRAME_LENGTH_ERROR;\ type MCIF_WB_BUF_1_CUR_LINE_R;\ type MCIF_WB_BUF_1_NEW_CONTENT;\ type MCIF_WB_BUF_1_COLOR_DEPTH;\ type MCIF_WB_BUF_1_TMZ_BLACK_PIXEL;\ type MCIF_WB_BUF_1_TMZ;\ type MCIF_WB_BUF_1_Y_OVERRUN;\ type MCIF_WB_BUF_1_C_OVERRUN;\ type MCIF_WB_BUF_2_ACTIVE;\ type MCIF_WB_BUF_2_SW_LOCKED;\ type MCIF_WB_BUF_2_VCE_LOCKED;\ type MCIF_WB_BUF_2_OVERFLOW;\ type MCIF_WB_BUF_2_DISABLE;\ type MCIF_WB_BUF_2_MODE;\ type MCIF_WB_BUF_2_BUFTAG;\ type MCIF_WB_BUF_2_NXT_BUF;\ type MCIF_WB_BUF_2_FIELD;\ type MCIF_WB_BUF_2_CUR_LINE_L;\ type MCIF_WB_BUF_2_LONG_LINE_ERROR;\ type MCIF_WB_BUF_2_SHORT_LINE_ERROR;\ type MCIF_WB_BUF_2_FRAME_LENGTH_ERROR;\ type MCIF_WB_BUF_2_CUR_LINE_R;\ type MCIF_WB_BUF_2_NEW_CONTENT;\ type MCIF_WB_BUF_2_COLOR_DEPTH;\ type MCIF_WB_BUF_2_TMZ_BLACK_PIXEL;\ type MCIF_WB_BUF_2_TMZ;\ type MCIF_WB_BUF_2_Y_OVERRUN;\ type MCIF_WB_BUF_2_C_OVERRUN;\ type MCIF_WB_BUF_3_ACTIVE;\ type MCIF_WB_BUF_3_SW_LOCKED;\ type MCIF_WB_BUF_3_VCE_LOCKED;\ type MCIF_WB_BUF_3_OVERFLOW;\ type 
MCIF_WB_BUF_3_DISABLE;\ type MCIF_WB_BUF_3_MODE;\ type MCIF_WB_BUF_3_BUFTAG;\ type MCIF_WB_BUF_3_NXT_BUF;\ type MCIF_WB_BUF_3_FIELD;\ type MCIF_WB_BUF_3_CUR_LINE_L;\ type MCIF_WB_BUF_3_LONG_LINE_ERROR;\ type MCIF_WB_BUF_3_SHORT_LINE_ERROR;\ type MCIF_WB_BUF_3_FRAME_LENGTH_ERROR;\ type MCIF_WB_BUF_3_CUR_LINE_R;\ type MCIF_WB_BUF_3_NEW_CONTENT;\ type MCIF_WB_BUF_3_COLOR_DEPTH;\ type MCIF_WB_BUF_3_TMZ_BLACK_PIXEL;\ type MCIF_WB_BUF_3_TMZ;\ type MCIF_WB_BUF_3_Y_OVERRUN;\ type MCIF_WB_BUF_3_C_OVERRUN;\ type MCIF_WB_BUF_4_ACTIVE;\ type MCIF_WB_BUF_4_SW_LOCKED;\ type MCIF_WB_BUF_4_VCE_LOCKED;\ type MCIF_WB_BUF_4_OVERFLOW;\ type MCIF_WB_BUF_4_DISABLE;\ type MCIF_WB_BUF_4_MODE;\ type MCIF_WB_BUF_4_BUFTAG;\ type MCIF_WB_BUF_4_NXT_BUF;\ type MCIF_WB_BUF_4_FIELD;\ type MCIF_WB_BUF_4_CUR_LINE_L;\ type MCIF_WB_BUF_4_LONG_LINE_ERROR;\ type MCIF_WB_BUF_4_SHORT_LINE_ERROR;\ type MCIF_WB_BUF_4_FRAME_LENGTH_ERROR;\ type MCIF_WB_BUF_4_CUR_LINE_R;\ type MCIF_WB_BUF_4_NEW_CONTENT;\ type MCIF_WB_BUF_4_COLOR_DEPTH;\ type MCIF_WB_BUF_4_TMZ_BLACK_PIXEL;\ type MCIF_WB_BUF_4_TMZ;\ type MCIF_WB_BUF_4_Y_OVERRUN;\ type MCIF_WB_BUF_4_C_OVERRUN;\ type MCIF_WB_CLIENT_ARBITRATION_SLICE;\ type MCIF_WB_TIME_PER_PIXEL;\ type WM_CHANGE_ACK_FORCE_ON;\ type MCIF_WB_CLI_WATERMARK_MASK;\ type MCIF_WB_TEST_DEBUG_INDEX;\ type MCIF_WB_TEST_DEBUG_DATA;\ type MCIF_WB_BUF_1_ADDR_Y;\ type MCIF_WB_BUF_1_ADDR_Y_OFFSET;\ type MCIF_WB_BUF_1_ADDR_C;\ type MCIF_WB_BUF_1_ADDR_C_OFFSET;\ type MCIF_WB_BUF_2_ADDR_Y;\ type MCIF_WB_BUF_2_ADDR_Y_OFFSET;\ type MCIF_WB_BUF_2_ADDR_C;\ type MCIF_WB_BUF_2_ADDR_C_OFFSET;\ type MCIF_WB_BUF_3_ADDR_Y;\ type MCIF_WB_BUF_3_ADDR_Y_OFFSET;\ type MCIF_WB_BUF_3_ADDR_C;\ type MCIF_WB_BUF_3_ADDR_C_OFFSET;\ type MCIF_WB_BUF_4_ADDR_Y;\ type MCIF_WB_BUF_4_ADDR_Y_OFFSET;\ type MCIF_WB_BUF_4_ADDR_C;\ type MCIF_WB_BUF_4_ADDR_C_OFFSET;\ type MCIF_WB_BUFMGR_VCE_LOCK_IGNORE;\ type MCIF_WB_BUFMGR_VCE_INT_EN;\ type MCIF_WB_BUFMGR_VCE_INT_ACK;\ type MCIF_WB_BUFMGR_VCE_SLICE_INT_EN;\ type MCIF_WB_BUFMGR_VCE_LOCK;\ type MCIF_WB_BUFMGR_SLICE_SIZE;\ type NB_PSTATE_CHANGE_REFRESH_WATERMARK;\ type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST;\ type NB_PSTATE_CHANGE_FORCE_ON;\ type NB_PSTATE_ALLOW_FOR_URGENT;\ type NB_PSTATE_CHANGE_WATERMARK_MASK;\ type MCIF_WB_CLI_WATERMARK;\ type MCIF_WB_CLI_CLOCK_GATER_OVERRIDE;\ type MCIF_WB_PITCH_SIZE_WARMUP;\ type DIS_REFRESH_UNDER_NBPREQ;\ type PERFRAME_SELF_REFRESH;\ type MAX_SCALED_TIME_TO_URGENT;\ type MCIF_WB_SECURITY_LEVEL;\ type MCIF_WB_BUF_LUMA_SIZE;\ type MCIF_WB_BUF_CHROMA_SIZE;\ type MCIF_WB_BUF_1_ADDR_Y_HIGH;\ type MCIF_WB_BUF_1_ADDR_C_HIGH;\ type MCIF_WB_BUF_2_ADDR_Y_HIGH;\ type MCIF_WB_BUF_2_ADDR_C_HIGH;\ type MCIF_WB_BUF_3_ADDR_Y_HIGH;\ type MCIF_WB_BUF_3_ADDR_C_HIGH;\ type MCIF_WB_BUF_4_ADDR_Y_HIGH;\ type MCIF_WB_BUF_4_ADDR_C_HIGH;\ type MCIF_WB_BUF_1_RESOLUTION_WIDTH;\ type MCIF_WB_BUF_1_RESOLUTION_HEIGHT;\ type MCIF_WB_BUF_2_RESOLUTION_WIDTH;\ type MCIF_WB_BUF_2_RESOLUTION_HEIGHT;\ type MCIF_WB_BUF_3_RESOLUTION_WIDTH;\ type MCIF_WB_BUF_3_RESOLUTION_HEIGHT;\ type MCIF_WB_BUF_4_RESOLUTION_WIDTH;\ type MCIF_WB_BUF_4_RESOLUTION_HEIGHT;\ type MCIF_WB0_WM_CHG_SEL;\ type MCIF_WB0_WM_CHG_REQ;\ type MCIF_WB0_WM_CHG_ACK_INT_DIS;\ type MCIF_WB0_WM_CHG_ACK_INT_STATUS #define MCIF_WB_REG_VARIABLE_LIST_DCN2_0 \ uint32_t MCIF_WB_BUFMGR_SW_CONTROL;\ uint32_t MCIF_WB_BUFMGR_CUR_LINE_R;\ uint32_t MCIF_WB_BUFMGR_STATUS;\ uint32_t MCIF_WB_BUF_PITCH;\ uint32_t MCIF_WB_BUF_1_STATUS;\ uint32_t MCIF_WB_BUF_1_STATUS2;\ uint32_t MCIF_WB_BUF_2_STATUS;\ uint32_t MCIF_WB_BUF_2_STATUS2;\ uint32_t 
MCIF_WB_BUF_3_STATUS;\ uint32_t MCIF_WB_BUF_3_STATUS2;\ uint32_t MCIF_WB_BUF_4_STATUS;\ uint32_t MCIF_WB_BUF_4_STATUS2;\ uint32_t MCIF_WB_ARBITRATION_CONTROL;\ uint32_t MCIF_WB_SCLK_CHANGE;\ uint32_t MCIF_WB_TEST_DEBUG_INDEX;\ uint32_t MCIF_WB_TEST_DEBUG_DATA;\ uint32_t MCIF_WB_BUF_1_ADDR_Y;\ uint32_t MCIF_WB_BUF_1_ADDR_Y_OFFSET;\ uint32_t MCIF_WB_BUF_1_ADDR_C;\ uint32_t MCIF_WB_BUF_1_ADDR_C_OFFSET;\ uint32_t MCIF_WB_BUF_2_ADDR_Y;\ uint32_t MCIF_WB_BUF_2_ADDR_Y_OFFSET;\ uint32_t MCIF_WB_BUF_2_ADDR_C;\ uint32_t MCIF_WB_BUF_2_ADDR_C_OFFSET;\ uint32_t MCIF_WB_BUF_3_ADDR_Y;\ uint32_t MCIF_WB_BUF_3_ADDR_Y_OFFSET;\ uint32_t MCIF_WB_BUF_3_ADDR_C;\ uint32_t MCIF_WB_BUF_3_ADDR_C_OFFSET;\ uint32_t MCIF_WB_BUF_4_ADDR_Y;\ uint32_t MCIF_WB_BUF_4_ADDR_Y_OFFSET;\ uint32_t MCIF_WB_BUF_4_ADDR_C;\ uint32_t MCIF_WB_BUF_4_ADDR_C_OFFSET;\ uint32_t MCIF_WB_BUFMGR_VCE_CONTROL;\ uint32_t MCIF_WB_NB_PSTATE_LATENCY_WATERMARK;\ uint32_t MCIF_WB_NB_PSTATE_CONTROL;\ uint32_t MCIF_WB_WATERMARK;\ uint32_t MCIF_WB_CLOCK_GATER_CONTROL;\ uint32_t MCIF_WB_WARM_UP_CNTL;\ uint32_t MCIF_WB_SELF_REFRESH_CONTROL;\ uint32_t MULTI_LEVEL_QOS_CTRL;\ uint32_t MCIF_WB_SECURITY_LEVEL;\ uint32_t MCIF_WB_BUF_LUMA_SIZE;\ uint32_t MCIF_WB_BUF_CHROMA_SIZE;\ uint32_t MCIF_WB_BUF_1_ADDR_Y_HIGH;\ uint32_t MCIF_WB_BUF_1_ADDR_C_HIGH;\ uint32_t MCIF_WB_BUF_2_ADDR_Y_HIGH;\ uint32_t MCIF_WB_BUF_2_ADDR_C_HIGH;\ uint32_t MCIF_WB_BUF_3_ADDR_Y_HIGH;\ uint32_t MCIF_WB_BUF_3_ADDR_C_HIGH;\ uint32_t MCIF_WB_BUF_4_ADDR_Y_HIGH;\ uint32_t MCIF_WB_BUF_4_ADDR_C_HIGH;\ uint32_t MCIF_WB_BUF_1_RESOLUTION;\ uint32_t MCIF_WB_BUF_2_RESOLUTION;\ uint32_t MCIF_WB_BUF_3_RESOLUTION;\ uint32_t MCIF_WB_BUF_4_RESOLUTION;\ uint32_t SMU_WM_CONTROL struct dcn20_mmhubbub_registers { MCIF_WB_REG_VARIABLE_LIST_DCN2_0; }; struct dcn20_mmhubbub_mask { MCIF_WB_REG_FIELD_LIST_DCN2_0(uint32_t); }; struct dcn20_mmhubbub_shift { MCIF_WB_REG_FIELD_LIST_DCN2_0(uint8_t); }; struct dcn20_mmhubbub { struct mcif_wb base; const struct dcn20_mmhubbub_registers *mcif_wb_regs; const struct dcn20_mmhubbub_shift *mcif_wb_shift; const struct dcn20_mmhubbub_mask *mcif_wb_mask; }; void mmhubbub2_config_mcif_irq(struct mcif_wb *mcif_wb, struct mcif_irq_params *params); void mmhubbub2_enable_mcif(struct mcif_wb *mcif_wb); void mmhubbub2_disable_mcif(struct mcif_wb *mcif_wb); void mcifwb2_dump_frame(struct mcif_wb *mcif_wb, struct mcif_buf_params *mcif_params, enum dwb_scaler_mode out_format, unsigned int dest_width, unsigned int dest_height, struct mcif_wb_frame_dump_info *dump_info, unsigned char *luma_buffer, unsigned char *chroma_buffer, unsigned char *dest_luma_buffer, unsigned char *dest_chroma_buffer); void dcn20_mmhubbub_construct(struct dcn20_mmhubbub *mcif_wb20, struct dc_context *ctx, const struct dcn20_mmhubbub_registers *mcif_wb_regs, const struct dcn20_mmhubbub_shift *mcif_wb_shift, const struct dcn20_mmhubbub_mask *mcif_wb_mask, int inst); #endif
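/*
 * Usage sketch (an assumption about how this header is consumed, not code
 * taken from it): DCN resource files typically expand the macros above into
 * per-instance register tables plus a shared shift/mask pair, roughly as
 * below for instance 0, and then pass the three tables to
 * dcn20_mmhubbub_construct().  SRI()/SF() and the __SHIFT/_MASK suffixes
 * resolve against the SoC register headers included by the resource file.
 */
#if 0
static const struct dcn20_mmhubbub_registers mcif_wb_regs_example = {
	MCIF_WB_COMMON_REG_LIST_DCN2_0(0)
};

static const struct dcn20_mmhubbub_shift mcif_wb_shift_example = {
	MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};

static const struct dcn20_mmhubbub_mask mcif_wb_mask_example = {
	MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};
#endif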
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2019 Chelsio Communications. All rights reserved. */ #include "cxgb4.h" #include "cxgb4_tc_matchall.h" #include "sched.h" #include "cxgb4_uld.h" #include "cxgb4_filter.h" #include "cxgb4_tc_flower.h" static int cxgb4_policer_validate(const struct flow_action *action, const struct flow_action_entry *act, struct netlink_ext_ack *extack) { if (act->police.exceed.act_id != FLOW_ACTION_DROP) { NL_SET_ERR_MSG_MOD(extack, "Offload not supported when exceed action is not drop"); return -EOPNOTSUPP; } if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is not pipe or ok"); return -EOPNOTSUPP; } if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && !flow_action_is_last_entry(action, act)) { NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is ok, but action is not last"); return -EOPNOTSUPP; } if (act->police.peakrate_bytes_ps || act->police.avrate || act->police.overhead) { NL_SET_ERR_MSG_MOD(extack, "Offload not supported when peakrate/avrate/overhead is configured"); return -EOPNOTSUPP; } if (act->police.rate_pkt_ps) { NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); return -EOPNOTSUPP; } return 0; } static int cxgb4_matchall_egress_validate(struct net_device *dev, struct tc_cls_matchall_offload *cls) { struct netlink_ext_ack *extack = cls->common.extack; struct flow_action *actions = &cls->rule->action; struct port_info *pi = netdev2pinfo(dev); struct flow_action_entry *entry; struct ch_sched_queue qe; struct sched_class *e; u64 max_link_rate; u32 i, speed; int ret; if (!flow_action_has_entries(actions)) { NL_SET_ERR_MSG_MOD(extack, "Egress MATCHALL offload needs at least 1 policing action"); return -EINVAL; } else if (!flow_offload_has_one_action(actions)) { NL_SET_ERR_MSG_MOD(extack, "Egress MATCHALL offload only supports 1 policing action"); return -EINVAL; } else if (pi->tc_block_shared) { NL_SET_ERR_MSG_MOD(extack, "Egress MATCHALL offload not supported with shared blocks"); return -EINVAL; } ret = t4_get_link_params(pi, NULL, &speed, NULL); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get max speed supported by the link"); return -EINVAL; } /* Convert from Mbps to bps */ max_link_rate = (u64)speed * 1000 * 1000; flow_action_for_each(i, entry, actions) { switch (entry->id) { case FLOW_ACTION_POLICE: ret = cxgb4_policer_validate(actions, entry, extack); if (ret) return ret; /* Convert bytes per second to bits per second */ if (entry->police.rate_bytes_ps * 8 > max_link_rate) { NL_SET_ERR_MSG_MOD(extack, "Specified policing max rate is larger than underlying link speed"); return -ERANGE; } break; default: NL_SET_ERR_MSG_MOD(extack, "Only policing action supported with Egress MATCHALL offload"); return -EOPNOTSUPP; } } for (i = 0; i < pi->nqsets; i++) { memset(&qe, 0, sizeof(qe)); qe.queue = i; e = cxgb4_sched_queue_lookup(dev, &qe); if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) { NL_SET_ERR_MSG_MOD(extack, "Some queues are already bound to different class"); return -EBUSY; } } return 0; } static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc) { struct port_info *pi = netdev2pinfo(dev); struct ch_sched_queue qe; int ret; u32 i; for (i = 0; i < pi->nqsets; i++) { qe.queue = i; qe.class = tc; ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE); if (ret) goto out_free; } return 0; out_free: while (i--) { qe.queue = i; qe.class = 
SCHED_CLS_NONE; cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE); } return ret; } static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev) { struct port_info *pi = netdev2pinfo(dev); struct ch_sched_queue qe; u32 i; for (i = 0; i < pi->nqsets; i++) { qe.queue = i; qe.class = SCHED_CLS_NONE; cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE); } } static int cxgb4_matchall_alloc_tc(struct net_device *dev, struct tc_cls_matchall_offload *cls) { struct ch_sched_params p = { .type = SCHED_CLASS_TYPE_PACKET, .u.params.level = SCHED_CLASS_LEVEL_CH_RL, .u.params.mode = SCHED_CLASS_MODE_CLASS, .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS, .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS, .u.params.class = SCHED_CLS_NONE, .u.params.minrate = 0, .u.params.weight = 0, .u.params.pktsize = dev->mtu, }; struct netlink_ext_ack *extack = cls->common.extack; struct cxgb4_tc_port_matchall *tc_port_matchall; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); struct flow_action_entry *entry; struct sched_class *e; int ret; u32 i; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; flow_action_for_each(i, entry, &cls->rule->action) if (entry->id == FLOW_ACTION_POLICE) break; ret = cxgb4_policer_validate(&cls->rule->action, entry, extack); if (ret) return ret; /* Convert from bytes per second to Kbps */ p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000); p.u.params.channel = pi->tx_chan; e = cxgb4_sched_class_alloc(dev, &p); if (!e) { NL_SET_ERR_MSG_MOD(extack, "No free traffic class available for policing action"); return -ENOMEM; } ret = cxgb4_matchall_tc_bind_queues(dev, e->idx); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Could not bind queues to traffic class"); goto out_free; } tc_port_matchall->egress.hwtc = e->idx; tc_port_matchall->egress.cookie = cls->cookie; tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED; return 0; out_free: cxgb4_sched_class_free(dev, e->idx); return ret; } static void cxgb4_matchall_free_tc(struct net_device *dev) { struct cxgb4_tc_port_matchall *tc_port_matchall; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; cxgb4_matchall_tc_unbind_queues(dev); cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc); tc_port_matchall->egress.hwtc = SCHED_CLS_NONE; tc_port_matchall->egress.cookie = 0; tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED; } static int cxgb4_matchall_mirror_alloc(struct net_device *dev, struct tc_cls_matchall_offload *cls) { struct netlink_ext_ack *extack = cls->common.extack; struct cxgb4_tc_port_matchall *tc_port_matchall; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); struct flow_action_entry *act; int ret; u32 i; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; flow_action_for_each(i, act, &cls->rule->action) { if (act->id == FLOW_ACTION_MIRRED) { ret = cxgb4_port_mirror_alloc(dev); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate mirror"); return ret; } tc_port_matchall->ingress.viid_mirror = pi->viid_mirror; break; } } return 0; } static void cxgb4_matchall_mirror_free(struct net_device *dev) { struct cxgb4_tc_port_matchall *tc_port_matchall; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; if (!tc_port_matchall->ingress.viid_mirror) return; cxgb4_port_mirror_free(dev); tc_port_matchall->ingress.viid_mirror 
= 0; } static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type) { struct cxgb4_tc_port_matchall *tc_port_matchall; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); int ret; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type], &tc_port_matchall->ingress.fs[filter_type]); if (ret) return ret; tc_port_matchall->ingress.tid[filter_type] = 0; return 0; } static int cxgb4_matchall_add_filter(struct net_device *dev, struct tc_cls_matchall_offload *cls, u8 filter_type) { struct netlink_ext_ack *extack = cls->common.extack; struct cxgb4_tc_port_matchall *tc_port_matchall; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); struct ch_filter_specification *fs; int ret, fidx; /* Get a free filter entry TID, where we can insert this new * rule. Only insert rule if its prio doesn't conflict with * existing rules. */ fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET, false, cls->common.prio); if (fidx < 0) { NL_SET_ERR_MSG_MOD(extack, "No free LETCAM index available"); return -ENOMEM; } tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; fs = &tc_port_matchall->ingress.fs[filter_type]; memset(fs, 0, sizeof(*fs)); if (fidx < adap->tids.nhpftids) fs->prio = 1; fs->tc_prio = cls->common.prio; fs->tc_cookie = cls->cookie; fs->type = filter_type; fs->hitcnts = 1; fs->val.pfvf_vld = 1; fs->val.pf = adap->pf; fs->val.vf = pi->vin; cxgb4_process_flow_actions(dev, &cls->rule->action, fs); ret = cxgb4_set_filter(dev, fidx, fs); if (ret) return ret; tc_port_matchall->ingress.tid[filter_type] = fidx; return 0; } static int cxgb4_matchall_alloc_filter(struct net_device *dev, struct tc_cls_matchall_offload *cls) { struct cxgb4_tc_port_matchall *tc_port_matchall; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); int ret, i; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; ret = cxgb4_matchall_mirror_alloc(dev, cls); if (ret) return ret; for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) { ret = cxgb4_matchall_add_filter(dev, cls, i); if (ret) goto out_free; } tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED; return 0; out_free: while (i-- > 0) cxgb4_matchall_del_filter(dev, i); cxgb4_matchall_mirror_free(dev); return ret; } static int cxgb4_matchall_free_filter(struct net_device *dev) { struct cxgb4_tc_port_matchall *tc_port_matchall; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); int ret; u8 i; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) { ret = cxgb4_matchall_del_filter(dev, i); if (ret) return ret; } cxgb4_matchall_mirror_free(dev); tc_port_matchall->ingress.packets = 0; tc_port_matchall->ingress.bytes = 0; tc_port_matchall->ingress.last_used = 0; tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED; return 0; } int cxgb4_tc_matchall_replace(struct net_device *dev, struct tc_cls_matchall_offload *cls_matchall, bool ingress) { struct netlink_ext_ack *extack = cls_matchall->common.extack; struct cxgb4_tc_port_matchall *tc_port_matchall; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); int ret; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; if (ingress) { if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED) { NL_SET_ERR_MSG_MOD(extack, "Only 1 Ingress MATCHALL can be offloaded"); 
return -ENOMEM; } ret = cxgb4_validate_flow_actions(dev, &cls_matchall->rule->action, extack, 1); if (ret) return ret; return cxgb4_matchall_alloc_filter(dev, cls_matchall); } if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) { NL_SET_ERR_MSG_MOD(extack, "Only 1 Egress MATCHALL can be offloaded"); return -ENOMEM; } ret = cxgb4_matchall_egress_validate(dev, cls_matchall); if (ret) return ret; return cxgb4_matchall_alloc_tc(dev, cls_matchall); } int cxgb4_tc_matchall_destroy(struct net_device *dev, struct tc_cls_matchall_offload *cls_matchall, bool ingress) { struct cxgb4_tc_port_matchall *tc_port_matchall; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; if (ingress) { /* All the filter types of this matchall rule save the * same cookie. So, checking for the first one is * enough. */ if (cls_matchall->cookie != tc_port_matchall->ingress.fs[0].tc_cookie) return -ENOENT; return cxgb4_matchall_free_filter(dev); } if (cls_matchall->cookie != tc_port_matchall->egress.cookie) return -ENOENT; cxgb4_matchall_free_tc(dev); return 0; } int cxgb4_tc_matchall_stats(struct net_device *dev, struct tc_cls_matchall_offload *cls_matchall) { u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0; struct cxgb4_tc_port_matchall *tc_port_matchall; struct cxgb4_matchall_ingress_entry *ingress; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); int ret; u8 i; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED) return -ENOENT; ingress = &tc_port_matchall->ingress; for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) { ret = cxgb4_get_filter_counters(dev, ingress->tid[i], &tmp_packets, &tmp_bytes, ingress->fs[i].hash); if (ret) return ret; packets += tmp_packets; bytes += tmp_bytes; } if (tc_port_matchall->ingress.packets != packets) { flow_stats_update(&cls_matchall->stats, bytes - tc_port_matchall->ingress.bytes, packets - tc_port_matchall->ingress.packets, 0, tc_port_matchall->ingress.last_used, FLOW_ACTION_HW_STATS_IMMEDIATE); tc_port_matchall->ingress.packets = packets; tc_port_matchall->ingress.bytes = bytes; tc_port_matchall->ingress.last_used = jiffies; } return 0; } static void cxgb4_matchall_disable_offload(struct net_device *dev) { struct cxgb4_tc_port_matchall *tc_port_matchall; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) cxgb4_matchall_free_tc(dev); if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED) cxgb4_matchall_free_filter(dev); } int cxgb4_init_tc_matchall(struct adapter *adap) { struct cxgb4_tc_port_matchall *tc_port_matchall; struct cxgb4_tc_matchall *tc_matchall; int ret; tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL); if (!tc_matchall) return -ENOMEM; tc_port_matchall = kcalloc(adap->params.nports, sizeof(*tc_port_matchall), GFP_KERNEL); if (!tc_port_matchall) { ret = -ENOMEM; goto out_free_matchall; } tc_matchall->port_matchall = tc_port_matchall; adap->tc_matchall = tc_matchall; return 0; out_free_matchall: kfree(tc_matchall); return ret; } void cxgb4_cleanup_tc_matchall(struct adapter *adap) { u8 i; if (adap->tc_matchall) { if (adap->tc_matchall->port_matchall) { for (i = 0; i < adap->params.nports; i++) { struct net_device *dev = adap->port[i]; if (dev) 
					cxgb4_matchall_disable_offload(dev);
			}
			kfree(adap->tc_matchall->port_matchall);
		}
		kfree(adap->tc_matchall);
	}
}
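/*
 * Worked example (illustrative only, not used by the driver): the egress
 * policer rate reaches cxgb4_matchall_alloc_tc() in bytes per second and is
 * programmed into the channel-rate-limit class in Kbps.  A tc police rate of
 * 1 Gbit/s arrives as rate_bytes_ps = 125000000, giving
 * div_u64(125000000 * 8, 1000) = 1000000 Kbps.
 */
#if 0
static inline u64 example_police_rate_to_kbps(u64 rate_bytes_ps)
{
	return div_u64(rate_bytes_ps * 8, 1000);
}
#endif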
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __I915_GEM_CREATE_H__
#define __I915_GEM_CREATE_H__

struct drm_file;
struct drm_device;
struct drm_mode_create_dumb;

int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);

#endif /* __I915_GEM_CREATE_H__ */
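/*
 * Usage sketch (assumption, not part of this header): the declaration above
 * is the driver's dumb-buffer allocation hook, normally wired into the
 * drm_driver descriptor roughly as follows.
 */
#if 0
static const struct drm_driver example_driver = {
	/* ... */
	.dumb_create = i915_gem_dumb_create,
	/* ... */
};
#endif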
#ifndef __BPF_EXPERIMENTAL__ #define __BPF_EXPERIMENTAL__ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_core_read.h> #define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node))) /* Description * Allocates an object of the type represented by 'local_type_id' in * program BTF. User may use the bpf_core_type_id_local macro to pass the * type ID of a struct in program BTF. * * The 'local_type_id' parameter must be a known constant. * The 'meta' parameter is rewritten by the verifier, no need for BPF * program to set it. * Returns * A pointer to an object of the type corresponding to the passed in * 'local_type_id', or NULL on failure. */ extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym; /* Convenience macro to wrap over bpf_obj_new_impl */ #define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL)) /* Description * Free an allocated object. All fields of the object that require * destruction will be destructed before the storage is freed. * * The 'meta' parameter is rewritten by the verifier, no need for BPF * program to set it. * Returns * Void. */ extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym; /* Convenience macro to wrap over bpf_obj_drop_impl */ #define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL) /* Description * Increment the refcount on a refcounted local kptr, turning the * non-owning reference input into an owning reference in the process. * * The 'meta' parameter is rewritten by the verifier, no need for BPF * program to set it. * Returns * An owning reference to the object pointed to by 'kptr' */ extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym; /* Convenience macro to wrap over bpf_refcount_acquire_impl */ #define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL) /* Description * Add a new entry to the beginning of the BPF linked list. * * The 'meta' and 'off' parameters are rewritten by the verifier, no need * for BPF programs to set them * Returns * 0 if the node was successfully added * -EINVAL if the node wasn't added because it's already in a list */ extern int bpf_list_push_front_impl(struct bpf_list_head *head, struct bpf_list_node *node, void *meta, __u64 off) __ksym; /* Convenience macro to wrap over bpf_list_push_front_impl */ #define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0) /* Description * Add a new entry to the end of the BPF linked list. * * The 'meta' and 'off' parameters are rewritten by the verifier, no need * for BPF programs to set them * Returns * 0 if the node was successfully added * -EINVAL if the node wasn't added because it's already in a list */ extern int bpf_list_push_back_impl(struct bpf_list_head *head, struct bpf_list_node *node, void *meta, __u64 off) __ksym; /* Convenience macro to wrap over bpf_list_push_back_impl */ #define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0) /* Description * Remove the entry at the beginning of the BPF linked list. * Returns * Pointer to bpf_list_node of deleted entry, or NULL if list is empty. */ extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym; /* Description * Remove the entry at the end of the BPF linked list. * Returns * Pointer to bpf_list_node of deleted entry, or NULL if list is empty. 
*/ extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym; /* Description * Remove 'node' from rbtree with root 'root' * Returns * Pointer to the removed node, or NULL if 'root' didn't contain 'node' */ extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, struct bpf_rb_node *node) __ksym; /* Description * Add 'node' to rbtree with root 'root' using comparator 'less' * * The 'meta' and 'off' parameters are rewritten by the verifier, no need * for BPF programs to set them * Returns * 0 if the node was successfully added * -EINVAL if the node wasn't added because it's already in a tree */ extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), void *meta, __u64 off) __ksym; /* Convenience macro to wrap over bpf_rbtree_add_impl */ #define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0) /* Description * Return the first (leftmost) node in input tree * Returns * Pointer to the node, which is _not_ removed from the tree. If the tree * contains no nodes, returns NULL. */ extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym; /* Description * Allocates a percpu object of the type represented by 'local_type_id' in * program BTF. User may use the bpf_core_type_id_local macro to pass the * type ID of a struct in program BTF. * * The 'local_type_id' parameter must be a known constant. * The 'meta' parameter is rewritten by the verifier, no need for BPF * program to set it. * Returns * A pointer to a percpu object of the type corresponding to the passed in * 'local_type_id', or NULL on failure. */ extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym; /* Convenience macro to wrap over bpf_percpu_obj_new_impl */ #define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL)) /* Description * Free an allocated percpu object. All fields of the object that require * destruction will be destructed before the storage is freed. * * The 'meta' parameter is rewritten by the verifier, no need for BPF * program to set it. * Returns * Void. */ extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym; struct bpf_iter_task_vma; extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it, struct task_struct *task, __u64 addr) __ksym; extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym; extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym; /* Convenience macro to wrap over bpf_obj_drop_impl */ #define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL) /* Description * Throw a BPF exception from the program, immediately terminating its * execution and unwinding the stack. The supplied 'cookie' parameter * will be the return value of the program when an exception is thrown, * and the default exception callback is used. Otherwise, if an exception * callback is set using the '__exception_cb(callback)' declaration tag * on the main program, the 'cookie' parameter will be the callback's only * input argument. * * Thus, in case of default exception callback, 'cookie' is subjected to * constraints on the program's return value (as with R0 on exit). * Otherwise, the return value of the marked exception callback will be * subjected to the same checks. * * Note that throwing an exception with lingering resources (locks, * references, etc.) will lead to a verification error. 
* * Note that callbacks *cannot* call this helper. * Returns * Never. * Throws * An exception with the specified 'cookie' value. */ extern void bpf_throw(u64 cookie) __ksym; /* Description * Acquire a reference on the exe_file member field belonging to the * mm_struct that is nested within the supplied task_struct. The supplied * task_struct must be trusted/referenced. * Returns * A referenced file pointer pointing to the exe_file member field of the * mm_struct nested in the supplied task_struct, or NULL. */ extern struct file *bpf_get_task_exe_file(struct task_struct *task) __ksym; /* Description * Release a reference on the supplied file. The supplied file must be * acquired. */ extern void bpf_put_file(struct file *file) __ksym; /* Description * Resolve a pathname for the supplied path and store it in the supplied * buffer. The supplied path must be trusted/referenced. * Returns * A positive integer corresponding to the length of the resolved pathname, * including the NULL termination character, stored in the supplied * buffer. On error, a negative integer is returned. */ extern int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz) __ksym; /* This macro must be used to mark the exception callback corresponding to the * main program. For example: * * int exception_cb(u64 cookie) { * return cookie; * } * * SEC("tc") * __exception_cb(exception_cb) * int main_prog(struct __sk_buff *ctx) { * ... * return TC_ACT_OK; * } * * Here, exception callback for the main program will be 'exception_cb'. Note * that this attribute can only be used once, and multiple exception callbacks * specified for the main program will lead to verification error. */ #define __exception_cb(name) __attribute__((btf_decl_tag("exception_callback:" #name))) #define __bpf_assert_signed(x) _Generic((x), \ unsigned long: 0, \ unsigned long long: 0, \ signed long: 1, \ signed long long: 1 \ ) #define __bpf_assert_check(LHS, op, RHS) \ _Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression"); \ _Static_assert(sizeof(LHS) == 8, "Only 8-byte integers are supported\n"); \ _Static_assert(__builtin_constant_p(__bpf_assert_signed(LHS)), "internal static assert"); \ _Static_assert(__builtin_constant_p((RHS)), "2nd argument must be a constant expression") #define __bpf_assert(LHS, op, cons, RHS, VAL) \ ({ \ (void)bpf_throw; \ asm volatile ("if %[lhs] " op " %[rhs] goto +2; r1 = %[value]; call bpf_throw" \ : : [lhs] "r"(LHS), [rhs] cons(RHS), [value] "ri"(VAL) : ); \ }) #define __bpf_assert_op_sign(LHS, op, cons, RHS, VAL, supp_sign) \ ({ \ __bpf_assert_check(LHS, op, RHS); \ if (__bpf_assert_signed(LHS) && !(supp_sign)) \ __bpf_assert(LHS, "s" #op, cons, RHS, VAL); \ else \ __bpf_assert(LHS, #op, cons, RHS, VAL); \ }) #define __bpf_assert_op(LHS, op, RHS, VAL, supp_sign) \ ({ \ if (sizeof(typeof(RHS)) == 8) { \ const typeof(RHS) rhs_var = (RHS); \ __bpf_assert_op_sign(LHS, op, "r", rhs_var, VAL, supp_sign); \ } else { \ __bpf_assert_op_sign(LHS, op, "i", RHS, VAL, supp_sign); \ } \ }) #define __cmp_cannot_be_signed(x) \ __builtin_strcmp(#x, "==") == 0 || __builtin_strcmp(#x, "!=") == 0 || \ __builtin_strcmp(#x, "&") == 0 #define __is_signed_type(type) (((type)(-1)) < (type)1) #define __bpf_cmp(LHS, OP, PRED, RHS, DEFAULT) \ ({ \ __label__ l_true; \ bool ret = DEFAULT; \ asm volatile goto("if %[lhs] " OP " %[rhs] goto %l[l_true]" \ :: [lhs] "r"((short)LHS), [rhs] PRED (RHS) :: l_true); \ ret = !DEFAULT; \ l_true: \ ret; \ }) /* C type conversions coupled with comparison operator are tricky. 
* Make sure BPF program is compiled with -Wsign-compare then * __lhs OP __rhs below will catch the mistake. * Be aware that we check only __lhs to figure out the sign of compare. */ #define _bpf_cmp(LHS, OP, RHS, UNLIKELY) \ ({ \ typeof(LHS) __lhs = (LHS); \ typeof(RHS) __rhs = (RHS); \ bool ret; \ _Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression"); \ (void)(__lhs OP __rhs); \ if (__cmp_cannot_be_signed(OP) || !__is_signed_type(typeof(__lhs))) { \ if (sizeof(__rhs) == 8) \ /* "i" will truncate 64-bit constant into s32, \ * so we have to use extra register via "r". \ */ \ ret = __bpf_cmp(__lhs, #OP, "r", __rhs, UNLIKELY); \ else \ ret = __bpf_cmp(__lhs, #OP, "ri", __rhs, UNLIKELY); \ } else { \ if (sizeof(__rhs) == 8) \ ret = __bpf_cmp(__lhs, "s"#OP, "r", __rhs, UNLIKELY); \ else \ ret = __bpf_cmp(__lhs, "s"#OP, "ri", __rhs, UNLIKELY); \ } \ ret; \ }) #ifndef bpf_cmp_unlikely #define bpf_cmp_unlikely(LHS, OP, RHS) _bpf_cmp(LHS, OP, RHS, true) #endif #ifndef bpf_cmp_likely #define bpf_cmp_likely(LHS, OP, RHS) \ ({ \ bool ret = 0; \ if (__builtin_strcmp(#OP, "==") == 0) \ ret = _bpf_cmp(LHS, !=, RHS, false); \ else if (__builtin_strcmp(#OP, "!=") == 0) \ ret = _bpf_cmp(LHS, ==, RHS, false); \ else if (__builtin_strcmp(#OP, "<=") == 0) \ ret = _bpf_cmp(LHS, >, RHS, false); \ else if (__builtin_strcmp(#OP, "<") == 0) \ ret = _bpf_cmp(LHS, >=, RHS, false); \ else if (__builtin_strcmp(#OP, ">") == 0) \ ret = _bpf_cmp(LHS, <=, RHS, false); \ else if (__builtin_strcmp(#OP, ">=") == 0) \ ret = _bpf_cmp(LHS, <, RHS, false); \ else \ asm volatile("r0 " #OP " invalid compare"); \ ret; \ }) #endif /* * Note that cond_break can only be portably used in the body of a breakable * construct, whereas can_loop can be used anywhere. */ #ifdef __BPF_FEATURE_MAY_GOTO #define can_loop \ ({ __label__ l_break, l_continue; \ bool ret = true; \ asm volatile goto("may_goto %l[l_break]" \ :::: l_break); \ goto l_continue; \ l_break: ret = false; \ l_continue:; \ ret; \ }) #define cond_break \ ({ __label__ l_break, l_continue; \ asm volatile goto("may_goto %l[l_break]" \ :::: l_break); \ goto l_continue; \ l_break: break; \ l_continue:; \ }) #else #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define can_loop \ ({ __label__ l_break, l_continue; \ bool ret = true; \ asm volatile goto("1:.byte 0xe5; \ .byte 0; \ .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \ .short 0" \ :::: l_break); \ goto l_continue; \ l_break: ret = false; \ l_continue:; \ ret; \ }) #define cond_break \ ({ __label__ l_break, l_continue; \ asm volatile goto("1:.byte 0xe5; \ .byte 0; \ .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \ .short 0" \ :::: l_break); \ goto l_continue; \ l_break: break; \ l_continue:; \ }) #else #define can_loop \ ({ __label__ l_break, l_continue; \ bool ret = true; \ asm volatile goto("1:.byte 0xe5; \ .byte 0; \ .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \ .short 0" \ :::: l_break); \ goto l_continue; \ l_break: ret = false; \ l_continue:; \ ret; \ }) #define cond_break \ ({ __label__ l_break, l_continue; \ asm volatile goto("1:.byte 0xe5; \ .byte 0; \ .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \ .short 0" \ :::: l_break); \ goto l_continue; \ l_break: break; \ l_continue:; \ }) #endif #endif #ifndef bpf_nop_mov #define bpf_nop_mov(var) \ asm volatile("%[reg]=%[reg]"::[reg]"r"((short)var)) #endif /* emit instruction: * rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as */ #ifndef bpf_addr_space_cast #define bpf_addr_space_cast(var, dst_as, src_as)\ asm 
volatile(".byte 0xBF; \ .ifc %[reg], r0; \ .byte 0x00; \ .endif; \ .ifc %[reg], r1; \ .byte 0x11; \ .endif; \ .ifc %[reg], r2; \ .byte 0x22; \ .endif; \ .ifc %[reg], r3; \ .byte 0x33; \ .endif; \ .ifc %[reg], r4; \ .byte 0x44; \ .endif; \ .ifc %[reg], r5; \ .byte 0x55; \ .endif; \ .ifc %[reg], r6; \ .byte 0x66; \ .endif; \ .ifc %[reg], r7; \ .byte 0x77; \ .endif; \ .ifc %[reg], r8; \ .byte 0x88; \ .endif; \ .ifc %[reg], r9; \ .byte 0x99; \ .endif; \ .short %[off]; \ .long %[as]" \ : [reg]"+r"(var) \ : [off]"i"(BPF_ADDR_SPACE_CAST) \ , [as]"i"((dst_as << 16) | src_as)); #endif void bpf_preempt_disable(void) __weak __ksym; void bpf_preempt_enable(void) __weak __ksym; typedef struct { } __bpf_preempt_t; static inline __bpf_preempt_t __bpf_preempt_constructor(void) { __bpf_preempt_t ret = {}; bpf_preempt_disable(); return ret; } static inline void __bpf_preempt_destructor(__bpf_preempt_t *t) { bpf_preempt_enable(); } #define bpf_guard_preempt() \ __bpf_preempt_t ___bpf_apply(preempt, __COUNTER__) \ __attribute__((__unused__, __cleanup__(__bpf_preempt_destructor))) = \ __bpf_preempt_constructor() /* Description * Assert that a conditional expression is true. * Returns * Void. * Throws * An exception with the value zero when the assertion fails. */ #define bpf_assert(cond) if (!(cond)) bpf_throw(0); /* Description * Assert that a conditional expression is true. * Returns * Void. * Throws * An exception with the specified value when the assertion fails. */ #define bpf_assert_with(cond, value) if (!(cond)) bpf_throw(value); /* Description * Assert that LHS is in the range [BEG, END] (inclusive of both). This * statement updates the known bounds of LHS during verification. Note * that both BEG and END must be constant values, and must fit within the * data type of LHS. * Returns * Void. * Throws * An exception with the value zero when the assertion fails. */ #define bpf_assert_range(LHS, BEG, END) \ ({ \ _Static_assert(BEG <= END, "BEG must be <= END"); \ barrier_var(LHS); \ __bpf_assert_op(LHS, >=, BEG, 0, false); \ __bpf_assert_op(LHS, <=, END, 0, false); \ }) /* Description * Assert that LHS is in the range [BEG, END] (inclusive of both). This * statement updates the known bounds of LHS during verification. Note * that both BEG and END must be constant values, and must fit within the * data type of LHS. * Returns * Void. * Throws * An exception with the specified value when the assertion fails. 
*/ #define bpf_assert_range_with(LHS, BEG, END, value) \ ({ \ _Static_assert(BEG <= END, "BEG must be <= END"); \ barrier_var(LHS); \ __bpf_assert_op(LHS, >=, BEG, value, false); \ __bpf_assert_op(LHS, <=, END, value, false); \ }) struct bpf_iter_css_task; struct cgroup_subsys_state; extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it, struct cgroup_subsys_state *css, unsigned int flags) __weak __ksym; extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym; extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym; struct bpf_iter_task; extern int bpf_iter_task_new(struct bpf_iter_task *it, struct task_struct *task, unsigned int flags) __weak __ksym; extern struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) __weak __ksym; extern void bpf_iter_task_destroy(struct bpf_iter_task *it) __weak __ksym; struct bpf_iter_css; extern int bpf_iter_css_new(struct bpf_iter_css *it, struct cgroup_subsys_state *start, unsigned int flags) __weak __ksym; extern struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym; extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym; extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym; extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym; extern int bpf_wq_set_callback_impl(struct bpf_wq *wq, int (callback_fn)(void *map, int *key, void *value), unsigned int flags__k, void *aux__ign) __ksym; #define bpf_wq_set_callback(timer, cb, flags) \ bpf_wq_set_callback_impl(timer, cb, flags, NULL) struct bpf_iter_kmem_cache; extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym; extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym; extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym; #endif
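/*
 * Illustrative usage sketch (editor's addition, not part of the upstream
 * header): a minimal BPF program exercising a few of the declarations above.
 * It assumes the usual selftest environment (vmlinux.h, bpf_helpers.h, this
 * header, clang -target bpf) plus the customary GPL license string; the
 * section, program and variable names are hypothetical.
 *
 *	SEC("tc")
 *	int sketch_prog(struct __sk_buff *ctx)
 *	{
 *		__u64 i = 0, sum = 0, len = ctx->len;
 *
 *		// Open-coded loop bounded for the verifier: can_loop emits a
 *		// may_goto (or its raw encoding on older toolchains).
 *		while (can_loop) {
 *			if (bpf_cmp_unlikely(i, >=, 100))
 *				break;
 *			sum += i++;
 *		}
 *
 *		// Teach the verifier that 'len' lies in [0, 0xffff]; if the
 *		// check fails at runtime, an exception with cookie 0 is thrown
 *		// and becomes the program's return value.
 *		bpf_assert_range(len, 0, 0xffff);
 *
 *		return sum ? 0 : 1;
 *	}
 */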
// SPDX-License-Identifier: GPL-2.0 /* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */ /* Copyright 2019 Collabora ltd. */ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/dma-resv.h> #include <drm/gpu_scheduler.h> #include <drm/panfrost_drm.h> #include "panfrost_device.h" #include "panfrost_devfreq.h" #include "panfrost_job.h" #include "panfrost_features.h" #include "panfrost_issues.h" #include "panfrost_gem.h" #include "panfrost_regs.h" #include "panfrost_gpu.h" #include "panfrost_mmu.h" #include "panfrost_dump.h" #define JOB_TIMEOUT_MS 500 #define job_write(dev, reg, data) writel(data, dev->iomem + (reg)) #define job_read(dev, reg) readl(dev->iomem + (reg)) struct panfrost_queue_state { struct drm_gpu_scheduler sched; u64 fence_context; u64 emit_seqno; }; struct panfrost_job_slot { struct panfrost_queue_state queue[NUM_JOB_SLOTS]; spinlock_t job_lock; int irq; }; static struct panfrost_job * to_panfrost_job(struct drm_sched_job *sched_job) { return container_of(sched_job, struct panfrost_job, base); } struct panfrost_fence { struct dma_fence base; struct drm_device *dev; /* panfrost seqno for signaled() test */ u64 seqno; int queue; }; static inline struct panfrost_fence * to_panfrost_fence(struct dma_fence *fence) { return (struct panfrost_fence *)fence; } static const char *panfrost_fence_get_driver_name(struct dma_fence *fence) { return "panfrost"; } static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence) { struct panfrost_fence *f = to_panfrost_fence(fence); switch (f->queue) { case 0: return "panfrost-js-0"; case 1: return "panfrost-js-1"; case 2: return "panfrost-js-2"; default: return NULL; } } static const struct dma_fence_ops panfrost_fence_ops = { .get_driver_name = panfrost_fence_get_driver_name, .get_timeline_name = panfrost_fence_get_timeline_name, }; static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num) { struct panfrost_fence *fence; struct panfrost_job_slot *js = pfdev->js; fence = kzalloc(sizeof(*fence), GFP_KERNEL); if (!fence) return ERR_PTR(-ENOMEM); fence->dev = pfdev->ddev; fence->queue = js_num; fence->seqno = ++js->queue[js_num].emit_seqno; dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock, js->queue[js_num].fence_context, fence->seqno); return &fence->base; } int panfrost_job_get_slot(struct panfrost_job *job) { /* JS0: fragment jobs. * JS1: vertex/tiler jobs * JS2: compute jobs */ if (job->requirements & PANFROST_JD_REQ_FS) return 0; /* Not exposed to userspace yet */ #if 0 if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) { if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) && (job->pfdev->features.nr_core_groups == 2)) return 2; if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987)) return 2; } #endif return 1; } static void panfrost_job_write_affinity(struct panfrost_device *pfdev, u32 requirements, int js) { u64 affinity; /* * Use all cores for now. 
* Eventually we may need to support tiler only jobs and h/w with * multiple (2) coherent core groups */ affinity = pfdev->features.shader_present; job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity)); job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity)); } static u32 panfrost_get_job_chain_flag(const struct panfrost_job *job) { struct panfrost_fence *f = to_panfrost_fence(job->done_fence); if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) return 0; return (f->seqno & 1) ? JS_CONFIG_JOB_CHAIN_FLAG : 0; } static struct panfrost_job * panfrost_dequeue_job(struct panfrost_device *pfdev, int slot) { struct panfrost_job *job = pfdev->jobs[slot][0]; WARN_ON(!job); if (job->is_profiled && job->engine_usage) { job->engine_usage->elapsed_ns[slot] += ktime_to_ns(ktime_sub(ktime_get(), job->start_time)); job->engine_usage->cycles[slot] += panfrost_cycle_counter_read(pfdev) - job->start_cycles; } if (job->requirements & PANFROST_JD_REQ_CYCLE_COUNT || job->is_profiled) panfrost_cycle_counter_put(pfdev); pfdev->jobs[slot][0] = pfdev->jobs[slot][1]; pfdev->jobs[slot][1] = NULL; return job; } static unsigned int panfrost_enqueue_job(struct panfrost_device *pfdev, int slot, struct panfrost_job *job) { if (WARN_ON(!job)) return 0; if (!pfdev->jobs[slot][0]) { pfdev->jobs[slot][0] = job; return 0; } WARN_ON(pfdev->jobs[slot][1]); pfdev->jobs[slot][1] = job; WARN_ON(panfrost_get_job_chain_flag(job) == panfrost_get_job_chain_flag(pfdev->jobs[slot][0])); return 1; } static void panfrost_job_hw_submit(struct panfrost_job *job, int js) { struct panfrost_device *pfdev = job->pfdev; unsigned int subslot; u32 cfg; u64 jc_head = job->jc; int ret; panfrost_devfreq_record_busy(&pfdev->pfdevfreq); ret = pm_runtime_get_sync(pfdev->dev); if (ret < 0) return; if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) { return; } cfg = panfrost_mmu_as_get(pfdev, job->mmu); job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head)); job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head)); panfrost_job_write_affinity(pfdev, job->requirements, js); /* start MMU, medium priority, cache clean/flush on end, clean/flush on * start */ cfg |= JS_CONFIG_THREAD_PRI(8) | JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE | JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE | panfrost_get_job_chain_flag(job); if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION; if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649)) cfg |= JS_CONFIG_START_MMU; job_write(pfdev, JS_CONFIG_NEXT(js), cfg); if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id); /* GO ! 
*/ spin_lock(&pfdev->js->job_lock); subslot = panfrost_enqueue_job(pfdev, js, job); /* Don't queue the job if a reset is in progress */ if (!atomic_read(&pfdev->reset.pending)) { job->is_profiled = pfdev->profile_mode; if (job->requirements & PANFROST_JD_REQ_CYCLE_COUNT || job->is_profiled) panfrost_cycle_counter_get(pfdev); if (job->is_profiled) { job->start_time = ktime_get(); job->start_cycles = panfrost_cycle_counter_read(pfdev); } job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START); dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d", job, js, subslot, jc_head, cfg & 0xf); } spin_unlock(&pfdev->js->job_lock); } static int panfrost_acquire_object_fences(struct drm_gem_object **bos, int bo_count, struct drm_sched_job *job) { int i, ret; for (i = 0; i < bo_count; i++) { ret = dma_resv_reserve_fences(bos[i]->resv, 1); if (ret) return ret; /* panfrost always uses write mode in its current uapi */ ret = drm_sched_job_add_implicit_dependencies(job, bos[i], true); if (ret) return ret; } return 0; } static void panfrost_attach_object_fences(struct drm_gem_object **bos, int bo_count, struct dma_fence *fence) { int i; for (i = 0; i < bo_count; i++) dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE); } int panfrost_job_push(struct panfrost_job *job) { struct panfrost_device *pfdev = job->pfdev; struct ww_acquire_ctx acquire_ctx; int ret = 0; ret = drm_gem_lock_reservations(job->bos, job->bo_count, &acquire_ctx); if (ret) return ret; mutex_lock(&pfdev->sched_lock); drm_sched_job_arm(&job->base); job->render_done_fence = dma_fence_get(&job->base.s_fence->finished); ret = panfrost_acquire_object_fences(job->bos, job->bo_count, &job->base); if (ret) { mutex_unlock(&pfdev->sched_lock); goto unlock; } kref_get(&job->refcount); /* put by scheduler job completion */ drm_sched_entity_push_job(&job->base); mutex_unlock(&pfdev->sched_lock); panfrost_attach_object_fences(job->bos, job->bo_count, job->render_done_fence); unlock: drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx); return ret; } static void panfrost_job_cleanup(struct kref *ref) { struct panfrost_job *job = container_of(ref, struct panfrost_job, refcount); unsigned int i; dma_fence_put(job->done_fence); dma_fence_put(job->render_done_fence); if (job->mappings) { for (i = 0; i < job->bo_count; i++) { if (!job->mappings[i]) break; atomic_dec(&job->mappings[i]->obj->gpu_usecount); panfrost_gem_mapping_put(job->mappings[i]); } kvfree(job->mappings); } if (job->bos) { for (i = 0; i < job->bo_count; i++) drm_gem_object_put(job->bos[i]); kvfree(job->bos); } kfree(job); } void panfrost_job_put(struct panfrost_job *job) { kref_put(&job->refcount, panfrost_job_cleanup); } static void panfrost_job_free(struct drm_sched_job *sched_job) { struct panfrost_job *job = to_panfrost_job(sched_job); drm_sched_job_cleanup(sched_job); panfrost_job_put(job); } static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job) { struct panfrost_job *job = to_panfrost_job(sched_job); struct panfrost_device *pfdev = job->pfdev; int slot = panfrost_job_get_slot(job); struct dma_fence *fence = NULL; if (unlikely(job->base.s_fence->finished.error)) return NULL; /* Nothing to execute: can happen if the job has finished while * we were resetting the GPU. 
*/ if (!job->jc) return NULL; fence = panfrost_fence_create(pfdev, slot); if (IS_ERR(fence)) return fence; if (job->done_fence) dma_fence_put(job->done_fence); job->done_fence = dma_fence_get(fence); panfrost_job_hw_submit(job, slot); return fence; } void panfrost_job_enable_interrupts(struct panfrost_device *pfdev) { int j; u32 irq_mask = 0; clear_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended); for (j = 0; j < NUM_JOB_SLOTS; j++) { irq_mask |= MK_JS_MASK(j); } job_write(pfdev, JOB_INT_CLEAR, irq_mask); job_write(pfdev, JOB_INT_MASK, irq_mask); } void panfrost_job_suspend_irq(struct panfrost_device *pfdev) { set_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended); job_write(pfdev, JOB_INT_MASK, 0); synchronize_irq(pfdev->js->irq); } static void panfrost_job_handle_err(struct panfrost_device *pfdev, struct panfrost_job *job, unsigned int js) { u32 js_status = job_read(pfdev, JS_STATUS(js)); const char *exception_name = panfrost_exception_name(js_status); bool signal_fence = true; if (!panfrost_exception_is_fault(js_status)) { dev_dbg(pfdev->dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x", js, exception_name, job_read(pfdev, JS_HEAD_LO(js)), job_read(pfdev, JS_TAIL_LO(js))); } else { dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x", js, exception_name, job_read(pfdev, JS_HEAD_LO(js)), job_read(pfdev, JS_TAIL_LO(js))); } if (js_status == DRM_PANFROST_EXCEPTION_STOPPED) { /* Update the job head so we can resume */ job->jc = job_read(pfdev, JS_TAIL_LO(js)) | ((u64)job_read(pfdev, JS_TAIL_HI(js)) << 32); /* The job will be resumed, don't signal the fence */ signal_fence = false; } else if (js_status == DRM_PANFROST_EXCEPTION_TERMINATED) { /* Job has been hard-stopped, flag it as canceled */ dma_fence_set_error(job->done_fence, -ECANCELED); job->jc = 0; } else if (panfrost_exception_is_fault(js_status)) { /* We might want to provide finer-grained error code based on * the exception type, but unconditionally setting to EINVAL * is good enough for now. */ dma_fence_set_error(job->done_fence, -EINVAL); job->jc = 0; } panfrost_mmu_as_put(pfdev, job->mmu); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); if (signal_fence) dma_fence_signal_locked(job->done_fence); pm_runtime_put_autosuspend(pfdev->dev); if (panfrost_exception_needs_reset(pfdev, js_status)) { atomic_set(&pfdev->reset.pending, 1); drm_sched_fault(&pfdev->js->queue[js].sched); } } static void panfrost_job_handle_done(struct panfrost_device *pfdev, struct panfrost_job *job) { /* Set ->jc to 0 to avoid re-submitting an already finished job (can * happen when we receive the DONE interrupt while doing a GPU reset). */ job->jc = 0; panfrost_mmu_as_put(pfdev, job->mmu); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); dma_fence_signal_locked(job->done_fence); pm_runtime_put_autosuspend(pfdev->dev); } static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status) { struct panfrost_job *done[NUM_JOB_SLOTS][2] = {}; struct panfrost_job *failed[NUM_JOB_SLOTS] = {}; u32 js_state = 0, js_events = 0; unsigned int i, j; /* First we collect all failed/done jobs. */ while (status) { u32 js_state_mask = 0; for (j = 0; j < NUM_JOB_SLOTS; j++) { if (status & MK_JS_MASK(j)) js_state_mask |= MK_JS_MASK(j); if (status & JOB_INT_MASK_DONE(j)) { if (done[j][0]) done[j][1] = panfrost_dequeue_job(pfdev, j); else done[j][0] = panfrost_dequeue_job(pfdev, j); } if (status & JOB_INT_MASK_ERR(j)) { /* Cancel the next submission. Will be submitted * after we're done handling this failure if * there's no reset pending. 
*/ job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP); failed[j] = panfrost_dequeue_job(pfdev, j); } } /* JS_STATE is sampled when JOB_INT_CLEAR is written. * For each BIT(slot) or BIT(slot + 16) bit written to * JOB_INT_CLEAR, the corresponding bits in JS_STATE * (BIT(slot) and BIT(slot + 16)) are updated, but this * is racy. If we only have one job done at the time we * read JOB_INT_RAWSTAT but the second job fails before we * clear the status, we end up with a status containing * only the DONE bit and consider both jobs as DONE since * JS_STATE reports both NEXT and CURRENT as inactive. * To prevent that, let's repeat this clear+read steps * until status is 0. */ job_write(pfdev, JOB_INT_CLEAR, status); js_state &= ~js_state_mask; js_state |= job_read(pfdev, JOB_INT_JS_STATE) & js_state_mask; js_events |= status; status = job_read(pfdev, JOB_INT_RAWSTAT); } /* Then we handle the dequeued jobs. */ for (j = 0; j < NUM_JOB_SLOTS; j++) { if (!(js_events & MK_JS_MASK(j))) continue; if (failed[j]) { panfrost_job_handle_err(pfdev, failed[j], j); } else if (pfdev->jobs[j][0] && !(js_state & MK_JS_MASK(j))) { /* When the current job doesn't fail, the JM dequeues * the next job without waiting for an ACK, this means * we can have 2 jobs dequeued and only catch the * interrupt when the second one is done. If both slots * are inactive, but one job remains in pfdev->jobs[j], * consider it done. Of course that doesn't apply if a * failure happened since we cancelled execution of the * job in _NEXT (see above). */ if (WARN_ON(!done[j][0])) done[j][0] = panfrost_dequeue_job(pfdev, j); else done[j][1] = panfrost_dequeue_job(pfdev, j); } for (i = 0; i < ARRAY_SIZE(done[0]) && done[j][i]; i++) panfrost_job_handle_done(pfdev, done[j][i]); } /* And finally we requeue jobs that were waiting in the second slot * and have been stopped if we detected a failure on the first slot. */ for (j = 0; j < NUM_JOB_SLOTS; j++) { if (!(js_events & MK_JS_MASK(j))) continue; if (!failed[j] || !pfdev->jobs[j][0]) continue; if (pfdev->jobs[j][0]->jc == 0) { /* The job was cancelled, signal the fence now */ struct panfrost_job *canceled = panfrost_dequeue_job(pfdev, j); dma_fence_set_error(canceled->done_fence, -ECANCELED); panfrost_job_handle_done(pfdev, canceled); } else if (!atomic_read(&pfdev->reset.pending)) { /* Requeue the job we removed if no reset is pending */ job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_START); } } } static void panfrost_job_handle_irqs(struct panfrost_device *pfdev) { u32 status = job_read(pfdev, JOB_INT_RAWSTAT); while (status) { pm_runtime_mark_last_busy(pfdev->dev); spin_lock(&pfdev->js->job_lock); panfrost_job_handle_irq(pfdev, status); spin_unlock(&pfdev->js->job_lock); status = job_read(pfdev, JOB_INT_RAWSTAT); } } static u32 panfrost_active_slots(struct panfrost_device *pfdev, u32 *js_state_mask, u32 js_state) { u32 rawstat; if (!(js_state & *js_state_mask)) return 0; rawstat = job_read(pfdev, JOB_INT_RAWSTAT); if (rawstat) { unsigned int i; for (i = 0; i < NUM_JOB_SLOTS; i++) { if (rawstat & MK_JS_MASK(i)) *js_state_mask &= ~MK_JS_MASK(i); } } return js_state & *js_state_mask; } static void panfrost_reset(struct panfrost_device *pfdev, struct drm_sched_job *bad) { u32 js_state, js_state_mask = 0xffffffff; unsigned int i, j; bool cookie; int ret; if (!atomic_read(&pfdev->reset.pending)) return; /* Stop the schedulers. * * FIXME: We temporarily get out of the dma_fence_signalling section * because the cleanup path generate lockdep splats when taking locks * to release job resources. 
We should rework the code to follow this * pattern: * * try_lock * if (locked) * release * else * schedule_work_to_release_later */ for (i = 0; i < NUM_JOB_SLOTS; i++) drm_sched_stop(&pfdev->js->queue[i].sched, bad); cookie = dma_fence_begin_signalling(); if (bad) drm_sched_increase_karma(bad); /* Mask job interrupts and synchronize to make sure we won't be * interrupted during our reset. */ job_write(pfdev, JOB_INT_MASK, 0); synchronize_irq(pfdev->js->irq); for (i = 0; i < NUM_JOB_SLOTS; i++) { /* Cancel the next job and soft-stop the running job. */ job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP); job_write(pfdev, JS_COMMAND(i), JS_COMMAND_SOFT_STOP); } /* Wait at most 10ms for soft-stops to complete */ ret = readl_poll_timeout(pfdev->iomem + JOB_INT_JS_STATE, js_state, !panfrost_active_slots(pfdev, &js_state_mask, js_state), 10, 10000); if (ret) dev_err(pfdev->dev, "Soft-stop failed\n"); /* Handle the remaining interrupts before we reset. */ panfrost_job_handle_irqs(pfdev); /* Remaining interrupts have been handled, but we might still have * stuck jobs. Let's make sure the PM counters stay balanced by * manually calling pm_runtime_put_noidle() and * panfrost_devfreq_record_idle() for each stuck job. * Let's also make sure the cycle counting register's refcnt is * kept balanced to prevent it from running forever */ spin_lock(&pfdev->js->job_lock); for (i = 0; i < NUM_JOB_SLOTS; i++) { for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) { if (pfdev->jobs[i][j]->requirements & PANFROST_JD_REQ_CYCLE_COUNT || pfdev->jobs[i][j]->is_profiled) panfrost_cycle_counter_put(pfdev->jobs[i][j]->pfdev); pm_runtime_put_noidle(pfdev->dev); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); } } memset(pfdev->jobs, 0, sizeof(pfdev->jobs)); spin_unlock(&pfdev->js->job_lock); /* Proceed with reset now. */ panfrost_device_reset(pfdev); /* panfrost_device_reset() unmasks job interrupts, but we want to * keep them masked a bit longer. */ job_write(pfdev, JOB_INT_MASK, 0); /* GPU has been reset, we can clear the reset pending bit. */ atomic_set(&pfdev->reset.pending, 0); /* Now resubmit jobs that were previously queued but didn't have a * chance to finish. * FIXME: We temporarily get out of the DMA fence signalling section * while resubmitting jobs because the job submission logic will * allocate memory with the GFP_KERNEL flag which can trigger memory * reclaim and exposes a lock ordering issue. */ dma_fence_end_signalling(cookie); for (i = 0; i < NUM_JOB_SLOTS; i++) drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched); cookie = dma_fence_begin_signalling(); /* Restart the schedulers */ for (i = 0; i < NUM_JOB_SLOTS; i++) drm_sched_start(&pfdev->js->queue[i].sched, 0); /* Re-enable job interrupts now that everything has been restarted. */ job_write(pfdev, JOB_INT_MASK, GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | GENMASK(NUM_JOB_SLOTS - 1, 0)); dma_fence_end_signalling(cookie); } static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job *sched_job) { struct panfrost_job *job = to_panfrost_job(sched_job); struct panfrost_device *pfdev = job->pfdev; int js = panfrost_job_get_slot(job); /* * If the GPU managed to complete this jobs fence, the timeout is * spurious. Bail out. */ if (dma_fence_is_signaled(job->done_fence)) return DRM_GPU_SCHED_STAT_NOMINAL; /* * Panfrost IRQ handler may take a long time to process an interrupt * if there is another IRQ handler hogging the processing. 
* For example, the HDMI encoder driver might be stuck in the IRQ * handler for a significant time in a case of bad cable connection. * In order to catch such cases and not report spurious Panfrost * job timeouts, synchronize the IRQ handler and re-check the fence * status. */ synchronize_irq(pfdev->js->irq); if (dma_fence_is_signaled(job->done_fence)) { dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n"); return DRM_GPU_SCHED_STAT_NOMINAL; } dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p", js, job_read(pfdev, JS_CONFIG(js)), job_read(pfdev, JS_STATUS(js)), job_read(pfdev, JS_HEAD_LO(js)), job_read(pfdev, JS_TAIL_LO(js)), sched_job); panfrost_core_dump(job); atomic_set(&pfdev->reset.pending, 1); panfrost_reset(pfdev, sched_job); return DRM_GPU_SCHED_STAT_NOMINAL; } static void panfrost_reset_work(struct work_struct *work) { struct panfrost_device *pfdev; pfdev = container_of(work, struct panfrost_device, reset.work); panfrost_reset(pfdev, NULL); } static const struct drm_sched_backend_ops panfrost_sched_ops = { .run_job = panfrost_job_run, .timedout_job = panfrost_job_timedout, .free_job = panfrost_job_free }; static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data) { struct panfrost_device *pfdev = data; panfrost_job_handle_irqs(pfdev); /* Enable interrupts only if we're not about to get suspended */ if (!test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended)) job_write(pfdev, JOB_INT_MASK, GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | GENMASK(NUM_JOB_SLOTS - 1, 0)); return IRQ_HANDLED; } static irqreturn_t panfrost_job_irq_handler(int irq, void *data) { struct panfrost_device *pfdev = data; u32 status; if (test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended)) return IRQ_NONE; status = job_read(pfdev, JOB_INT_STAT); if (!status) return IRQ_NONE; job_write(pfdev, JOB_INT_MASK, 0); return IRQ_WAKE_THREAD; } int panfrost_job_init(struct panfrost_device *pfdev) { struct panfrost_job_slot *js; unsigned int nentries = 2; int ret, j; /* All GPUs have two entries per queue, but without jobchain * disambiguation stopping the right job in the close path is tricky, * so let's just advertise one entry in that case. 
*/ if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) nentries = 1; pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL); if (!js) return -ENOMEM; INIT_WORK(&pfdev->reset.work, panfrost_reset_work); spin_lock_init(&js->job_lock); js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job"); if (js->irq < 0) return js->irq; ret = devm_request_threaded_irq(pfdev->dev, js->irq, panfrost_job_irq_handler, panfrost_job_irq_handler_thread, IRQF_SHARED, KBUILD_MODNAME "-job", pfdev); if (ret) { dev_err(pfdev->dev, "failed to request job irq"); return ret; } pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0); if (!pfdev->reset.wq) return -ENOMEM; for (j = 0; j < NUM_JOB_SLOTS; j++) { js->queue[j].fence_context = dma_fence_context_alloc(1); ret = drm_sched_init(&js->queue[j].sched, &panfrost_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, nentries, 0, msecs_to_jiffies(JOB_TIMEOUT_MS), pfdev->reset.wq, NULL, "pan_js", pfdev->dev); if (ret) { dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret); goto err_sched; } } panfrost_job_enable_interrupts(pfdev); return 0; err_sched: for (j--; j >= 0; j--) drm_sched_fini(&js->queue[j].sched); destroy_workqueue(pfdev->reset.wq); return ret; } void panfrost_job_fini(struct panfrost_device *pfdev) { struct panfrost_job_slot *js = pfdev->js; int j; job_write(pfdev, JOB_INT_MASK, 0); for (j = 0; j < NUM_JOB_SLOTS; j++) { drm_sched_fini(&js->queue[j].sched); } cancel_work_sync(&pfdev->reset.work); destroy_workqueue(pfdev->reset.wq); } int panfrost_job_open(struct panfrost_file_priv *panfrost_priv) { struct panfrost_device *pfdev = panfrost_priv->pfdev; struct panfrost_job_slot *js = pfdev->js; struct drm_gpu_scheduler *sched; int ret, i; for (i = 0; i < NUM_JOB_SLOTS; i++) { sched = &js->queue[i].sched; ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], DRM_SCHED_PRIORITY_NORMAL, &sched, 1, NULL); if (WARN_ON(ret)) return ret; } return 0; } void panfrost_job_close(struct panfrost_file_priv *panfrost_priv) { struct panfrost_device *pfdev = panfrost_priv->pfdev; int i; for (i = 0; i < NUM_JOB_SLOTS; i++) drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]); /* Kill in-flight jobs */ spin_lock(&pfdev->js->job_lock); for (i = 0; i < NUM_JOB_SLOTS; i++) { struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i]; int j; for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) { struct panfrost_job *job = pfdev->jobs[i][j]; u32 cmd; if (!job || job->base.entity != entity) continue; if (j == 1) { /* Try to cancel the job before it starts */ job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP); /* Reset the job head so it doesn't get restarted if * the job in the first slot failed. */ job->jc = 0; } if (panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) { cmd = panfrost_get_job_chain_flag(job) ? JS_COMMAND_HARD_STOP_1 : JS_COMMAND_HARD_STOP_0; } else { cmd = JS_COMMAND_HARD_STOP; } job_write(pfdev, JS_COMMAND(i), cmd); /* Jobs can outlive their file context */ job->engine_usage = NULL; } } spin_unlock(&pfdev->js->job_lock); } int panfrost_job_is_idle(struct panfrost_device *pfdev) { struct panfrost_job_slot *js = pfdev->js; int i; for (i = 0; i < NUM_JOB_SLOTS; i++) { /* If there are any jobs in the HW queue, we're not idle */ if (atomic_read(&js->queue[i].sched.credit_count)) return false; } return true; }
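/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * the userspace side of the submission path handled above. A fragment job is
 * pushed through DRM_IOCTL_PANFROST_SUBMIT; the PANFROST_JD_REQ_FS requirement
 * is what panfrost_job_get_slot() maps to job slot 0. The file descriptor,
 * BO handle array and job-chain GPU VA are hypothetical and assumed to have
 * been set up beforehand (libdrm's xf86drm.h and the UAPI header
 * drm/panfrost_drm.h are assumed available).
 *
 *	struct drm_panfrost_submit submit = {
 *		.jc = job_chain_gpu_va,
 *		.bo_handles = (__u64)(uintptr_t)bo_handles,
 *		.bo_handle_count = n_bos,
 *		.requirements = PANFROST_JD_REQ_FS,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_PANFROST_SUBMIT, &submit))
 *		perror("PANFROST_SUBMIT");
 */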
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * rt8973a.h * * Copyright (c) 2014 Samsung Electronics Co., Ltd */ #ifndef __LINUX_EXTCON_RT8973A_H #define __LINUX_EXTCON_RT8973A_H enum rt8973a_types { TYPE_RT8973A, }; /* RT8973A registers */ enum rt8973A_reg { RT8973A_REG_DEVICE_ID = 0x1, RT8973A_REG_CONTROL1, RT8973A_REG_INT1, RT8973A_REG_INT2, RT8973A_REG_INTM1, RT8973A_REG_INTM2, RT8973A_REG_ADC, RT8973A_REG_RSVD_1, RT8973A_REG_RSVD_2, RT8973A_REG_DEV1, RT8973A_REG_DEV2, RT8973A_REG_RSVD_3, RT8973A_REG_RSVD_4, RT8973A_REG_RSVD_5, RT8973A_REG_RSVD_6, RT8973A_REG_RSVD_7, RT8973A_REG_RSVD_8, RT8973A_REG_RSVD_9, RT8973A_REG_MANUAL_SW1, RT8973A_REG_MANUAL_SW2, RT8973A_REG_RSVD_10, RT8973A_REG_RSVD_11, RT8973A_REG_RSVD_12, RT8973A_REG_RSVD_13, RT8973A_REG_RSVD_14, RT8973A_REG_RSVD_15, RT8973A_REG_RESET, RT8973A_REG_END, }; /* Define RT8973A MASK/SHIFT constant */ #define RT8973A_REG_DEVICE_ID_VENDOR_SHIFT 0 #define RT8973A_REG_DEVICE_ID_VERSION_SHIFT 3 #define RT8973A_REG_DEVICE_ID_VENDOR_MASK (0x7 << RT8973A_REG_DEVICE_ID_VENDOR_SHIFT) #define RT8973A_REG_DEVICE_ID_VERSION_MASK (0x1f << RT8973A_REG_DEVICE_ID_VERSION_SHIFT) #define RT8973A_REG_CONTROL1_INTM_SHIFT 0 #define RT8973A_REG_CONTROL1_AUTO_CONFIG_SHIFT 2 #define RT8973A_REG_CONTROL1_I2C_RST_EN_SHIFT 3 #define RT8973A_REG_CONTROL1_SWITCH_OPEN_SHIFT 4 #define RT8973A_REG_CONTROL1_CHGTYP_SHIFT 5 #define RT8973A_REG_CONTROL1_USB_CHD_EN_SHIFT 6 #define RT8973A_REG_CONTROL1_ADC_EN_SHIFT 7 #define RT8973A_REG_CONTROL1_INTM_MASK (0x1 << RT8973A_REG_CONTROL1_INTM_SHIFT) #define RT8973A_REG_CONTROL1_AUTO_CONFIG_MASK (0x1 << RT8973A_REG_CONTROL1_AUTO_CONFIG_SHIFT) #define RT8973A_REG_CONTROL1_I2C_RST_EN_MASK (0x1 << RT8973A_REG_CONTROL1_I2C_RST_EN_SHIFT) #define RT8973A_REG_CONTROL1_SWITCH_OPEN_MASK (0x1 << RT8973A_REG_CONTROL1_SWITCH_OPEN_SHIFT) #define RT8973A_REG_CONTROL1_CHGTYP_MASK (0x1 << RT8973A_REG_CONTROL1_CHGTYP_SHIFT) #define RT8973A_REG_CONTROL1_USB_CHD_EN_MASK (0x1 << RT8973A_REG_CONTROL1_USB_CHD_EN_SHIFT) #define RT8973A_REG_CONTROL1_ADC_EN_MASK (0x1 << RT8973A_REG_CONTROL1_ADC_EN_SHIFT) #define RT9873A_REG_INTM1_ATTACH_SHIFT 0 #define RT9873A_REG_INTM1_DETACH_SHIFT 1 #define RT9873A_REG_INTM1_CHGDET_SHIFT 2 #define RT9873A_REG_INTM1_DCD_T_SHIFT 3 #define RT9873A_REG_INTM1_OVP_SHIFT 4 #define RT9873A_REG_INTM1_CONNECT_SHIFT 5 #define RT9873A_REG_INTM1_ADC_CHG_SHIFT 6 #define RT9873A_REG_INTM1_OTP_SHIFT 7 #define RT9873A_REG_INTM1_ATTACH_MASK (0x1 << RT9873A_REG_INTM1_ATTACH_SHIFT) #define RT9873A_REG_INTM1_DETACH_MASK (0x1 << RT9873A_REG_INTM1_DETACH_SHIFT) #define RT9873A_REG_INTM1_CHGDET_MASK (0x1 << RT9873A_REG_INTM1_CHGDET_SHIFT) #define RT9873A_REG_INTM1_DCD_T_MASK (0x1 << RT9873A_REG_INTM1_DCD_T_SHIFT) #define RT9873A_REG_INTM1_OVP_MASK (0x1 << RT9873A_REG_INTM1_OVP_SHIFT) #define RT9873A_REG_INTM1_CONNECT_MASK (0x1 << RT9873A_REG_INTM1_CONNECT_SHIFT) #define RT9873A_REG_INTM1_ADC_CHG_MASK (0x1 << RT9873A_REG_INTM1_ADC_CHG_SHIFT) #define RT9873A_REG_INTM1_OTP_MASK (0x1 << RT9873A_REG_INTM1_OTP_SHIFT) #define RT9873A_REG_INTM2_UVLO_SHIFT 1 #define RT9873A_REG_INTM2_POR_SHIFT 2 #define RT9873A_REG_INTM2_OTP_FET_SHIFT 3 #define RT9873A_REG_INTM2_OVP_FET_SHIFT 4 #define RT9873A_REG_INTM2_OCP_LATCH_SHIFT 5 #define RT9873A_REG_INTM2_OCP_SHIFT 6 #define RT9873A_REG_INTM2_OVP_OCP_SHIFT 7 #define RT9873A_REG_INTM2_UVLO_MASK (0x1 << RT9873A_REG_INTM2_UVLO_SHIFT) #define RT9873A_REG_INTM2_POR_MASK (0x1 << RT9873A_REG_INTM2_POR_SHIFT) #define RT9873A_REG_INTM2_OTP_FET_MASK (0x1 << RT9873A_REG_INTM2_OTP_FET_SHIFT) #define 
RT9873A_REG_INTM2_OVP_FET_MASK (0x1 << RT9873A_REG_INTM2_OVP_FET_SHIFT) #define RT9873A_REG_INTM2_OCP_LATCH_MASK (0x1 << RT9873A_REG_INTM2_OCP_LATCH_SHIFT) #define RT9873A_REG_INTM2_OCP_MASK (0x1 << RT9873A_REG_INTM2_OCP_SHIFT) #define RT9873A_REG_INTM2_OVP_OCP_MASK (0x1 << RT9873A_REG_INTM2_OVP_OCP_SHIFT) #define RT8973A_REG_ADC_SHIFT 0 #define RT8973A_REG_ADC_MASK (0x1f << RT8973A_REG_ADC_SHIFT) #define RT8973A_REG_DEV1_OTG_SHIFT 0 #define RT8973A_REG_DEV1_SDP_SHIFT 2 #define RT8973A_REG_DEV1_UART_SHIFT 3 #define RT8973A_REG_DEV1_CAR_KIT_TYPE1_SHIFT 4 #define RT8973A_REG_DEV1_CDPORT_SHIFT 5 #define RT8973A_REG_DEV1_DCPORT_SHIFT 6 #define RT8973A_REG_DEV1_OTG_MASK (0x1 << RT8973A_REG_DEV1_OTG_SHIFT) #define RT8973A_REG_DEV1_SDP_MASK (0x1 << RT8973A_REG_DEV1_SDP_SHIFT) #define RT8973A_REG_DEV1_UART_MASK (0x1 << RT8973A_REG_DEV1_UART_SHIFT) #define RT8973A_REG_DEV1_CAR_KIT_TYPE1_MASK (0x1 << RT8973A_REG_DEV1_CAR_KIT_TYPE1_SHIFT) #define RT8973A_REG_DEV1_CDPORT_MASK (0x1 << RT8973A_REG_DEV1_CDPORT_SHIFT) #define RT8973A_REG_DEV1_DCPORT_MASK (0x1 << RT8973A_REG_DEV1_DCPORT_SHIFT) #define RT8973A_REG_DEV1_USB_MASK (RT8973A_REG_DEV1_SDP_MASK \ | RT8973A_REG_DEV1_CDPORT_MASK) #define RT8973A_REG_DEV2_JIG_USB_ON_SHIFT 0 #define RT8973A_REG_DEV2_JIG_USB_OFF_SHIFT 1 #define RT8973A_REG_DEV2_JIG_UART_ON_SHIFT 2 #define RT8973A_REG_DEV2_JIG_UART_OFF_SHIFT 3 #define RT8973A_REG_DEV2_JIG_USB_ON_MASK (0x1 << RT8973A_REG_DEV2_JIG_USB_ON_SHIFT) #define RT8973A_REG_DEV2_JIG_USB_OFF_MASK (0x1 << RT8973A_REG_DEV2_JIG_USB_OFF_SHIFT) #define RT8973A_REG_DEV2_JIG_UART_ON_MASK (0x1 << RT8973A_REG_DEV2_JIG_UART_ON_SHIFT) #define RT8973A_REG_DEV2_JIG_UART_OFF_MASK (0x1 << RT8973A_REG_DEV2_JIG_UART_OFF_SHIFT) #define RT8973A_REG_MANUAL_SW1_DP_SHIFT 2 #define RT8973A_REG_MANUAL_SW1_DM_SHIFT 5 #define RT8973A_REG_MANUAL_SW1_DP_MASK (0x7 << RT8973A_REG_MANUAL_SW1_DP_SHIFT) #define RT8973A_REG_MANUAL_SW1_DM_MASK (0x7 << RT8973A_REG_MANUAL_SW1_DM_SHIFT) #define DM_DP_CON_SWITCH_OPEN 0x0 #define DM_DP_CON_SWITCH_USB 0x1 #define DM_DP_CON_SWITCH_UART 0x3 #define DM_DP_SWITCH_OPEN ((DM_DP_CON_SWITCH_OPEN << RT8973A_REG_MANUAL_SW1_DP_SHIFT) \ | (DM_DP_CON_SWITCH_OPEN << RT8973A_REG_MANUAL_SW1_DM_SHIFT)) #define DM_DP_SWITCH_USB ((DM_DP_CON_SWITCH_USB << RT8973A_REG_MANUAL_SW1_DP_SHIFT) \ | (DM_DP_CON_SWITCH_USB << RT8973A_REG_MANUAL_SW1_DM_SHIFT)) #define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART << RT8973A_REG_MANUAL_SW1_DP_SHIFT) \ | (DM_DP_CON_SWITCH_UART << RT8973A_REG_MANUAL_SW1_DM_SHIFT)) #define RT8973A_REG_MANUAL_SW2_FET_ON_SHIFT 0 #define RT8973A_REG_MANUAL_SW2_JIG_ON_SHIFT 2 #define RT8973A_REG_MANUAL_SW2_BOOT_SW_SHIFT 3 #define RT8973A_REG_MANUAL_SW2_FET_ON_MASK (0x1 << RT8973A_REG_MANUAL_SW2_FET_ON_SHIFT) #define RT8973A_REG_MANUAL_SW2_JIG_ON_MASK (0x1 << RT8973A_REG_MANUAL_SW2_JIG_ON_SHIFT) #define RT8973A_REG_MANUAL_SW2_BOOT_SW_MASK (0x1 << RT8973A_REG_MANUAL_SW2_BOOT_SW_SHIFT) #define RT8973A_REG_MANUAL_SW2_FET_ON 0 #define RT8973A_REG_MANUAL_SW2_FET_OFF 0x1 #define RT8973A_REG_MANUAL_SW2_JIG_OFF 0 #define RT8973A_REG_MANUAL_SW2_JIG_ON 0x1 #define RT8973A_REG_MANUAL_SW2_BOOT_SW_ON 0 #define RT8973A_REG_MANUAL_SW2_BOOT_SW_OFF 0x1 #define RT8973A_REG_RESET_SHIFT 0 #define RT8973A_REG_RESET_MASK (0x1 << RT8973A_REG_RESET_SHIFT) #define RT8973A_REG_RESET 0x1 /* RT8973A Interrupts */ enum rt8973a_irq { /* Interrupt1*/ RT8973A_INT1_ATTACH, RT8973A_INT1_DETACH, RT8973A_INT1_CHGDET, RT8973A_INT1_DCD_T, RT8973A_INT1_OVP, RT8973A_INT1_CONNECT, RT8973A_INT1_ADC_CHG, RT8973A_INT1_OTP, /* Interrupt2*/ RT8973A_INT2_UVLO, 
RT8973A_INT2_POR, RT8973A_INT2_OTP_FET, RT8973A_INT2_OVP_FET, RT8973A_INT2_OCP_LATCH, RT8973A_INT2_OCP, RT8973A_INT2_OVP_OCP, RT8973A_NUM, }; #define RT8973A_INT1_ATTACH_MASK BIT(0) #define RT8973A_INT1_DETACH_MASK BIT(1) #define RT8973A_INT1_CHGDET_MASK BIT(2) #define RT8973A_INT1_DCD_T_MASK BIT(3) #define RT8973A_INT1_OVP_MASK BIT(4) #define RT8973A_INT1_CONNECT_MASK BIT(5) #define RT8973A_INT1_ADC_CHG_MASK BIT(6) #define RT8973A_INT1_OTP_MASK BIT(7) #define RT8973A_INT2_UVLOT_MASK BIT(0) #define RT8973A_INT2_POR_MASK BIT(1) #define RT8973A_INT2_OTP_FET_MASK BIT(2) #define RT8973A_INT2_OVP_FET_MASK BIT(3) #define RT8973A_INT2_OCP_LATCH_MASK BIT(4) #define RT8973A_INT2_OCP_MASK BIT(5) #define RT8973A_INT2_OVP_OCP_MASK BIT(6) #endif /* __LINUX_EXTCON_RT8973A_H */
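/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * how a consuming driver would typically route DM/DP to the USB path using
 * the manual-switch masks defined above. 'regmap' is a hypothetical
 * struct regmap * obtained by that driver (e.g. via devm_regmap_init_i2c()).
 *
 *	regmap_update_bits(regmap, RT8973A_REG_MANUAL_SW1,
 *			   RT8973A_REG_MANUAL_SW1_DP_MASK |
 *			   RT8973A_REG_MANUAL_SW1_DM_MASK,
 *			   DM_DP_SWITCH_USB);
 */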
/* SPDX-License-Identifier: MIT */ /* * Copyright © 2019 Intel Corporation */ #ifndef __INTEL_DSI_DCS_BACKLIGHT_H__ #define __INTEL_DSI_DCS_BACKLIGHT_H__ struct intel_connector; int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); #endif /* __INTEL_DSI_DCS_BACKLIGHT_H__ */
// SPDX-License-Identifier: BSD-3-Clause /* * Copyright (c) 2016-2022, AngeloGioacchino Del Regno * <[email protected]> * Copyright (c) 2022, Konrad Dybcio <[email protected]> * Copyright (c) 2022, Marijn Suijten <[email protected]> */ #include <dt-bindings/clock/qcom,gcc-msm8976.h> #include <dt-bindings/clock/qcom,rpmcc.h> #include <dt-bindings/gpio/gpio.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/power/qcom-rpmpd.h> / { interrupt-parent = <&intc>; #address-cells = <2>; #size-cells = <2>; chosen { }; clocks { xo_board: xo-board { compatible = "fixed-clock"; #clock-cells = <0>; }; }; cpus { #address-cells = <1>; #size-cells = <0>; cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a53"; reg = <0x0>; enable-method = "psci"; cpu-idle-states = <&little_cpu_sleep_0>; capacity-dmips-mhz = <573>; next-level-cache = <&l2_0>; #cooling-cells = <2>; }; cpu1: cpu@1 { device_type = "cpu"; compatible = "arm,cortex-a53"; reg = <0x1>; enable-method = "psci"; cpu-idle-states = <&little_cpu_sleep_0>; capacity-dmips-mhz = <573>; next-level-cache = <&l2_0>; #cooling-cells = <2>; }; cpu2: cpu@2 { device_type = "cpu"; compatible = "arm,cortex-a53"; reg = <0x2>; enable-method = "psci"; cpu-idle-states = <&little_cpu_sleep_0>; capacity-dmips-mhz = <573>; next-level-cache = <&l2_0>; #cooling-cells = <2>; }; cpu3: cpu@3 { device_type = "cpu"; compatible = "arm,cortex-a53"; reg = <0x3>; enable-method = "psci"; cpu-idle-states = <&little_cpu_sleep_0>; capacity-dmips-mhz = <573>; next-level-cache = <&l2_0>; #cooling-cells = <2>; }; cpu4: cpu@100 { device_type = "cpu"; compatible = "arm,cortex-a72"; reg = <0x100>; enable-method = "psci"; cpu-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>; capacity-dmips-mhz = <1024>; next-level-cache = <&l2_1>; #cooling-cells = <2>; }; cpu5: cpu@101 { device_type = "cpu"; compatible = "arm,cortex-a72"; reg = <0x101>; enable-method = "psci"; cpu-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>; capacity-dmips-mhz = <1024>; next-level-cache = <&l2_1>; #cooling-cells = <2>; }; cpu6: cpu@102 { device_type = "cpu"; compatible = "arm,cortex-a72"; reg = <0x102>; enable-method = "psci"; cpu-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>; capacity-dmips-mhz = <1024>; next-level-cache = <&l2_1>; #cooling-cells = <2>; }; cpu7: cpu@103 { device_type = "cpu"; compatible = "arm,cortex-a72"; reg = <0x103>; enable-method = "psci"; cpu-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>; capacity-dmips-mhz = <1024>; next-level-cache = <&l2_1>; #cooling-cells = <2>; }; cpu-map { cluster0 { core0 { cpu = <&cpu0>; }; core1 { cpu = <&cpu1>; }; core2 { cpu = <&cpu2>; }; core3 { cpu = <&cpu3>; }; }; cluster1 { core0 { cpu = <&cpu4>; }; core1 { cpu = <&cpu5>; }; core2 { cpu = <&cpu6>; }; core3 { cpu = <&cpu7>; }; }; }; idle-states { entry-method = "psci"; little_cpu_sleep_0: cpu-sleep-0-0 { compatible = "arm,idle-state"; idle-state-name = "little-power-collapse"; arm,psci-suspend-param = <0x40000003>; entry-latency-us = <181>; exit-latency-us = <149>; min-residency-us = <703>; local-timer-stop; }; big_cpu_sleep_0: cpu-sleep-1-0 { compatible = "arm,idle-state"; idle-state-name = "big-retention"; arm,psci-suspend-param = <0x00000002>; entry-latency-us = <142>; exit-latency-us = <99>; min-residency-us = <242>; }; big_cpu_sleep_1: cpu-sleep-1-1 { compatible = "arm,idle-state"; idle-state-name = "big-power-collapse"; arm,psci-suspend-param = <0x40000003>; entry-latency-us = <158>; exit-latency-us = <144>; 
min-residency-us = <863>; local-timer-stop; }; }; l2_0: l2-cache0 { compatible = "cache"; cache-level = <2>; cache-unified; }; l2_1: l2-cache1 { compatible = "cache"; cache-level = <2>; cache-unified; }; }; firmware { scm: scm { compatible = "qcom,scm-msm8976", "qcom,scm"; clocks = <&gcc GCC_CRYPTO_CLK>, <&gcc GCC_CRYPTO_AXI_CLK>, <&gcc GCC_CRYPTO_AHB_CLK>; clock-names = "core", "bus", "iface"; #reset-cells = <1>; qcom,dload-mode = <&tcsr 0x6100>; }; }; memory@80000000 { device_type = "memory"; /* We expect the bootloader to fill in the size */ reg = <0x0 0x80000000 0x0 0x0>; }; pmu-a53 { compatible = "arm,cortex-a53-pmu"; interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>; }; pmu_a72: pmu-a72 { compatible = "arm,cortex-a72-pmu"; interrupts = <GIC_PPI 7 (GIC_CPU_MASK_RAW(0xf0) | IRQ_TYPE_LEVEL_HIGH)>; }; psci { compatible = "arm,psci-1.0"; method = "smc"; }; rpm: remoteproc { compatible = "qcom,msm8976-rpm-proc", "qcom,rpm-proc"; smd-edge { interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>; mboxes = <&apcs 0>; qcom,smd-edge = <15>; rpm_requests: rpm-requests { compatible = "qcom,rpm-msm8976", "qcom,smd-rpm"; qcom,smd-channels = "rpm_requests"; rpmcc: clock-controller { compatible = "qcom,rpmcc-msm8976", "qcom,rpmcc"; clocks = <&xo_board>; clock-names = "xo"; #clock-cells = <1>; }; rpmpd: power-controller { compatible = "qcom,msm8976-rpmpd"; #power-domain-cells = <1>; operating-points-v2 = <&rpmpd_opp_table>; rpmpd_opp_table: opp-table { compatible = "operating-points-v2"; rpmpd_opp_ret: opp1 { opp-level = <RPM_SMD_LEVEL_RETENTION>; }; rpmpd_opp_ret_plus: opp2 { opp-level = <RPM_SMD_LEVEL_RETENTION_PLUS>; }; rpmpd_opp_min_svs: opp3 { opp-level = <RPM_SMD_LEVEL_MIN_SVS>; }; rpmpd_opp_low_svs: opp4 { opp-level = <RPM_SMD_LEVEL_LOW_SVS>; }; rpmpd_opp_svs: opp5 { opp-level = <RPM_SMD_LEVEL_SVS>; }; rpmpd_opp_svs_plus: opp6 { opp-level = <RPM_SMD_LEVEL_SVS_PLUS>; }; rpmpd_opp_nom: opp7 { opp-level = <RPM_SMD_LEVEL_NOM>; }; rpmpd_opp_nom_plus: opp8 { opp-level = <RPM_SMD_LEVEL_NOM_PLUS>; }; rpmpd_opp_turbo: opp9 { opp-level = <RPM_SMD_LEVEL_TURBO>; }; rpmpd_opp_turbo_no_cpr: opp10 { opp-level = <RPM_SMD_LEVEL_TURBO_NO_CPR>; }; rpmpd_opp_turbo_high: opp111 { opp-level = <RPM_SMD_LEVEL_TURBO_HIGH>; }; }; }; }; }; }; reserved-memory { #address-cells = <2>; #size-cells = <2>; ranges; ext-region@85b00000 { reg = <0x0 0x85b00000 0x0 0x500000>; no-map; }; smem@86300000 { compatible = "qcom,smem"; reg = <0x0 0x86300000 0x0 0x100000>; no-map; hwlocks = <&tcsr_mutex 3>; qcom,rpm-msg-ram = <&rpm_msg_ram>; }; reserved@86400000 { reg = <0x0 0x86400000 0x0 0x800000>; no-map; }; mpss_mem: mpss@86c00000 { reg = <0x0 0x86c00000 0x0 0x5600000>; no-map; }; lpass_mem: lpass@8c200000 { reg = <0x0 0x8c200000 0x0 0x1000000>; no-map; }; wcnss_fw_mem: wcnss@8d200000 { reg = <0x0 0x8d200000 0x0 0x800000>; no-map; }; venus_mem: memory@8da00000 { reg = <0x0 0x8da00000 0x0 0x2600000>; no-map; }; tz-apps@8dd00000 { reg = <0x0 0x8dd00000 0x0 0x1400000>; no-map; }; }; smp2p-hexagon { compatible = "qcom,smp2p"; interrupts = <GIC_SPI 291 IRQ_TYPE_EDGE_RISING>; mboxes = <&apcs 10>; qcom,local-pid = <0>; qcom,remote-pid = <2>; qcom,smem = <443>, <429>; adsp_smp2p_out: master-kernel { qcom,entry-name = "master-kernel"; #qcom,smem-state-cells = <1>; }; adsp_smp2p_in: slave-kernel { qcom,entry-name = "slave-kernel"; interrupt-controller; #interrupt-cells = <2>; }; }; smp2p-modem { compatible = "qcom,smp2p"; interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>; mboxes = <&apcs 14>; qcom,local-pid = <0>; qcom,remote-pid = 
<1>; qcom,smem = <435>, <428>; modem_smp2p_out: master-kernel { qcom,entry-name = "master-kernel"; #qcom,smem-state-cells = <1>; }; modem_smp2p_in: slave-kernel { qcom,entry-name = "slave-kernel"; interrupt-controller; #interrupt-cells = <2>; }; }; smp2p-wcnss { compatible = "qcom,smp2p"; interrupts = <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>; mboxes = <&apcs 18>; qcom,local-pid = <0>; qcom,remote-pid = <4>; qcom,smem = <451>, <431>; wcnss_smp2p_out: master-kernel { qcom,entry-name = "master-kernel"; #qcom,smem-state-cells = <1>; }; wcnss_smp2p_in: slave-kernel { qcom,entry-name = "slave-kernel"; interrupt-controller; #interrupt-cells = <2>; }; }; smsm { compatible = "qcom,smsm"; #address-cells = <1>; #size-cells = <0>; mboxes = <0>, <&apcs 13>, <&apcs 9>, <&apcs 19>; apps_smsm: apps@0 { reg = <0>; #qcom,smem-state-cells = <1>; }; hexagon_smsm: hexagon@1 { reg = <1>; interrupts = <GIC_SPI 290 IRQ_TYPE_EDGE_RISING>; interrupt-controller; #interrupt-cells = <2>; }; wcnss_smsm: wcnss@6 { reg = <6>; interrupts = <GIC_SPI 144 IRQ_TYPE_EDGE_RISING>; interrupt-controller; #interrupt-cells = <2>; }; }; soc: soc@0 { #address-cells = <1>; #size-cells = <1>; ranges = <0 0 0 0xffffffff>; compatible = "simple-bus"; rng@22000 { compatible = "qcom,prng"; reg = <0x00022000 0x140>; clocks = <&gcc GCC_PRNG_AHB_CLK>; clock-names = "core"; }; rpm_msg_ram: sram@60000 { compatible = "qcom,rpm-msg-ram"; reg = <0x00060000 0x8000>; }; usb_hs_phy: phy@6c000 { compatible = "qcom,usb-hs-28nm-femtophy"; reg = <0x0006c000 0x200>; #phy-cells = <0>; clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>, <&gcc GCC_USB_HS_PHY_CFG_AHB_CLK>, <&gcc GCC_USB2A_PHY_SLEEP_CLK>; clock-names = "ref", "ahb", "sleep"; resets = <&gcc RST_QUSB2_PHY_BCR>, <&gcc RST_USB2_HS_PHY_ONLY_BCR>; reset-names = "phy", "por"; status = "disabled"; }; qfprom: qfprom@a4000 { compatible = "qcom,msm8976-qfprom", "qcom,qfprom"; reg = <0x000a4000 0x1000>; #address-cells = <1>; #size-cells = <1>; tsens_base1: base1@218 { reg = <0x218 1>; bits = <0 8>; }; tsens_s0_p1: s0-p1@219 { reg = <0x219 0x1>; bits = <0 6>; }; tsens_s0_p2: s0-p2@219 { reg = <0x219 0x2>; bits = <6 6>; }; tsens_s1_p1: s1-p1@21a { reg = <0x21a 0x2>; bits = <4 6>; }; tsens_s1_p2: s1-p2@21b { reg = <0x21b 0x1>; bits = <2 6>; }; tsens_s2_p1: s2-p1@21c { reg = <0x21c 0x1>; bits = <0 6>; }; tsens_s2_p2: s2-p2@21c { reg = <0x21c 0x2>; bits = <6 6>; }; tsens_s3_p1: s3-p1@21d { reg = <0x21d 0x2>; bits = <4 6>; }; tsens_s3_p2: s3-p2@21e { reg = <0x21e 0x1>; bits = <2 6>; }; tsens_base2: base2@220 { reg = <0x220 1>; bits = <0 8>; }; tsens_s4_p1: s4-p1@221 { reg = <0x221 0x1>; bits = <0 6>; }; tsens_s4_p2: s4-p2@221 { reg = <0x221 0x2>; bits = <6 6>; }; tsens_s5_p1: s5-p1@222 { reg = <0x222 0x2>; bits = <4 6>; }; tsens_s5_p2: s5-p2@223 { reg = <0x224 0x1>; bits = <2 6>; }; tsens_s6_p1: s6-p1@224 { reg = <0x224 0x1>; bits = <0 6>; }; tsens_s6_p2: s6-p2@224 { reg = <0x224 0x2>; bits = <6 6>; }; tsens_s7_p1: s7-p1@225 { reg = <0x225 0x2>; bits = <4 6>; }; tsens_s7_p2: s7-p2@226 { reg = <0x226 0x2>; bits = <2 6>; }; tsens_mode: mode@228 { reg = <0x228 1>; bits = <0 3>; }; tsens_s8_p1: s8-p1@228 { reg = <0x228 0x2>; bits = <3 6>; }; tsens_s8_p2: s8-p2@229 { reg = <0x229 0x1>; bits = <1 6>; }; tsens_s9_p1: s9-p1@229 { reg = <0x229 0x2>; bits = <7 6>; }; tsens_s9_p2: s9-p2@22a { reg = <0x22a 0x2>; bits = <5 6>; }; tsens_s10_p1: s10-p1@22b { reg = <0x22b 0x2>; bits = <3 6>; }; tsens_s10_p2: s10-p2@22c { reg = <0x22c 0x1>; bits = <1 6>; }; }; tsens: thermal-sensor@4a9000 { compatible = "qcom,msm8976-tsens", "qcom,tsens-v1"; reg = 
<0x004a9000 0x1000>, /* TM */ <0x004a8000 0x1000>; /* SROT */ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "uplow"; nvmem-cells = <&tsens_mode>, <&tsens_base1>, <&tsens_base2>, <&tsens_s0_p1>, <&tsens_s0_p2>, <&tsens_s1_p1>, <&tsens_s1_p2>, <&tsens_s2_p1>, <&tsens_s2_p2>, <&tsens_s3_p1>, <&tsens_s3_p2>, <&tsens_s4_p1>, <&tsens_s4_p2>, <&tsens_s5_p1>, <&tsens_s5_p2>, <&tsens_s6_p1>, <&tsens_s6_p2>, <&tsens_s7_p1>, <&tsens_s7_p2>, <&tsens_s8_p1>, <&tsens_s8_p2>, <&tsens_s9_p1>, <&tsens_s9_p2>, <&tsens_s10_p1>, <&tsens_s10_p2>; nvmem-cell-names = "mode", "base1", "base2", "s0_p1", "s0_p2", "s1_p1", "s1_p2", "s2_p1", "s2_p2", "s3_p1", "s3_p2", "s4_p1", "s4_p2", "s5_p1", "s5_p2", "s6_p1", "s6_p2", "s7_p1", "s7_p2", "s8_p1", "s8_p2", "s9_p1", "s9_p2", "s10_p1", "s10_p2"; #qcom,sensors = <11>; #thermal-sensor-cells = <1>; }; restart@4ab000 { compatible = "qcom,pshold"; reg = <0x004ab000 0x4>; }; tlmm: pinctrl@1000000 { compatible = "qcom,msm8976-pinctrl"; reg = <0x01000000 0x300000>; interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>; #gpio-cells = <2>; gpio-controller; gpio-ranges = <&tlmm 0 0 145>; interrupt-controller; #interrupt-cells = <2>; spi1_default: spi0-default-state { spi-pins { pins = "gpio0", "gpio1", "gpio3"; function = "blsp_spi1"; drive-strength = <12>; bias-disable; }; cs-pins { pins = "gpio2"; function = "blsp_spi1"; drive-strength = <2>; bias-disable; }; }; spi1_sleep: spi0-sleep-state { spi-pins { pins = "gpio0", "gpio1", "gpio3"; function = "gpio"; drive-strength = <2>; bias-pull-down; }; cs-pins { pins = "gpio2"; function = "gpio"; drive-strength = <2>; bias-disable; }; }; blsp1_i2c2_default: blsp1-i2c2-default-state { pins = "gpio6", "gpio7"; function = "blsp_i2c2"; drive-strength = <2>; bias-disable; }; blsp1_i2c2_sleep: blsp1-i2c2-sleep-state { pins = "gpio6", "gpio7"; function = "gpio"; drive-strength = <2>; bias-disable; }; blsp1_i2c4_default: blsp1-i2c4-default-state { pins = "gpio14", "gpio15"; function = "blsp_i2c4"; drive-strength = <2>; bias-disable; }; blsp1_i2c4_sleep: blsp1-i2c4-sleep-state { pins = "gpio14", "gpio15"; function = "gpio"; drive-strength = <2>; bias-disable; }; blsp2_uart2_active: blsp2-uart2-active-state { pins = "gpio20", "gpio21"; function = "blsp_uart6"; drive-strength = <4>; bias-disable; }; blsp2_uart2_sleep: blsp2-uart2-sleep-state { pins = "gpio20", "gpio21"; function = "gpio"; drive-strength = <2>; bias-disable; }; /* 4 (not 6!) 
interfaces per QUP, BLSP2 indexes are numbered (n)+4 */ blsp2_i2c2_default: blsp2-i2c2-default-state { pins = "gpio22", "gpio23"; function = "blsp_i2c6"; drive-strength = <2>; bias-disable; }; blsp2_i2c2_sleep: blsp2-i2c2-sleep-state { pins = "gpio22", "gpio23"; function = "gpio"; drive-strength = <2>; bias-disable; }; blsp2_i2c4_default: blsp2-i2c4-default-state { pins = "gpio18", "gpio19"; function = "blsp_i2c8"; drive-strength = <2>; bias-disable; }; blsp2_i2c4_sleep: blsp2-i2c4-sleep-state { pins = "gpio18", "gpio19"; function = "gpio"; drive-strength = <2>; bias-disable; }; wcss_wlan_default: wcss-wlan-default-state { wcss-wlan2-pins { pins = "gpio40"; function = "wcss_wlan2"; drive-strength = <6>; bias-pull-up; }; wcss-wlan1-pins { pins = "gpio41"; function = "wcss_wlan1"; drive-strength = <6>; bias-pull-up; }; wcss-wlan0-pins { pins = "gpio42"; function = "wcss_wlan0"; drive-strength = <6>; bias-pull-up; }; wcss-wlan-pins { pins = "gpio43", "gpio44"; function = "wcss_wlan"; drive-strength = <6>; bias-pull-up; }; }; }; gcc: clock-controller@1800000 { compatible = "qcom,gcc-msm8976"; reg = <0x01800000 0x80000>; #clock-cells = <1>; #reset-cells = <1>; #power-domain-cells = <1>; assigned-clocks = <&gcc GPLL3>; assigned-clock-rates = <1100000000>; clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>, <&rpmcc RPM_SMD_XO_A_CLK_SRC>, <&mdss_dsi0_phy 1>, <&mdss_dsi0_phy 0>, <&mdss_dsi1_phy 1>, <&mdss_dsi1_phy 0>; clock-names = "xo", "xo_a", "dsi0pll", "dsi0pllbyte", "dsi1pll", "dsi1pllbyte"; }; tcsr_mutex: hwlock@1905000 { compatible = "qcom,tcsr-mutex"; reg = <0x01905000 0x20000>; #hwlock-cells = <1>; }; tcsr: syscon@1937000 { compatible = "qcom,msm8976-tcsr", "syscon"; reg = <0x01937000 0x30000>; }; mdss: display-subsystem@1a00000 { compatible = "qcom,mdss"; reg = <0x01a00000 0x1000>, <0x01ab0000 0x3000>; reg-names = "mdss_phys", "vbif_phys"; power-domains = <&gcc MDSS_GDSC>; interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>; interrupt-controller; #interrupt-cells = <1>; clocks = <&gcc GCC_MDSS_AHB_CLK>, <&gcc GCC_MDSS_AXI_CLK>, <&gcc GCC_MDSS_VSYNC_CLK>, <&gcc GCC_MDSS_MDP_CLK>; clock-names = "iface", "bus", "vsync", "core"; #address-cells = <1>; #size-cells = <1>; ranges; status = "disabled"; mdss_mdp: display-controller@1a01000 { compatible = "qcom,msm8976-mdp5", "qcom,mdp5"; reg = <0x01a01000 0x89000>; reg-names = "mdp_phys"; interrupt-parent = <&mdss>; interrupts = <0>; clocks = <&gcc GCC_MDSS_AHB_CLK>, <&gcc GCC_MDSS_AXI_CLK>, <&gcc GCC_MDSS_MDP_CLK>, <&gcc GCC_MDSS_VSYNC_CLK>, <&gcc GCC_MDP_TBU_CLK>, <&gcc GCC_MDP_RT_TBU_CLK>; clock-names = "iface", "bus", "core", "vsync", "tbu", "tbu_rt"; operating-points-v2 = <&mdp_opp_table>; power-domains = <&gcc MDSS_GDSC>; iommus = <&apps_iommu 22>; ports { #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; mdss_mdp5_intf1_out: endpoint { remote-endpoint = <&mdss_dsi0_in>; }; }; port@1 { reg = <1>; mdss_mdp5_intf2_out: endpoint { remote-endpoint = <&mdss_dsi1_in>; }; }; }; mdp_opp_table: opp-table { compatible = "operating-points-v2"; opp-177780000 { opp-hz = /bits/ 64 <177780000>; required-opps = <&rpmpd_opp_svs>; }; opp-270000000 { opp-hz = /bits/ 64 <270000000>; required-opps = <&rpmpd_opp_svs_plus>; }; opp-320000000 { opp-hz = /bits/ 64 <320000000>; required-opps = <&rpmpd_opp_nom>; }; opp-360000000 { opp-hz = /bits/ 64 <360000000>; required-opps = <&rpmpd_opp_turbo>; }; }; }; mdss_dsi0: dsi@1a94000 { compatible = "qcom,msm8976-dsi-ctrl", "qcom,mdss-dsi-ctrl"; reg = <0x01a94000 0x300>; reg-names = "dsi_ctrl"; interrupt-parent = <&mdss>; interrupts = 
<4>; clocks = <&gcc GCC_MDSS_MDP_CLK>, <&gcc GCC_MDSS_AHB_CLK>, <&gcc GCC_MDSS_AXI_CLK>, <&gcc GCC_MDSS_BYTE0_CLK>, <&gcc GCC_MDSS_PCLK0_CLK>, <&gcc GCC_MDSS_ESC0_CLK>; clock-names = "mdp_core", "iface", "bus", "byte", "pixel", "core"; assigned-clocks = <&gcc GCC_MDSS_BYTE0_CLK_SRC>, <&gcc GCC_MDSS_PCLK0_CLK_SRC>; assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>; phys = <&mdss_dsi0_phy>; operating-points-v2 = <&dsi0_opp_table>; power-domains = <&gcc MDSS_GDSC>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; ports { #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; mdss_dsi0_in: endpoint { remote-endpoint = <&mdss_mdp5_intf1_out>; }; }; port@1 { reg = <1>; mdss_dsi0_out: endpoint { }; }; }; dsi0_opp_table: opp-table { compatible = "operating-points-v2"; opp-125000000 { opp-hz = /bits/ 64 <125000000>; required-opps = <&rpmpd_opp_svs>; }; opp-161250000 { opp-hz = /bits/ 64 <161250000>; required-opps = <&rpmpd_opp_svs_plus>; }; opp-187500000 { opp-hz = /bits/ 64 <187500000>; required-opps = <&rpmpd_opp_nom>; }; }; }; mdss_dsi1: dsi@1a96000 { compatible = "qcom,msm8976-dsi-ctrl", "qcom,mdss-dsi-ctrl"; reg = <0x01a96000 0x300>; reg-names = "dsi_ctrl"; interrupt-parent = <&mdss>; interrupts = <5>; clocks = <&gcc GCC_MDSS_MDP_CLK>, <&gcc GCC_MDSS_AHB_CLK>, <&gcc GCC_MDSS_AXI_CLK>, <&gcc GCC_MDSS_BYTE1_CLK>, <&gcc GCC_MDSS_PCLK1_CLK>, <&gcc GCC_MDSS_ESC1_CLK>; clock-names = "mdp_core", "iface", "bus", "byte", "pixel", "core"; assigned-clocks = <&gcc GCC_MDSS_BYTE1_CLK_SRC>, <&gcc GCC_MDSS_PCLK1_CLK_SRC>; assigned-clock-parents = <&mdss_dsi1_phy 0>, <&mdss_dsi1_phy 1>; phys = <&mdss_dsi1_phy>; operating-points-v2 = <&dsi0_opp_table>; power-domains = <&gcc MDSS_GDSC>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; ports { #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; mdss_dsi1_in: endpoint { remote-endpoint = <&mdss_mdp5_intf2_out>; }; }; port@1 { reg = <1>; mdss_dsi1_out: endpoint { }; }; }; }; mdss_dsi0_phy: phy@1a94a00 { compatible = "qcom,dsi-phy-28nm-hpm-fam-b"; reg = <0x01a94a00 0xd4>, <0x01a94400 0x280>, <0x01a94b80 0x30>; reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator"; #clock-cells = <1>; #phy-cells = <0>; clocks = <&gcc GCC_MDSS_AHB_CLK>, <&rpmcc RPM_SMD_XO_CLK_SRC>; clock-names = "iface", "ref"; status = "disabled"; }; mdss_dsi1_phy: phy@1a96a00 { compatible = "qcom,dsi-phy-28nm-hpm-fam-b"; reg = <0x01a96a00 0xd4>, <0x01a96400 0x280>, <0x01a96b80 0x30>; reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator"; #clock-cells = <1>; #phy-cells = <0>; clocks = <&gcc GCC_MDSS_AHB_CLK>, <&rpmcc RPM_SMD_XO_CLK_SRC>; clock-names = "iface", "ref"; status = "disabled"; }; }; adreno_gpu: gpu@1c00000 { compatible = "qcom,adreno-510.0", "qcom,adreno"; reg = <0x01c00000 0x40000>; reg-names = "kgsl_3d0_reg_memory"; interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "kgsl_3d0_irq"; clocks = <&gcc GCC_GFX3D_OXILI_CLK>, <&gcc GCC_GFX3D_OXILI_AHB_CLK>, <&gcc GCC_GFX3D_OXILI_GMEM_CLK>, <&gcc GCC_GFX3D_BIMC_CLK>, <&gcc GCC_GFX3D_OXILI_TIMER_CLK>, <&gcc GCC_GFX3D_OXILI_AON_CLK>; clock-names = "core", "iface", "mem", "mem_iface", "rbbmtimer", "alwayson"; power-domains = <&gcc OXILI_GX_GDSC>; iommus = <&gpu_iommu 0>; operating-points-v2 = <&gpu_opp_table>; status = "disabled"; gpu_opp_table: opp-table { compatible = "operating-points-v2"; opp-200000000 { opp-hz = /bits/ 64 <200000000>; required-opps = <&rpmpd_opp_low_svs>; opp-supported-hw = <0xff>; }; opp-300000000 { opp-hz = /bits/ 64 <300000000>; required-opps = 
<&rpmpd_opp_svs>; opp-supported-hw = <0xff>; }; opp-400000000 { opp-hz = /bits/ 64 <400000000>; required-opps = <&rpmpd_opp_nom>; opp-supported-hw = <0xff>; }; opp-480000000 { opp-hz = /bits/ 64 <480000000>; required-opps = <&rpmpd_opp_nom_plus>; opp-supported-hw = <0xff>; }; opp-540000000 { opp-hz = /bits/ 64 <540000000>; required-opps = <&rpmpd_opp_turbo>; opp-supported-hw = <0xff>; }; opp-600000000 { opp-hz = /bits/ 64 <600000000>; required-opps = <&rpmpd_opp_turbo>; opp-supported-hw = <0xff>; }; }; }; apps_iommu: iommu@1ee0000 { compatible = "qcom,msm8976-iommu", "qcom,msm-iommu-v2"; reg = <0x01ee0000 0x3000>; ranges = <0 0x01e20000 0x20000>; clocks = <&gcc GCC_SMMU_CFG_CLK>, <&gcc GCC_APSS_TCU_CLK>; clock-names = "iface", "bus"; qcom,iommu-secure-id = <17>; #address-cells = <1>; #size-cells = <1>; #iommu-cells = <1>; /* VFE */ iommu-ctx@15000 { compatible = "qcom,msm-iommu-v2-ns"; reg = <0x15000 0x1000>; qcom,ctx-asid = <20>; interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; }; /* VENUS NS */ iommu-ctx@16000 { compatible = "qcom,msm-iommu-v2-ns"; reg = <0x16000 0x1000>; qcom,ctx-asid = <21>; interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>; }; /* MDP0 */ iommu-ctx@17000 { compatible = "qcom,msm-iommu-v2-ns"; reg = <0x17000 0x1000>; qcom,ctx-asid = <22>; interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>; }; }; gpu_iommu: iommu@1f08000 { compatible = "qcom,msm8976-iommu", "qcom,msm-iommu-v2"; ranges = <0 0x01f08000 0x8000>; clocks = <&gcc GCC_SMMU_CFG_CLK>, <&gcc GCC_GFX3D_TCU_CLK>; clock-names = "iface", "bus"; power-domains = <&gcc OXILI_CX_GDSC>; qcom,iommu-secure-id = <18>; #address-cells = <1>; #size-cells = <1>; #iommu-cells = <1>; /* gfx3d user */ iommu-ctx@0 { compatible = "qcom,msm-iommu-v2-ns"; reg = <0x0 0x1000>; qcom,ctx-asid = <0>; interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>; }; /* gfx3d secure */ iommu-ctx@1000 { compatible = "qcom,msm-iommu-v2-sec"; reg = <0x1000 0x1000>; qcom,ctx-asid = <2>; interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>; }; /* gfx3d priv */ iommu-ctx@2000 { compatible = "qcom,msm-iommu-v2-sec"; reg = <0x2000 0x1000>; qcom,ctx-asid = <1>; interrupts = <GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH>; }; }; spmi_bus: spmi@200f000 { compatible = "qcom,spmi-pmic-arb"; reg = <0x0200f000 0x1000>, <0x02400000 0x800000>, <0x02c00000 0x800000>, <0x03800000 0x200000>, <0x0200a000 0x2100>; reg-names = "core", "chnls", "obsrvr", "intr", "cnfg"; interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "periph_irq"; qcom,channel = <0>; qcom,ee = <0>; #address-cells = <2>; #size-cells = <0>; interrupt-controller; #interrupt-cells = <4>; }; sdhc_1: mmc@7824900 { compatible = "qcom,msm8976-sdhci", "qcom,sdhci-msm-v4"; reg = <0x07824900 0x500>, <0x07824000 0x800>; reg-names = "hc", "core"; interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "hc_irq", "pwr_irq"; clocks = <&gcc GCC_SDCC1_AHB_CLK>, <&gcc GCC_SDCC1_APPS_CLK>, <&rpmcc RPM_SMD_XO_CLK_SRC>; clock-names = "iface", "core", "xo"; status = "disabled"; }; sdhc_2: mmc@7864900 { compatible = "qcom,msm8976-sdhci", "qcom,sdhci-msm-v4"; reg = <0x07864900 0x11c>, <0x07864000 0x800>; reg-names = "hc", "core"; interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "hc_irq", "pwr_irq"; clocks = <&gcc GCC_SDCC2_AHB_CLK>, <&gcc GCC_SDCC2_APPS_CLK>, <&rpmcc RPM_SMD_XO_CLK_SRC>; clock-names = "iface", "core", "xo"; status = "disabled"; }; blsp1_dma: dma-controller@7884000 { compatible = "qcom,bam-v1.7.0"; reg = <0x07884000 0x1f000>; interrupts = 
<GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gcc GCC_BLSP1_AHB_CLK>; clock-names = "bam_clk"; #dma-cells = <1>; qcom,ee = <0>; }; blsp1_uart1: serial@78af000 { compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; reg = <0x078af000 0x200>; interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>; clock-names = "core", "iface"; dmas = <&blsp1_dma 0>, <&blsp1_dma 1>; dma-names = "tx", "rx"; status = "disabled"; }; blsp1_uart2: serial@78b0000 { compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; reg = <0x078b0000 0x200>; interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>; clock-names = "core", "iface"; dmas = <&blsp1_dma 2>, <&blsp1_dma 3>; dma-names = "tx", "rx"; status = "disabled"; }; blsp1_spi1: spi@78b5000 { compatible = "qcom,spi-qup-v2.2.1"; reg = <0x078b5000 0x500>; interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>; clock-names = "core", "iface"; dmas = <&blsp1_dma 4>, <&blsp1_dma 5>; dma-names = "tx", "rx"; pinctrl-names = "default", "sleep"; pinctrl-0 = <&spi1_default>; pinctrl-1 = <&spi1_sleep>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; }; blsp1_i2c2: i2c@78b6000 { compatible = "qcom,i2c-qup-v2.2.1"; reg = <0x078b6000 0x500>; interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>; clock-names = "core", "iface"; clock-frequency = <400000>; dmas = <&blsp1_dma 6>, <&blsp1_dma 7>; dma-names = "tx", "rx"; pinctrl-names = "default", "sleep"; pinctrl-0 = <&blsp1_i2c2_default>; pinctrl-1 = <&blsp1_i2c2_default>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; }; blsp1_i2c4: i2c@78b8000 { compatible = "qcom,i2c-qup-v2.2.1"; reg = <0x078b8000 0x500>; interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>; clock-names = "core", "iface"; clock-frequency = <400000>; dmas = <&blsp1_dma 10>, <&blsp1_dma 11>; dma-names = "tx", "rx"; pinctrl-names = "default", "sleep"; pinctrl-0 = <&blsp1_i2c4_default>; pinctrl-1 = <&blsp1_i2c4_sleep>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; }; otg: usb@78db000 { compatible = "qcom,ci-hdrc"; reg = <0x078db000 0x200>, <0x078db200 0x200>; interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gcc GCC_USB_HS_AHB_CLK>, <&gcc GCC_USB_HS_SYSTEM_CLK>; clock-names = "iface", "core"; assigned-clocks = <&gcc GCC_USB_HS_SYSTEM_CLK>; assigned-clock-rates = <80000000>; resets = <&gcc RST_USB_HS_BCR>; reset-names = "core"; ahb-burst-config = <0>; dr_mode = "peripheral"; phy_type = "ulpi"; phy-names = "usb-phy"; phys = <&usb_hs_phy>; status = "disabled"; #reset-cells = <1>; }; sdhc_3: mmc@7a24900 { compatible = "qcom,msm8976-sdhci", "qcom,sdhci-msm-v4"; reg = <0x07a24900 0x11c>, <0x07a24000 0x800>; reg-names = "hc", "core"; interrupts = <GIC_SPI 295 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "hc_irq", "pwr_irq"; clocks = <&gcc GCC_SDCC3_AHB_CLK>, <&gcc GCC_SDCC3_APPS_CLK>, <&rpmcc RPM_SMD_XO_CLK_SRC>; clock-names = "iface", "core", "xo"; status = "disabled"; }; blsp2_dma: dma-controller@7ac4000 { compatible = "qcom,bam-v1.7.0"; reg = <0x07ac4000 0x1f000>; interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gcc GCC_BLSP2_AHB_CLK>; clock-names = "bam_clk"; #dma-cells = <1>; qcom,ee = <0>; }; blsp2_uart2: serial@7af0000 { compatible = 
"qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; reg = <0x07af0000 0x200>; interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>; clock-names = "core", "iface"; dmas = <&blsp2_dma 0>, <&blsp2_dma 1>; dma-names = "tx", "rx"; status = "disabled"; }; blsp2_i2c2: i2c@7af6000 { compatible = "qcom,i2c-qup-v2.2.1"; reg = <0x07af6000 0x600>; interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>; clock-names = "core", "iface"; clock-frequency = <400000>; dmas = <&blsp2_dma 6>, <&blsp2_dma 7>; dma-names = "tx", "rx"; pinctrl-names = "default", "sleep"; pinctrl-0 = <&blsp2_i2c2_default>; pinctrl-1 = <&blsp2_i2c2_sleep>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; }; blsp2_i2c4: i2c@7af8000 { compatible = "qcom,i2c-qup-v2.2.1"; reg = <0x07af8000 0x600>; interrupts = <GIC_SPI 302 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gcc GCC_BLSP2_QUP4_I2C_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>; clock-names = "core", "iface"; clock-frequency = <400000>; dmas = <&blsp2_dma 10>, <&blsp2_dma 11>; dma-names = "tx", "rx"; pinctrl-names = "default", "sleep"; pinctrl-0 = <&blsp2_i2c4_default>; pinctrl-1 = <&blsp2_i2c4_sleep>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; }; wcnss: remoteproc@a204000 { compatible = "qcom,pronto-v3-pil", "qcom,pronto"; reg = <0x0a204000 0x2000>, <0x0a202000 0x1000>, <0x0a21b000 0x3000>; reg-names = "ccu", "dxe", "pmu"; memory-region = <&wcnss_fw_mem>; interrupts-extended = <&intc GIC_SPI 149 IRQ_TYPE_EDGE_RISING>, <&wcnss_smp2p_in 0 IRQ_TYPE_EDGE_RISING>, <&wcnss_smp2p_in 1 IRQ_TYPE_EDGE_RISING>, <&wcnss_smp2p_in 2 IRQ_TYPE_EDGE_RISING>, <&wcnss_smp2p_in 3 IRQ_TYPE_EDGE_RISING>; interrupt-names = "wdog", "fatal", "ready", "handover", "stop-ack"; power-domains = <&rpmpd MSM8976_VDDCX>, <&rpmpd MSM8976_VDDMX>; power-domain-names = "cx", "mx"; qcom,smem-states = <&wcnss_smp2p_out 0>; qcom,smem-state-names = "stop"; pinctrl-0 = <&wcss_wlan_default>; pinctrl-names = "default"; status = "disabled"; wcnss_iris: iris { /* Separate chip, compatible is board-specific */ clocks = <&rpmcc RPM_SMD_RF_CLK2>; clock-names = "xo"; }; smd-edge { interrupts = <GIC_SPI 142 IRQ_TYPE_EDGE_RISING>; mboxes = <&apcs 17>; qcom,smd-edge = <6>; qcom,remote-pid = <4>; label = "pronto"; wcnss_ctrl: wcnss { compatible = "qcom,wcnss"; qcom,smd-channels = "WCNSS_CTRL"; qcom,mmio = <&wcnss>; wcnss_bt: bluetooth { compatible = "qcom,wcnss-bt"; }; wcnss_wifi: wifi { compatible = "qcom,wcnss-wlan"; interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "tx", "rx"; qcom,smem-states = <&apps_smsm 10>, <&apps_smsm 9>; qcom,smem-state-names = "tx-enable", "tx-rings-empty"; }; }; }; }; intc: interrupt-controller@b000000 { compatible = "qcom,msm-qgic2"; reg = <0x0b000000 0x1000>, <0x0b002000 0x1000>; interrupt-controller; #interrupt-cells = <3>; }; apcs: mailbox@b011000 { compatible = "qcom,msm8976-apcs-kpss-global", "qcom,msm8994-apcs-kpss-global", "syscon"; reg = <0x0b011000 0x1000>; #mbox-cells = <1>; }; timer@b120000 { compatible = "arm,armv7-timer-mem"; reg = <0x0b120000 0x1000>; #address-cells = <1>; #size-cells = <1>; ranges; clock-frequency = <19200000>; frame@b121000 { reg = <0x0b121000 0x1000>, <0x0b122000 0x1000>; interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; frame-number = <0>; }; frame@b123000 { reg = <0x0b123000 0x1000>; interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>; frame-number = <1>; status = 
"disabled"; }; frame@b124000 { reg = <0x0b124000 0x1000>; interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; frame-number = <2>; status = "disabled"; }; frame@b125000 { reg = <0x0b125000 0x1000>; interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>; frame-number = <3>; status = "disabled"; }; frame@b126000 { reg = <0x0b126000 0x1000>; interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; frame-number = <4>; status = "disabled"; }; frame@b127000 { reg = <0x0b127000 0x1000>; interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>; frame-number = <5>; status = "disabled"; }; frame@b128000 { reg = <0x0b128000 0x1000>; interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; frame-number = <6>; status = "disabled"; }; }; imem: sram@8600000 { compatible = "qcom,msm8976-imem", "syscon", "simple-mfd"; reg = <0x08600000 0x1000>; #address-cells = <1>; #size-cells = <1>; ranges = <0 0x08600000 0x1000>; pil-reloc@94c { compatible = "qcom,pil-reloc-info"; reg = <0x94c 0xc8>; }; }; }; thermal-zones { aoss0-thermal { polling-delay-passive = <250>; thermal-sensors = <&tsens 0>; trips { aoss0_alert0: trip-point0 { temperature = <75000>; hysteresis = <2000>; type = "hot"; }; }; }; modem-thermal { polling-delay-passive = <250>; thermal-sensors = <&tsens 1>; trips { modem_alert0: trip-point0 { temperature = <75000>; hysteresis = <2000>; type = "hot"; }; }; }; qdsp-thermal { polling-delay-passive = <250>; thermal-sensors = <&tsens 2>; trips { qdsp_alert0: trip-point0 { temperature = <75000>; hysteresis = <2000>; type = "hot"; }; }; }; cam-isp-thermal { polling-delay-passive = <250>; thermal-sensors = <&tsens 3>; trips { cam_isp_alert0: trip-point0 { temperature = <75000>; hysteresis = <2000>; type = "hot"; }; }; }; cpu4-thermal { polling-delay-passive = <250>; thermal-sensors = <&tsens 4>; trips { cpu4_alert0: trip-point0 { temperature = <50000>; hysteresis = <2000>; type = "hot"; }; cpu4_alert1: trip-point1 { temperature = <55000>; hysteresis = <2000>; type = "passive"; }; cpu4_crit: cpu-crit { temperature = <75000>; hysteresis = <2000>; type = "critical"; }; }; }; cpu5-thermal { polling-delay-passive = <250>; thermal-sensors = <&tsens 5>; trips { cpu5_alert0: trip-point0 { temperature = <50000>; hysteresis = <2000>; type = "hot"; }; cpu5_alert1: trip-point1 { temperature = <55000>; hysteresis = <2000>; type = "passive"; }; cpu5_crit: cpu-crit { temperature = <75000>; hysteresis = <2000>; type = "critical"; }; }; }; cpu6-thermal { polling-delay-passive = <250>; thermal-sensors = <&tsens 6>; trips { cpu6_alert0: trip-point0 { temperature = <50000>; hysteresis = <2000>; type = "hot"; }; cpu6_alert1: trip-point1 { temperature = <55000>; hysteresis = <2000>; type = "passive"; }; cpu6_crit: cpu-crit { temperature = <75000>; hysteresis = <2000>; type = "critical"; }; }; }; cpu7-thermal { polling-delay-passive = <250>; thermal-sensors = <&tsens 7>; trips { cpu7_alert0: trip-point0 { temperature = <50000>; hysteresis = <2000>; type = "hot"; }; cpu7_alert1: trip-point1 { temperature = <55000>; hysteresis = <2000>; type = "passive"; }; cpu7_crit: cpu-crit { temperature = <75000>; hysteresis = <2000>; type = "critical"; }; }; }; big-l2-thermal { polling-delay-passive = <250>; thermal-sensors = <&tsens 8>; trips { l2_alert0: trip-point0 { temperature = <50000>; hysteresis = <2000>; type = "hot"; }; l2_alert1: trip-point1 { temperature = <55000>; hysteresis = <2000>; type = "passive"; }; l2_crit: l2-crit { temperature = <75000>; hysteresis = <2000>; type = "critical"; }; }; }; cpu0-thermal { polling-delay-passive = <250>; thermal-sensors = <&tsens 9>; trips { 
cpu0_alert0: trip-point0 { temperature = <50000>; hysteresis = <2000>; type = "hot"; }; cpu0_alert1: trip-point1 { temperature = <55000>; hysteresis = <2000>; type = "passive"; }; cpu0_crit: cpu-crit { temperature = <75000>; hysteresis = <2000>; type = "critical"; }; }; }; gpu-thermal { polling-delay-passive = <250>; thermal-sensors = <&tsens 10>; trips { gpu_alert0: trip-point0 { temperature = <50000>; hysteresis = <2000>; type = "hot"; }; gpu_alert1: trip-point1 { temperature = <55000>; hysteresis = <2000>; type = "passive"; }; gpu_crit: gpu-crit { temperature = <75000>; hysteresis = <2000>; type = "critical"; }; }; }; }; timer { compatible = "arm,armv8-timer"; interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>, <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>, <GIC_PPI 4 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>, <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>; clock-frequency = <19200000>; }; };
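/*
 * Illustrative sketch, not part of the sources above: the devicetree nodes
 * above hand resources to drivers by name (for example the sdhc_1 node's
 * reg-names "hc"/"core" and clock-names "iface"/"core"/"xo"). The probe
 * fragment below shows how a platform driver would look those up by name.
 * The example_sdhci_probe name is made up for this sketch; the real mmc
 * nodes are handled by the upstream sdhci-msm driver.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_sdhci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	void __iomem *hc_base, *core_base;
	struct clk *iface, *core;

	/* "hc" and "core" match the reg-names of the mmc nodes above */
	hc_base = devm_platform_ioremap_resource_byname(pdev, "hc");
	if (IS_ERR(hc_base))
		return PTR_ERR(hc_base);

	core_base = devm_platform_ioremap_resource_byname(pdev, "core");
	if (IS_ERR(core_base))
		return PTR_ERR(core_base);

	/* Clock handles resolve through the clocks/clock-names properties */
	iface = devm_clk_get_enabled(dev, "iface");
	if (IS_ERR(iface))
		return PTR_ERR(iface);

	core = devm_clk_get_enabled(dev, "core");
	if (IS_ERR(core))
		return PTR_ERR(core);

	return 0;
}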
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2016 Mellanox Technologies. All rights reserved. * Copyright (c) 2016 Jiri Pirko <[email protected]> */ #include "devl_internal.h" struct devlink_linecard { struct list_head list; struct devlink *devlink; unsigned int index; const struct devlink_linecard_ops *ops; void *priv; enum devlink_linecard_state state; struct mutex state_lock; /* Protects state */ const char *type; struct devlink_linecard_type *types; unsigned int types_count; u32 rel_index; }; unsigned int devlink_linecard_index(struct devlink_linecard *linecard) { return linecard->index; } static struct devlink_linecard * devlink_linecard_get_by_index(struct devlink *devlink, unsigned int linecard_index) { struct devlink_linecard *devlink_linecard; list_for_each_entry(devlink_linecard, &devlink->linecard_list, list) { if (devlink_linecard->index == linecard_index) return devlink_linecard; } return NULL; } static bool devlink_linecard_index_exists(struct devlink *devlink, unsigned int linecard_index) { return devlink_linecard_get_by_index(devlink, linecard_index); } static struct devlink_linecard * devlink_linecard_get_from_attrs(struct devlink *devlink, struct nlattr **attrs) { if (attrs[DEVLINK_ATTR_LINECARD_INDEX]) { u32 linecard_index = nla_get_u32(attrs[DEVLINK_ATTR_LINECARD_INDEX]); struct devlink_linecard *linecard; linecard = devlink_linecard_get_by_index(devlink, linecard_index); if (!linecard) return ERR_PTR(-ENODEV); return linecard; } return ERR_PTR(-EINVAL); } static struct devlink_linecard * devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info) { return devlink_linecard_get_from_attrs(devlink, info->attrs); } struct devlink_linecard_type { const char *type; const void *priv; }; static int devlink_nl_linecard_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_linecard *linecard, enum devlink_command cmd, u32 portid, u32 seq, int flags, struct netlink_ext_ack *extack) { struct devlink_linecard_type *linecard_type; struct nlattr *attr; void *hdr; int i; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); if (!hdr) return -EMSGSIZE; if (devlink_nl_put_handle(msg, devlink)) goto nla_put_failure; if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX, linecard->index)) goto nla_put_failure; if (nla_put_u8(msg, DEVLINK_ATTR_LINECARD_STATE, linecard->state)) goto nla_put_failure; if (linecard->type && nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE, linecard->type)) goto nla_put_failure; if (linecard->types_count) { attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES); if (!attr) goto nla_put_failure; for (i = 0; i < linecard->types_count; i++) { linecard_type = &linecard->types[i]; if (nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE, linecard_type->type)) { nla_nest_cancel(msg, attr); goto nla_put_failure; } } nla_nest_end(msg, attr); } if (devlink_rel_devlink_handle_put(msg, devlink, linecard->rel_index, DEVLINK_ATTR_NESTED_DEVLINK, NULL)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static void devlink_linecard_notify(struct devlink_linecard *linecard, enum devlink_command cmd) { struct devlink *devlink = linecard->devlink; struct sk_buff *msg; int err; WARN_ON(cmd != DEVLINK_CMD_LINECARD_NEW && cmd != DEVLINK_CMD_LINECARD_DEL); if (!__devl_is_registered(devlink) || !devlink_nl_notify_need(devlink)) return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; err = devlink_nl_linecard_fill(msg, devlink, 
linecard, cmd, 0, 0, 0, NULL); if (err) { nlmsg_free(msg); return; } devlink_nl_notify_send(devlink, msg); } void devlink_linecards_notify_register(struct devlink *devlink) { struct devlink_linecard *linecard; list_for_each_entry(linecard, &devlink->linecard_list, list) devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); } void devlink_linecards_notify_unregister(struct devlink *devlink) { struct devlink_linecard *linecard; list_for_each_entry_reverse(linecard, &devlink->linecard_list, list) devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL); } int devlink_nl_linecard_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_linecard *linecard; struct sk_buff *msg; int err; linecard = devlink_linecard_get_from_info(devlink, info); if (IS_ERR(linecard)) return PTR_ERR(linecard); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; mutex_lock(&linecard->state_lock); err = devlink_nl_linecard_fill(msg, devlink, linecard, DEVLINK_CMD_LINECARD_NEW, info->snd_portid, info->snd_seq, 0, info->extack); mutex_unlock(&linecard->state_lock); if (err) { nlmsg_free(msg); return err; } return genlmsg_reply(msg, info); } static int devlink_nl_linecard_get_dump_one(struct sk_buff *msg, struct devlink *devlink, struct netlink_callback *cb, int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_linecard *linecard; int idx = 0; int err = 0; list_for_each_entry(linecard, &devlink->linecard_list, list) { if (idx < state->idx) { idx++; continue; } mutex_lock(&linecard->state_lock); err = devlink_nl_linecard_fill(msg, devlink, linecard, DEVLINK_CMD_LINECARD_NEW, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags, cb->extack); mutex_unlock(&linecard->state_lock); if (err) { state->idx = idx; break; } idx++; } return err; } int devlink_nl_linecard_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { return devlink_nl_dumpit(skb, cb, devlink_nl_linecard_get_dump_one); } static struct devlink_linecard_type * devlink_linecard_type_lookup(struct devlink_linecard *linecard, const char *type) { struct devlink_linecard_type *linecard_type; int i; for (i = 0; i < linecard->types_count; i++) { linecard_type = &linecard->types[i]; if (!strcmp(type, linecard_type->type)) return linecard_type; } return NULL; } static int devlink_linecard_type_set(struct devlink_linecard *linecard, const char *type, struct netlink_ext_ack *extack) { const struct devlink_linecard_ops *ops = linecard->ops; struct devlink_linecard_type *linecard_type; int err; mutex_lock(&linecard->state_lock); if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) { NL_SET_ERR_MSG(extack, "Line card is currently being provisioned"); err = -EBUSY; goto out; } if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) { NL_SET_ERR_MSG(extack, "Line card is currently being unprovisioned"); err = -EBUSY; goto out; } linecard_type = devlink_linecard_type_lookup(linecard, type); if (!linecard_type) { NL_SET_ERR_MSG(extack, "Unsupported line card type provided"); err = -EINVAL; goto out; } if (linecard->state != DEVLINK_LINECARD_STATE_UNPROVISIONED && linecard->state != DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) { NL_SET_ERR_MSG(extack, "Line card already provisioned"); err = -EBUSY; /* Check if the line card is provisioned in the same * way the user asks. In case it is, make the operation * to return success. 
*/ if (ops->same_provision && ops->same_provision(linecard, linecard->priv, linecard_type->type, linecard_type->priv)) err = 0; goto out; } linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING; linecard->type = linecard_type->type; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); mutex_unlock(&linecard->state_lock); err = ops->provision(linecard, linecard->priv, linecard_type->type, linecard_type->priv, extack); if (err) { /* Provisioning failed. Assume the linecard is unprovisioned * for future operations. */ mutex_lock(&linecard->state_lock); linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; linecard->type = NULL; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); mutex_unlock(&linecard->state_lock); } return err; out: mutex_unlock(&linecard->state_lock); return err; } static int devlink_linecard_type_unset(struct devlink_linecard *linecard, struct netlink_ext_ack *extack) { int err; mutex_lock(&linecard->state_lock); if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) { NL_SET_ERR_MSG(extack, "Line card is currently being provisioned"); err = -EBUSY; goto out; } if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) { NL_SET_ERR_MSG(extack, "Line card is currently being unprovisioned"); err = -EBUSY; goto out; } if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) { linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; linecard->type = NULL; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); err = 0; goto out; } if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONED) { NL_SET_ERR_MSG(extack, "Line card is not provisioned"); err = 0; goto out; } linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONING; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); mutex_unlock(&linecard->state_lock); err = linecard->ops->unprovision(linecard, linecard->priv, extack); if (err) { /* Unprovisioning failed. Assume the linecard is unprovisioned * for future operations. 
*/ mutex_lock(&linecard->state_lock); linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; linecard->type = NULL; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); mutex_unlock(&linecard->state_lock); } return err; out: mutex_unlock(&linecard->state_lock); return err; } int devlink_nl_linecard_set_doit(struct sk_buff *skb, struct genl_info *info) { struct netlink_ext_ack *extack = info->extack; struct devlink *devlink = info->user_ptr[0]; struct devlink_linecard *linecard; int err; linecard = devlink_linecard_get_from_info(devlink, info); if (IS_ERR(linecard)) return PTR_ERR(linecard); if (info->attrs[DEVLINK_ATTR_LINECARD_TYPE]) { const char *type; type = nla_data(info->attrs[DEVLINK_ATTR_LINECARD_TYPE]); if (strcmp(type, "")) { err = devlink_linecard_type_set(linecard, type, extack); if (err) return err; } else { err = devlink_linecard_type_unset(linecard, extack); if (err) return err; } } return 0; } static int devlink_linecard_types_init(struct devlink_linecard *linecard) { struct devlink_linecard_type *linecard_type; unsigned int count; int i; count = linecard->ops->types_count(linecard, linecard->priv); linecard->types = kmalloc_array(count, sizeof(*linecard_type), GFP_KERNEL); if (!linecard->types) return -ENOMEM; linecard->types_count = count; for (i = 0; i < count; i++) { linecard_type = &linecard->types[i]; linecard->ops->types_get(linecard, linecard->priv, i, &linecard_type->type, &linecard_type->priv); } return 0; } static void devlink_linecard_types_fini(struct devlink_linecard *linecard) { kfree(linecard->types); } /** * devl_linecard_create - Create devlink linecard * * @devlink: devlink * @linecard_index: driver-specific numerical identifier of the linecard * @ops: linecards ops * @priv: user priv pointer * * Create devlink linecard instance with provided linecard index. * Caller can use any indexing, even hw-related one. * * Return: Line card structure or an ERR_PTR() encoded error code. 
*/ struct devlink_linecard * devl_linecard_create(struct devlink *devlink, unsigned int linecard_index, const struct devlink_linecard_ops *ops, void *priv) { struct devlink_linecard *linecard; int err; if (WARN_ON(!ops || !ops->provision || !ops->unprovision || !ops->types_count || !ops->types_get)) return ERR_PTR(-EINVAL); if (devlink_linecard_index_exists(devlink, linecard_index)) return ERR_PTR(-EEXIST); linecard = kzalloc(sizeof(*linecard), GFP_KERNEL); if (!linecard) return ERR_PTR(-ENOMEM); linecard->devlink = devlink; linecard->index = linecard_index; linecard->ops = ops; linecard->priv = priv; linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; mutex_init(&linecard->state_lock); err = devlink_linecard_types_init(linecard); if (err) { mutex_destroy(&linecard->state_lock); kfree(linecard); return ERR_PTR(err); } list_add_tail(&linecard->list, &devlink->linecard_list); devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); return linecard; } EXPORT_SYMBOL_GPL(devl_linecard_create); /** * devl_linecard_destroy - Destroy devlink linecard * * @linecard: devlink linecard */ void devl_linecard_destroy(struct devlink_linecard *linecard) { devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL); list_del(&linecard->list); devlink_linecard_types_fini(linecard); mutex_destroy(&linecard->state_lock); kfree(linecard); } EXPORT_SYMBOL_GPL(devl_linecard_destroy); /** * devlink_linecard_provision_set - Set provisioning on linecard * * @linecard: devlink linecard * @type: linecard type * * This is either called directly from the provision() op call or * as a result of the provision() op call asynchronously. */ void devlink_linecard_provision_set(struct devlink_linecard *linecard, const char *type) { mutex_lock(&linecard->state_lock); WARN_ON(linecard->type && strcmp(linecard->type, type)); linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED; linecard->type = type; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); mutex_unlock(&linecard->state_lock); } EXPORT_SYMBOL_GPL(devlink_linecard_provision_set); /** * devlink_linecard_provision_clear - Clear provisioning on linecard * * @linecard: devlink linecard * * This is either called directly from the unprovision() op call or * as a result of the unprovision() op call asynchronously. */ void devlink_linecard_provision_clear(struct devlink_linecard *linecard) { mutex_lock(&linecard->state_lock); linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; linecard->type = NULL; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); mutex_unlock(&linecard->state_lock); } EXPORT_SYMBOL_GPL(devlink_linecard_provision_clear); /** * devlink_linecard_provision_fail - Fail provisioning on linecard * * @linecard: devlink linecard * * This is either called directly from the provision() op call or * as a result of the provision() op call asynchronously. 
*/ void devlink_linecard_provision_fail(struct devlink_linecard *linecard) { mutex_lock(&linecard->state_lock); linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING_FAILED; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); mutex_unlock(&linecard->state_lock); } EXPORT_SYMBOL_GPL(devlink_linecard_provision_fail); /** * devlink_linecard_activate - Set linecard active * * @linecard: devlink linecard */ void devlink_linecard_activate(struct devlink_linecard *linecard) { mutex_lock(&linecard->state_lock); WARN_ON(linecard->state != DEVLINK_LINECARD_STATE_PROVISIONED); linecard->state = DEVLINK_LINECARD_STATE_ACTIVE; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); mutex_unlock(&linecard->state_lock); } EXPORT_SYMBOL_GPL(devlink_linecard_activate); /** * devlink_linecard_deactivate - Set linecard inactive * * @linecard: devlink linecard */ void devlink_linecard_deactivate(struct devlink_linecard *linecard) { mutex_lock(&linecard->state_lock); switch (linecard->state) { case DEVLINK_LINECARD_STATE_ACTIVE: linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); break; case DEVLINK_LINECARD_STATE_UNPROVISIONING: /* Line card is being deactivated as part * of unprovisioning flow. */ break; default: WARN_ON(1); break; } mutex_unlock(&linecard->state_lock); } EXPORT_SYMBOL_GPL(devlink_linecard_deactivate); static void devlink_linecard_rel_notify_cb(struct devlink *devlink, u32 linecard_index) { struct devlink_linecard *linecard; linecard = devlink_linecard_get_by_index(devlink, linecard_index); if (!linecard) return; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); } static void devlink_linecard_rel_cleanup_cb(struct devlink *devlink, u32 linecard_index, u32 rel_index) { struct devlink_linecard *linecard; linecard = devlink_linecard_get_by_index(devlink, linecard_index); if (linecard && linecard->rel_index == rel_index) linecard->rel_index = 0; } /** * devlink_linecard_nested_dl_set - Attach/detach nested devlink * instance to linecard. * * @linecard: devlink linecard * @nested_devlink: devlink instance to attach or NULL to detach */ int devlink_linecard_nested_dl_set(struct devlink_linecard *linecard, struct devlink *nested_devlink) { return devlink_rel_nested_in_add(&linecard->rel_index, linecard->devlink->index, linecard->index, devlink_linecard_rel_notify_cb, devlink_linecard_rel_cleanup_cb, nested_devlink); } EXPORT_SYMBOL_GPL(devlink_linecard_nested_dl_set);
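/*
 * Illustrative driver-side sketch, not part of linecard.c above: a minimal
 * devlink_linecard_ops wired up through devl_linecard_create(). The callback
 * signatures mirror how the core code above invokes them (provision,
 * unprovision, types_count, types_get); the example_* names and the single
 * "16x100G" type string are made up for this sketch.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <net/devlink.h>

static const char * const example_lc_types[] = { "16x100G" };

static int example_lc_provision(struct devlink_linecard *linecard, void *priv,
				const char *type, const void *type_priv,
				struct netlink_ext_ack *extack)
{
	/*
	 * Bring the line card hardware up here. Completion is reported via
	 * devlink_linecard_provision_set(), either directly as done below or
	 * asynchronously once the hardware is ready.
	 */
	devlink_linecard_provision_set(linecard, type);
	return 0;
}

static int example_lc_unprovision(struct devlink_linecard *linecard, void *priv,
				  struct netlink_ext_ack *extack)
{
	devlink_linecard_provision_clear(linecard);
	return 0;
}

static unsigned int example_lc_types_count(struct devlink_linecard *linecard,
					   void *priv)
{
	return ARRAY_SIZE(example_lc_types);
}

static void example_lc_types_get(struct devlink_linecard *linecard, void *priv,
				 unsigned int index, const char **type,
				 const void **type_priv)
{
	*type = example_lc_types[index];
	*type_priv = NULL;
}

static const struct devlink_linecard_ops example_lc_ops = {
	.provision = example_lc_provision,
	.unprovision = example_lc_unprovision,
	.types_count = example_lc_types_count,
	.types_get = example_lc_types_get,
};

/* Called with the devlink instance lock held (devl_ prefix). */
static int example_lc_register(struct devlink *devlink, unsigned int index)
{
	struct devlink_linecard *lc;

	lc = devl_linecard_create(devlink, index, &example_lc_ops, NULL);
	return PTR_ERR_OR_ZERO(lc);
}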
// SPDX-License-Identifier: (GPL-2.0 OR MIT) /* * Copyright 2020 Compass Electronics Group, LLC */ /dts-v1/; #include "imx8mm.dtsi" #include "imx8mm-beacon-som.dtsi" #include "imx8mm-beacon-baseboard.dtsi" / { model = "Beacon EmbeddedWorks i.MX8M Mini Development Kit"; compatible = "beacon,imx8mm-beacon-kit", "fsl,imx8mm"; chosen { stdout-path = &uart2; }; connector { compatible = "hdmi-connector"; type = "a"; port { hdmi_connector_in: endpoint { remote-endpoint = <&adv7535_out>; }; }; }; reg_hdmi: regulator-hdmi-dvdd { compatible = "regulator-fixed"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_hdmi>; regulator-name = "hdmi_pwr_en"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; gpio = <&gpio2 11 GPIO_ACTIVE_HIGH>; enable-active-high; startup-delay-us = <70000>; regulator-always-on; }; sound-hdmi { compatible = "simple-audio-card"; simple-audio-card,name = "sound-hdmi"; simple-audio-card,format = "i2s"; simple-audio-card,cpu { sound-dai = <&sai5>; system-clock-direction-out; }; simple-audio-card,codec { sound-dai = <&adv_bridge>; }; }; }; &i2c2 { adv_bridge: hdmi@3d { compatible = "adi,adv7535"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_hdmi_bridge>; reg = <0x3d>, <0x3e>, <0x3c>, <0x3f>; reg-names = "main", "edid", "cec", "packet"; adi,dsi-lanes = <4>; avdd-supply = <&reg_hdmi>; a2vdd-supply = <&reg_hdmi>; dvdd-supply = <&reg_hdmi>; pvdd-supply = <&reg_hdmi>; v1p2-supply = <&reg_hdmi>; v3p3-supply = <&reg_hdmi>; interrupt-parent = <&gpio1>; interrupts = <9 IRQ_TYPE_LEVEL_LOW>; #sound-dai-cells = <0>; ports { #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; adv7535_in: endpoint { remote-endpoint = <&dsi_out>; }; }; port@1 { reg = <1>; adv7535_out: endpoint { remote-endpoint = <&hdmi_connector_in>; }; }; }; }; }; &lcdif { status = "okay"; }; &mipi_dsi { samsung,esc-clock-frequency = <20000000>; status = "okay"; ports { port@1 { reg = <1>; dsi_out: endpoint { remote-endpoint = <&adv7535_in>; }; }; }; }; &sai5 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_sai5>; assigned-clocks = <&clk IMX8MM_CLK_SAI5>; assigned-clock-parents = <&clk IMX8MM_AUDIO_PLL1_OUT>; assigned-clock-rates = <24576000>; #sound-dai-cells = <0>; status = "okay"; }; &iomuxc { pinctrl_hdmi_bridge: hdmibridgegrp { fsl,pins = < MX8MM_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x19 >; }; pinctrl_reg_hdmi: reghdmigrp { fsl,pins = < MX8MM_IOMUXC_SD1_STROBE_GPIO2_IO11 0x16 >; }; pinctrl_sai5: sai5grp { fsl,pins = < MX8MM_IOMUXC_SAI5_RXD3_SAI5_TX_DATA0 0xd6 MX8MM_IOMUXC_SAI5_RXD2_SAI5_TX_BCLK 0xd6 MX8MM_IOMUXC_SAI5_RXD1_SAI5_TX_SYNC 0xd6 >; }; };
/* * Copyright (c) 2013 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef SPECTRAL_COMMON_H #define SPECTRAL_COMMON_H #define SPECTRAL_HT20_NUM_BINS 56 #define SPECTRAL_HT20_40_NUM_BINS 128 /* TODO: could possibly be 512, but no samples this large * could be acquired so far. */ #define SPECTRAL_ATH10K_MAX_NUM_BINS 256 /* FFT sample format given to userspace via debugfs. * * Please keep the type/length at the front position and change * other fields after adding another sample type * * TODO: this might need rework when switching to nl80211-based * interface. */ enum ath_fft_sample_type { ATH_FFT_SAMPLE_HT20 = 1, ATH_FFT_SAMPLE_HT20_40, ATH_FFT_SAMPLE_ATH10K, ATH_FFT_SAMPLE_ATH11K }; struct fft_sample_tlv { u8 type; /* see ath_fft_sample */ __be16 length; /* type dependent data follows */ } __packed; struct fft_sample_ht20 { struct fft_sample_tlv tlv; u8 max_exp; __be16 freq; s8 rssi; s8 noise; __be16 max_magnitude; u8 max_index; u8 bitmap_weight; __be64 tsf; u8 data[SPECTRAL_HT20_NUM_BINS]; } __packed; struct fft_sample_ht20_40 { struct fft_sample_tlv tlv; u8 channel_type; __be16 freq; s8 lower_rssi; s8 upper_rssi; __be64 tsf; s8 lower_noise; s8 upper_noise; __be16 lower_max_magnitude; __be16 upper_max_magnitude; u8 lower_max_index; u8 upper_max_index; u8 lower_bitmap_weight; u8 upper_bitmap_weight; u8 max_exp; u8 data[SPECTRAL_HT20_40_NUM_BINS]; } __packed; struct fft_sample_ath10k { struct fft_sample_tlv tlv; u8 chan_width_mhz; __be16 freq1; __be16 freq2; __be16 noise; __be16 max_magnitude; __be16 total_gain_db; __be16 base_pwr_db; __be64 tsf; s8 max_index; u8 rssi; u8 relpwr_db; u8 avgpwr_db; u8 max_exp; u8 data[]; } __packed; struct fft_sample_ath11k { struct fft_sample_tlv tlv; u8 chan_width_mhz; s8 max_index; u8 max_exp; __be16 freq1; __be16 freq2; __be16 max_magnitude; __be16 rssi; __be32 tsf; __be32 noise; u8 data[]; } __packed; #endif /* SPECTRAL_COMMON_H */
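/*
 * Illustrative userspace sketch, not part of the header above: the comment
 * in spectral_common.h notes that FFT samples reach userspace via debugfs as
 * TLVs whose type/length header comes first and whose multi-byte fields are
 * big-endian. The fragment below walks such a buffer, interpreting only the
 * TLV header; the struct layout mirrors fft_sample_tlv and the function name
 * is made up for this sketch.
 */
#include <endian.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sample_tlv {
	uint8_t type;		/* ath_fft_sample_type */
	uint16_t length;	/* big-endian, size of the data that follows */
} __attribute__((packed));

static void walk_samples(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct sample_tlv) <= len) {
		const struct sample_tlv *tlv = (const void *)(buf + off);
		uint16_t payload = be16toh(tlv->length);

		printf("sample type %u, %u bytes of payload\n",
		       tlv->type, payload);

		/* length covers only the type-dependent data after the header */
		off += sizeof(*tlv) + payload;
	}
}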
#ifndef __GPIO_ASPEED_H #define __GPIO_ASPEED_H #include <linux/types.h> struct gpio_desc; /* Callbacks used to arbitrate access to GPIO banks shared with the Aspeed coprocessor */ struct aspeed_gpio_copro_ops { int (*request_access)(void *data); int (*release_access)(void *data); }; /* Hand a GPIO line over to the coprocessor (reporting its register offsets and bit number), take it back again, and register the arbitration callbacks */ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc, u16 *vreg_offset, u16 *dreg_offset, u8 *bit); int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc); int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data); #endif /* __GPIO_ASPEED_H */
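/*
 * Illustrative sketch, not part of the header above: a driver handing one
 * GPIO line over to the Aspeed coprocessor. Only the prototypes come from
 * gpio/aspeed.h; the callback bodies, the example_* names and the "copro"
 * consumer id are made up for this sketch.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/aspeed.h>
#include <linux/gpio/consumer.h>

static int example_request_access(void *data)
{
	/* e.g. pause the coprocessor before the host touches the shared bank */
	return 0;
}

static int example_release_access(void *data)
{
	/* e.g. let the coprocessor resume */
	return 0;
}

static const struct aspeed_gpio_copro_ops example_copro_ops = {
	.request_access = example_request_access,
	.release_access = example_release_access,
};

static int example_hand_over_gpio(struct device *dev)
{
	struct gpio_desc *desc;
	u16 vreg, dreg;
	u8 bit;
	int ret;

	ret = aspeed_gpio_copro_set_ops(&example_copro_ops, NULL);
	if (ret)
		return ret;

	desc = devm_gpiod_get(dev, "copro", GPIOD_OUT_LOW);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Reports register offsets and bit so the coprocessor can drive the line */
	return aspeed_gpio_copro_grab_gpio(desc, &vreg, &dreg, &bit);
}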
// SPDX-License-Identifier: GPL-2.0 /* * Thunderbolt driver - bus logic (NHI independent) * * Copyright (c) 2014 Andreas Noever <[email protected]> * Copyright (C) 2019, Intel Corporation */ #include <linux/slab.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/pm_runtime.h> #include <linux/platform_data/x86/apple.h> #include "tb.h" #include "tb_regs.h" #include "tunnel.h" #define TB_TIMEOUT 100 /* ms */ #define TB_RELEASE_BW_TIMEOUT 10000 /* ms */ /* * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver * direction. This is 40G - 10% guard band bandwidth. */ #define TB_ASYM_MIN (40000 * 90 / 100) /* * Threshold bandwidth (in Mb/s) that is used to switch the links to * asymmetric and back. This is selected as 45G which means when the * request is higher than this, we switch the link to asymmetric, and * when it is less than this we switch it back. The 45G is selected so * that we still have 27G (of the total 72G) for bulk PCIe traffic when * switching back to symmetric. */ #define TB_ASYM_THRESHOLD 45000 #define MAX_GROUPS 7 /* max Group_ID is 7 */ static unsigned int asym_threshold = TB_ASYM_THRESHOLD; module_param_named(asym_threshold, asym_threshold, uint, 0444); MODULE_PARM_DESC(asym_threshold, "threshold (Mb/s) when to Gen 4 switch link symmetry. 0 disables. (default: " __MODULE_STRING(TB_ASYM_THRESHOLD) ")"); /** * struct tb_cm - Simple Thunderbolt connection manager * @tunnel_list: List of active tunnels * @dp_resources: List of available DP resources for DP tunneling * @hotplug_active: tb_handle_hotplug will stop progressing plug * events and exit if this is not set (it needs to * acquire the lock one more time). Used to drain wq * after cfg has been paused. * @remove_work: Work used to remove any unplugged routers after * runtime resume * @groups: Bandwidth groups used in this domain. */ struct tb_cm { struct list_head tunnel_list; struct list_head dp_resources; bool hotplug_active; struct delayed_work remove_work; struct tb_bandwidth_group groups[MAX_GROUPS]; }; static inline struct tb *tcm_to_tb(struct tb_cm *tcm) { return ((void *)tcm - sizeof(struct tb)); } struct tb_hotplug_event { struct work_struct work; struct tb *tb; u64 route; u8 port; bool unplug; }; static void tb_handle_hotplug(struct work_struct *work); static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) { struct tb_hotplug_event *ev; ev = kmalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return; ev->tb = tb; ev->route = route; ev->port = port; ev->unplug = unplug; INIT_WORK(&ev->work, tb_handle_hotplug); queue_work(tb->wq, &ev->work); } /* enumeration & hot plug handling */ static void tb_add_dp_resources(struct tb_switch *sw) { struct tb_cm *tcm = tb_priv(sw->tb); struct tb_port *port; tb_switch_for_each_port(sw, port) { if (!tb_port_is_dpin(port)) continue; if (!tb_switch_query_dp_resource(sw, port)) continue; /* * If DP IN on device router exist, position it at the * beginning of the DP resources list, so that it is used * before DP IN of the host router. This way external GPU(s) * will be prioritized when pairing DP IN to a DP OUT. 
*/ if (tb_route(sw)) list_add(&port->list, &tcm->dp_resources); else list_add_tail(&port->list, &tcm->dp_resources); tb_port_dbg(port, "DP IN resource available\n"); } } static void tb_remove_dp_resources(struct tb_switch *sw) { struct tb_cm *tcm = tb_priv(sw->tb); struct tb_port *port, *tmp; /* Clear children resources first */ tb_switch_for_each_port(sw, port) { if (tb_port_has_remote(port)) tb_remove_dp_resources(port->remote->sw); } list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) { if (port->sw == sw) { tb_port_dbg(port, "DP OUT resource unavailable\n"); list_del_init(&port->list); } } } static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port) { struct tb_cm *tcm = tb_priv(tb); struct tb_port *p; list_for_each_entry(p, &tcm->dp_resources, list) { if (p == port) return; } tb_port_dbg(port, "DP %s resource available discovered\n", tb_port_is_dpin(port) ? "IN" : "OUT"); list_add_tail(&port->list, &tcm->dp_resources); } static void tb_discover_dp_resources(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel; list_for_each_entry(tunnel, &tcm->tunnel_list, list) { if (tb_tunnel_is_dp(tunnel)) tb_discover_dp_resource(tb, tunnel->dst_port); } } /* Enables CL states up to host router */ static int tb_enable_clx(struct tb_switch *sw) { struct tb_cm *tcm = tb_priv(sw->tb); unsigned int clx = TB_CL0S | TB_CL1; const struct tb_tunnel *tunnel; int ret; /* * Currently only enable CLx for the first link. This is enough * to allow the CPU to save energy at least on Intel hardware * and makes it slightly simpler to implement. We may change * this in the future to cover the whole topology if it turns * out to be beneficial. */ while (sw && tb_switch_depth(sw) > 1) sw = tb_switch_parent(sw); if (!sw) return 0; if (tb_switch_depth(sw) != 1) return 0; /* * If we are re-enabling then check if there is an active DMA * tunnel and in that case bail out. */ list_for_each_entry(tunnel, &tcm->tunnel_list, list) { if (tb_tunnel_is_dma(tunnel)) { if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw))) return 0; } } /* * Initially try with CL2. If that's not supported by the * topology try with CL0s and CL1 and then give up. */ ret = tb_switch_clx_enable(sw, clx | TB_CL2); if (ret == -EOPNOTSUPP) ret = tb_switch_clx_enable(sw, clx); return ret == -EOPNOTSUPP ? 0 : ret; } /** * tb_disable_clx() - Disable CL states up to host router * @sw: Router to start * * Disables CL states from @sw up to the host router. Returns true if * any CL state were disabled. This can be used to figure out whether * the link was setup by us or the boot firmware so we don't * accidentally enable them if they were not enabled during discovery. 
*/ static bool tb_disable_clx(struct tb_switch *sw) { bool disabled = false; do { int ret; ret = tb_switch_clx_disable(sw); if (ret > 0) disabled = true; else if (ret < 0) tb_sw_warn(sw, "failed to disable CL states\n"); sw = tb_switch_parent(sw); } while (sw); return disabled; } static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data) { struct tb_switch *sw; sw = tb_to_switch(dev); if (!sw) return 0; if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) { enum tb_switch_tmu_mode mode; int ret; if (tb_switch_clx_is_enabled(sw, TB_CL1)) mode = TB_SWITCH_TMU_MODE_HIFI_UNI; else mode = TB_SWITCH_TMU_MODE_HIFI_BI; ret = tb_switch_tmu_configure(sw, mode); if (ret) return ret; return tb_switch_tmu_enable(sw); } return 0; } static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel) { struct tb_switch *sw; if (!tunnel) return; /* * Once first DP tunnel is established we change the TMU * accuracy of first depth child routers (and the host router) * to the highest. This is needed for the DP tunneling to work * but also allows CL0s. * * If both routers are v2 then we don't need to do anything as * they are using enhanced TMU mode that allows all CLx. */ sw = tunnel->tb->root_switch; device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy); } static int tb_switch_tmu_hifi_uni_required(struct device *dev, void *not_used) { struct tb_switch *sw = tb_to_switch(dev); if (sw && tb_switch_tmu_is_enabled(sw) && tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_HIFI_UNI)) return 1; return device_for_each_child(dev, NULL, tb_switch_tmu_hifi_uni_required); } static bool tb_tmu_hifi_uni_required(struct tb *tb) { return device_for_each_child(&tb->dev, NULL, tb_switch_tmu_hifi_uni_required) == 1; } static int tb_enable_tmu(struct tb_switch *sw) { int ret; /* * If both routers at the end of the link are v2 we simply * enable the enhanched uni-directional mode. That covers all * the CL states. For v1 and before we need to use the normal * rate to allow CL1 (when supported). Otherwise we keep the TMU * running at the highest accuracy. */ ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI); if (ret == -EOPNOTSUPP) { if (tb_switch_clx_is_enabled(sw, TB_CL1)) { /* * Figure out uni-directional HiFi TMU requirements * currently in the domain. If there are no * uni-directional HiFi requirements we can put the TMU * into LowRes mode. * * Deliberately skip bi-directional HiFi links * as these work independently of other links * (and they do not allow any CL states anyway). 
*/ if (tb_tmu_hifi_uni_required(sw->tb)) ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_UNI); else ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_LOWRES); } else { ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI); } /* If not supported, fallback to bi-directional HiFi */ if (ret == -EOPNOTSUPP) ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI); } if (ret) return ret; /* If it is already enabled in correct mode, don't touch it */ if (tb_switch_tmu_is_enabled(sw)) return 0; ret = tb_switch_tmu_disable(sw); if (ret) return ret; ret = tb_switch_tmu_post_time(sw); if (ret) return ret; return tb_switch_tmu_enable(sw); } static void tb_switch_discover_tunnels(struct tb_switch *sw, struct list_head *list, bool alloc_hopids) { struct tb *tb = sw->tb; struct tb_port *port; tb_switch_for_each_port(sw, port) { struct tb_tunnel *tunnel = NULL; switch (port->config.type) { case TB_TYPE_DP_HDMI_IN: tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids); tb_increase_tmu_accuracy(tunnel); break; case TB_TYPE_PCIE_DOWN: tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids); break; case TB_TYPE_USB3_DOWN: tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids); break; default: break; } if (tunnel) list_add_tail(&tunnel->list, list); } tb_switch_for_each_port(sw, port) { if (tb_port_has_remote(port)) { tb_switch_discover_tunnels(port->remote->sw, list, alloc_hopids); } } } static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd) { if (tb_switch_is_usb4(port->sw)) return usb4_port_configure_xdomain(port, xd); return tb_lc_configure_xdomain(port); } static void tb_port_unconfigure_xdomain(struct tb_port *port) { if (tb_switch_is_usb4(port->sw)) usb4_port_unconfigure_xdomain(port); else tb_lc_unconfigure_xdomain(port); } static void tb_scan_xdomain(struct tb_port *port) { struct tb_switch *sw = port->sw; struct tb *tb = sw->tb; struct tb_xdomain *xd; u64 route; if (!tb_is_xdomain_enabled()) return; route = tb_downstream_route(port); xd = tb_xdomain_find_by_route(tb, route); if (xd) { tb_xdomain_put(xd); return; } xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, NULL); if (xd) { tb_port_at(route, sw)->xdomain = xd; tb_port_configure_xdomain(port, xd); tb_xdomain_add(xd); } } /** * tb_find_unused_port() - return the first inactive port on @sw * @sw: Switch to find the port on * @type: Port type to look for */ static struct tb_port *tb_find_unused_port(struct tb_switch *sw, enum tb_port_type type) { struct tb_port *port; tb_switch_for_each_port(sw, port) { if (tb_is_upstream_port(port)) continue; if (port->config.type != type) continue; if (!port->cap_adap) continue; if (tb_port_is_enabled(port)) continue; return port; } return NULL; } static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, const struct tb_port *port) { struct tb_port *down; down = usb4_switch_map_usb3_down(sw, port); if (down && !tb_usb3_port_is_enabled(down)) return down; return NULL; } static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, struct tb_port *src_port, struct tb_port *dst_port) { struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel; list_for_each_entry(tunnel, &tcm->tunnel_list, list) { if (tunnel->type == type && ((src_port && src_port == tunnel->src_port) || (dst_port && dst_port == tunnel->dst_port))) { return tunnel; } } return NULL; } static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, struct tb_port *src_port, struct tb_port *dst_port) { struct tb_port *port, *usb3_down; struct tb_switch 
*sw; /* Pick the router that is deepest in the topology */ if (tb_port_path_direction_downstream(src_port, dst_port)) sw = dst_port->sw; else sw = src_port->sw; /* Can't be the host router */ if (sw == tb->root_switch) return NULL; /* Find the downstream USB4 port that leads to this router */ port = tb_port_at(tb_route(sw), tb->root_switch); /* Find the corresponding host router USB3 downstream port */ usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port); if (!usb3_down) return NULL; return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL); } /** * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link * @tb: Domain structure * @src_port: Source protocol adapter * @dst_port: Destination protocol adapter * @port: USB4 port the consumed bandwidth is calculated * @consumed_up: Consumed upsream bandwidth (Mb/s) * @consumed_down: Consumed downstream bandwidth (Mb/s) * * Calculates consumed USB3 and PCIe bandwidth at @port between path * from @src_port to @dst_port. Does not take USB3 tunnel starting from * @src_port and ending on @src_port into account because that bandwidth is * already included in as part of the "first hop" USB3 tunnel. */ static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb, struct tb_port *src_port, struct tb_port *dst_port, struct tb_port *port, int *consumed_up, int *consumed_down) { int pci_consumed_up, pci_consumed_down; struct tb_tunnel *tunnel; *consumed_up = *consumed_down = 0; tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); if (tunnel && !tb_port_is_usb3_down(src_port) && !tb_port_is_usb3_up(dst_port)) { int ret; ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up, consumed_down); if (ret) return ret; } /* * If there is anything reserved for PCIe bulk traffic take it * into account here too. */ if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) { *consumed_up += pci_consumed_up; *consumed_down += pci_consumed_down; } return 0; } /** * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link * @tb: Domain structure * @src_port: Source protocol adapter * @dst_port: Destination protocol adapter * @port: USB4 port the consumed bandwidth is calculated * @consumed_up: Consumed upsream bandwidth (Mb/s) * @consumed_down: Consumed downstream bandwidth (Mb/s) * * Calculates consumed DP bandwidth at @port between path from @src_port * to @dst_port. Does not take tunnel starting from @src_port and ending * from @src_port into account. * * If there is bandwidth reserved for any of the groups between * @src_port and @dst_port (but not yet used) that is also taken into * account in the returned consumed bandwidth. */ static int tb_consumed_dp_bandwidth(struct tb *tb, struct tb_port *src_port, struct tb_port *dst_port, struct tb_port *port, int *consumed_up, int *consumed_down) { int group_reserved[MAX_GROUPS] = {}; struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel; bool downstream; int i, ret; *consumed_up = *consumed_down = 0; /* * Find all DP tunnels that cross the port and reduce * their consumed bandwidth from the available. */ list_for_each_entry(tunnel, &tcm->tunnel_list, list) { const struct tb_bandwidth_group *group; int dp_consumed_up, dp_consumed_down; if (tb_tunnel_is_invalid(tunnel)) continue; if (!tb_tunnel_is_dp(tunnel)) continue; if (!tb_tunnel_port_on_path(tunnel, port)) continue; /* * Calculate what is reserved for groups crossing the * same ports only once (as that is reserved for all the * tunnels in the group). 
*/ group = tunnel->src_port->group; if (group && group->reserved && !group_reserved[group->index]) group_reserved[group->index] = group->reserved; /* * Ignore the DP tunnel between src_port and dst_port * because it is the same tunnel and we may be * re-calculating estimated bandwidth. */ if (tunnel->src_port == src_port && tunnel->dst_port == dst_port) continue; ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up, &dp_consumed_down); if (ret) return ret; *consumed_up += dp_consumed_up; *consumed_down += dp_consumed_down; } downstream = tb_port_path_direction_downstream(src_port, dst_port); for (i = 0; i < ARRAY_SIZE(group_reserved); i++) { if (downstream) *consumed_down += group_reserved[i]; else *consumed_up += group_reserved[i]; } return 0; } static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port, struct tb_port *port) { bool downstream = tb_port_path_direction_downstream(src_port, dst_port); enum tb_link_width width; if (tb_is_upstream_port(port)) width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX; else width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX; return tb_port_width_supported(port, width); } /** * tb_maximum_bandwidth() - Maximum bandwidth over a single link * @tb: Domain structure * @src_port: Source protocol adapter * @dst_port: Destination protocol adapter * @port: USB4 port the total bandwidth is calculated * @max_up: Maximum upstream bandwidth (Mb/s) * @max_down: Maximum downstream bandwidth (Mb/s) * @include_asym: Include bandwidth if the link is switched from * symmetric to asymmetric * * Returns maximum possible bandwidth in @max_up and @max_down over a * single link at @port. If @include_asym is set then includes the * additional banwdith if the links are transitioned into asymmetric to * direction from @src_port to @dst_port. */ static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port, struct tb_port *dst_port, struct tb_port *port, int *max_up, int *max_down, bool include_asym) { bool downstream = tb_port_path_direction_downstream(src_port, dst_port); int link_speed, link_width, up_bw, down_bw; /* * Can include asymmetric, only if it is actually supported by * the lane adapter. */ if (!tb_asym_supported(src_port, dst_port, port)) include_asym = false; if (tb_is_upstream_port(port)) { link_speed = port->sw->link_speed; /* * sw->link_width is from upstream perspective so we use * the opposite for downstream of the host router. */ if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) { up_bw = link_speed * 3 * 1000; down_bw = link_speed * 1 * 1000; } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) { up_bw = link_speed * 1 * 1000; down_bw = link_speed * 3 * 1000; } else if (include_asym) { /* * The link is symmetric at the moment but we * can switch it to asymmetric as needed. Report * this bandwidth as available (even though it * is not yet enabled). 
*/ if (downstream) { up_bw = link_speed * 1 * 1000; down_bw = link_speed * 3 * 1000; } else { up_bw = link_speed * 3 * 1000; down_bw = link_speed * 1 * 1000; } } else { up_bw = link_speed * port->sw->link_width * 1000; down_bw = up_bw; } } else { link_speed = tb_port_get_link_speed(port); if (link_speed < 0) return link_speed; link_width = tb_port_get_link_width(port); if (link_width < 0) return link_width; if (link_width == TB_LINK_WIDTH_ASYM_TX) { up_bw = link_speed * 1 * 1000; down_bw = link_speed * 3 * 1000; } else if (link_width == TB_LINK_WIDTH_ASYM_RX) { up_bw = link_speed * 3 * 1000; down_bw = link_speed * 1 * 1000; } else if (include_asym) { /* * The link is symmetric at the moment but we * can switch it to asymmetric as needed. Report * this bandwidth as available (even though it * is not yet enabled). */ if (downstream) { up_bw = link_speed * 1 * 1000; down_bw = link_speed * 3 * 1000; } else { up_bw = link_speed * 3 * 1000; down_bw = link_speed * 1 * 1000; } } else { up_bw = link_speed * link_width * 1000; down_bw = up_bw; } } /* Leave 10% guard band */ *max_up = up_bw - up_bw / 10; *max_down = down_bw - down_bw / 10; tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down); return 0; } /** * tb_available_bandwidth() - Available bandwidth for tunneling * @tb: Domain structure * @src_port: Source protocol adapter * @dst_port: Destination protocol adapter * @available_up: Available bandwidth upstream (Mb/s) * @available_down: Available bandwidth downstream (Mb/s) * @include_asym: Include bandwidth if the link is switched from * symmetric to asymmetric * * Calculates maximum available bandwidth for protocol tunneling between * @src_port and @dst_port at the moment. This is minimum of maximum * link bandwidth across all links reduced by currently consumed * bandwidth on that link. * * If @include_asym is true then includes also bandwidth that can be * added when the links are transitioned into asymmetric (but does not * transition the links). */ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port, struct tb_port *dst_port, int *available_up, int *available_down, bool include_asym) { struct tb_port *port; int ret; /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */ *available_up = *available_down = 120000; /* Find the minimum available bandwidth over all links */ tb_for_each_port_on_path(src_port, dst_port, port) { int max_up, max_down, consumed_up, consumed_down; if (!tb_port_is_null(port)) continue; ret = tb_maximum_bandwidth(tb, src_port, dst_port, port, &max_up, &max_down, include_asym); if (ret) return ret; ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port, port, &consumed_up, &consumed_down); if (ret) return ret; max_up -= consumed_up; max_down -= consumed_down; ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port, &consumed_up, &consumed_down); if (ret) return ret; max_up -= consumed_up; max_down -= consumed_down; if (max_up < *available_up) *available_up = max_up; if (max_down < *available_down) *available_down = max_down; } if (*available_up < 0) *available_up = 0; if (*available_down < 0) *available_down = 0; return 0; } static int tb_release_unused_usb3_bandwidth(struct tb *tb, struct tb_port *src_port, struct tb_port *dst_port) { struct tb_tunnel *tunnel; tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); return tunnel ? 
tb_tunnel_release_unused_bandwidth(tunnel) : 0; } static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port, struct tb_port *dst_port) { int ret, available_up, available_down; struct tb_tunnel *tunnel; tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); if (!tunnel) return; tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n"); /* * Calculate available bandwidth for the first hop USB3 tunnel. * That determines the whole USB3 bandwidth for this branch. */ ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port, &available_up, &available_down, false); if (ret) { tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n"); return; } tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up, available_down); tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down); } static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) { struct tb_switch *parent = tb_switch_parent(sw); int ret, available_up, available_down; struct tb_port *up, *down, *port; struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel; if (!tb_acpi_may_tunnel_usb3()) { tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n"); return 0; } up = tb_switch_find_port(sw, TB_TYPE_USB3_UP); if (!up) return 0; if (!sw->link_usb4) return 0; /* * Look up available down port. Since we are chaining it should * be found right above this switch. */ port = tb_switch_downstream_port(sw); down = tb_find_usb3_down(parent, port); if (!down) return 0; if (tb_route(parent)) { struct tb_port *parent_up; /* * Check first that the parent switch has its upstream USB3 * port enabled. Otherwise the chain is not complete and * there is no point setting up a new tunnel. */ parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP); if (!parent_up || !tb_port_is_enabled(parent_up)) return 0; /* Make all unused bandwidth available for the new tunnel */ ret = tb_release_unused_usb3_bandwidth(tb, down, up); if (ret) return ret; } ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down, false); if (ret) goto err_reclaim; tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n", available_up, available_down); tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up, available_down); if (!tunnel) { ret = -ENOMEM; goto err_reclaim; } if (tb_tunnel_activate(tunnel)) { tb_port_info(up, "USB3 tunnel activation failed, aborting\n"); ret = -EIO; goto err_free; } list_add_tail(&tunnel->list, &tcm->tunnel_list); if (tb_route(parent)) tb_reclaim_usb3_bandwidth(tb, down, up); return 0; err_free: tb_tunnel_free(tunnel); err_reclaim: if (tb_route(parent)) tb_reclaim_usb3_bandwidth(tb, down, up); return ret; } static int tb_create_usb3_tunnels(struct tb_switch *sw) { struct tb_port *port; int ret; if (!tb_acpi_may_tunnel_usb3()) return 0; if (tb_route(sw)) { ret = tb_tunnel_usb3(sw->tb, sw); if (ret) return ret; } tb_switch_for_each_port(sw, port) { if (!tb_port_has_remote(port)) continue; ret = tb_create_usb3_tunnels(port->remote->sw); if (ret) return ret; } return 0; } /** * tb_configure_asym() - Transition links to asymmetric if needed * @tb: Domain structure * @src_port: Source adapter to start the transition * @dst_port: Destination adapter * @requested_up: Additional bandwidth (Mb/s) required upstream * @requested_down: Additional bandwidth (Mb/s) required downstream * * Transition links between @src_port and @dst_port into asymmetric, with * three lanes in the direction from @src_port towards @dst_port and one lane * in the opposite direction, if 
the bandwidth requirements * (requested + currently consumed) on that link exceed @asym_threshold. * * Must be called with available >= requested over all links. */ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port, struct tb_port *dst_port, int requested_up, int requested_down) { bool clx = false, clx_disabled = false, downstream; struct tb_switch *sw; struct tb_port *up; int ret = 0; if (!asym_threshold) return 0; downstream = tb_port_path_direction_downstream(src_port, dst_port); /* Pick up router deepest in the hierarchy */ if (downstream) sw = dst_port->sw; else sw = src_port->sw; tb_for_each_upstream_port_on_path(src_port, dst_port, up) { struct tb_port *down = tb_switch_downstream_port(up->sw); enum tb_link_width width_up, width_down; int consumed_up, consumed_down; ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up, &consumed_up, &consumed_down); if (ret) break; if (downstream) { /* * Downstream so make sure upstream is within the 36G * (40G - guard band 10%), and the requested is above * what the threshold is. */ if (consumed_up + requested_up >= TB_ASYM_MIN) { ret = -ENOBUFS; break; } /* Does consumed + requested exceed the threshold */ if (consumed_down + requested_down < asym_threshold) continue; width_up = TB_LINK_WIDTH_ASYM_RX; width_down = TB_LINK_WIDTH_ASYM_TX; } else { /* Upstream, the opposite of above */ if (consumed_down + requested_down >= TB_ASYM_MIN) { ret = -ENOBUFS; break; } if (consumed_up + requested_up < asym_threshold) continue; width_up = TB_LINK_WIDTH_ASYM_TX; width_down = TB_LINK_WIDTH_ASYM_RX; } if (up->sw->link_width == width_up) continue; if (!tb_port_width_supported(up, width_up) || !tb_port_width_supported(down, width_down)) continue; /* * Disable CL states before doing any transitions. We * delayed it until now that we know there is a real * transition taking place. */ if (!clx_disabled) { clx = tb_disable_clx(sw); clx_disabled = true; } tb_sw_dbg(up->sw, "configuring asymmetric link\n"); /* * Here requested + consumed > threshold so we need to * transtion the link into asymmetric now. */ ret = tb_switch_set_link_width(up->sw, width_up); if (ret) { tb_sw_warn(up->sw, "failed to set link width\n"); break; } } /* Re-enable CL states if they were previosly enabled */ if (clx) tb_enable_clx(sw); return ret; } /** * tb_configure_sym() - Transition links to symmetric if possible * @tb: Domain structure * @src_port: Source adapter to start the transition * @dst_port: Destination adapter * @keep_asym: Keep asymmetric link if preferred * * Goes over each link from @src_port to @dst_port and tries to * transition the link to symmetric if the currently consumed bandwidth * allows and link asymmetric preference is ignored (if @keep_asym is %false). 
*/ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port, struct tb_port *dst_port, bool keep_asym) { bool clx = false, clx_disabled = false, downstream; struct tb_switch *sw; struct tb_port *up; int ret = 0; if (!asym_threshold) return 0; downstream = tb_port_path_direction_downstream(src_port, dst_port); /* Pick up router deepest in the hierarchy */ if (downstream) sw = dst_port->sw; else sw = src_port->sw; tb_for_each_upstream_port_on_path(src_port, dst_port, up) { int consumed_up, consumed_down; /* Already symmetric */ if (up->sw->link_width <= TB_LINK_WIDTH_DUAL) continue; /* Unplugged, no need to switch */ if (up->sw->is_unplugged) continue; ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up, &consumed_up, &consumed_down); if (ret) break; if (downstream) { /* * Downstream so we want the consumed_down < threshold. * Upstream traffic should be less than 36G (40G * guard band 10%) as the link was configured asymmetric * already. */ if (consumed_down >= asym_threshold) continue; } else { if (consumed_up >= asym_threshold) continue; } if (up->sw->link_width == TB_LINK_WIDTH_DUAL) continue; /* * Here consumed < threshold so we can transition the * link to symmetric. * * However, if the router prefers asymmetric link we * honor that (unless @keep_asym is %false). */ if (keep_asym && up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) { tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n"); continue; } /* Disable CL states before doing any transitions */ if (!clx_disabled) { clx = tb_disable_clx(sw); clx_disabled = true; } tb_sw_dbg(up->sw, "configuring symmetric link\n"); ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL); if (ret) { tb_sw_warn(up->sw, "failed to set link width\n"); break; } } /* Re-enable CL states if they were previosly enabled */ if (clx) tb_enable_clx(sw); return ret; } static void tb_configure_link(struct tb_port *down, struct tb_port *up, struct tb_switch *sw) { struct tb *tb = sw->tb; /* Link the routers using both links if available */ down->remote = up; up->remote = down; if (down->dual_link_port && up->dual_link_port) { down->dual_link_port->remote = up->dual_link_port; up->dual_link_port->remote = down->dual_link_port; } /* * Enable lane bonding if the link is currently two single lane * links. */ if (sw->link_width < TB_LINK_WIDTH_DUAL) tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL); /* * Device router that comes up as symmetric link is * connected deeper in the hierarchy, we transition the links * above into symmetric if bandwidth allows. 
*/ if (tb_switch_depth(sw) > 1 && tb_port_get_link_generation(up) >= 4 && up->sw->link_width == TB_LINK_WIDTH_DUAL) { struct tb_port *host_port; host_port = tb_port_at(tb_route(sw), tb->root_switch); tb_configure_sym(tb, host_port, up, false); } /* Set the link configured */ tb_switch_configure_link(sw); } static void tb_scan_port(struct tb_port *port); /* * tb_scan_switch() - scan for and initialize downstream switches */ static void tb_scan_switch(struct tb_switch *sw) { struct tb_port *port; pm_runtime_get_sync(&sw->dev); tb_switch_for_each_port(sw, port) tb_scan_port(port); pm_runtime_mark_last_busy(&sw->dev); pm_runtime_put_autosuspend(&sw->dev); } /* * tb_scan_port() - check for and initialize switches below port */ static void tb_scan_port(struct tb_port *port) { struct tb_cm *tcm = tb_priv(port->sw->tb); struct tb_port *upstream_port; bool discovery = false; struct tb_switch *sw; if (tb_is_upstream_port(port)) return; if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 && !tb_dp_port_is_enabled(port)) { tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n"); tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port, false); return; } if (port->config.type != TB_TYPE_PORT) return; if (port->dual_link_port && port->link_nr) return; /* * Downstream switch is reachable through two ports. * Only scan on the primary port (link_nr == 0). */ if (port->usb4) pm_runtime_get_sync(&port->usb4->dev); if (tb_wait_for_port(port, false) <= 0) goto out_rpm_put; if (port->remote) { tb_port_dbg(port, "port already has a remote\n"); goto out_rpm_put; } tb_retimer_scan(port, true); sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, tb_downstream_route(port)); if (IS_ERR(sw)) { /* * If there is an error accessing the connected switch * it may be connected to another domain. Also we allow * the other domain to be connected to a max depth switch. */ if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL) tb_scan_xdomain(port); goto out_rpm_put; } if (tb_switch_configure(sw)) { tb_switch_put(sw); goto out_rpm_put; } /* * If there was previously another domain connected remove it * first. */ if (port->xdomain) { tb_xdomain_remove(port->xdomain); tb_port_unconfigure_xdomain(port); port->xdomain = NULL; } /* * Do not send uevents until we have discovered all existing * tunnels and know which switches were authorized already by * the boot firmware. */ if (!tcm->hotplug_active) { dev_set_uevent_suppress(&sw->dev, true); discovery = true; } /* * At the moment Thunderbolt 2 and beyond (devices with LC) we * can support runtime PM. */ sw->rpm = sw->generation > 1; if (tb_switch_add(sw)) { tb_switch_put(sw); goto out_rpm_put; } upstream_port = tb_upstream_port(sw); tb_configure_link(port, upstream_port, sw); /* * CL0s and CL1 are enabled and supported together. * Silently ignore CLx enabling in case CLx is not supported. */ if (discovery) tb_sw_dbg(sw, "discovery, not touching CL states\n"); else if (tb_enable_clx(sw)) tb_sw_warn(sw, "failed to enable CL states\n"); if (tb_enable_tmu(sw)) tb_sw_warn(sw, "failed to enable TMU\n"); /* * Configuration valid needs to be set after the TMU has been * enabled for the upstream port of the router so we do it here. */ tb_switch_configuration_valid(sw); /* Scan upstream retimers */ tb_retimer_scan(upstream_port, true); /* * Create USB 3.x tunnels only when the switch is plugged to the * domain. This is because we scan the domain also during discovery * and want to discover existing USB 3.x tunnels before we create * any new. 
*/ if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) tb_sw_warn(sw, "USB3 tunnel creation failed\n"); tb_add_dp_resources(sw); tb_scan_switch(sw); out_rpm_put: if (port->usb4) { pm_runtime_mark_last_busy(&port->usb4->dev); pm_runtime_put_autosuspend(&port->usb4->dev); } } static void tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group) { struct tb_tunnel *first_tunnel; struct tb *tb = group->tb; struct tb_port *in; int ret; tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n", group->index); first_tunnel = NULL; list_for_each_entry(in, &group->ports, group_list) { int estimated_bw, estimated_up, estimated_down; struct tb_tunnel *tunnel; struct tb_port *out; if (!usb4_dp_port_bandwidth_mode_enabled(in)) continue; tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); if (WARN_ON(!tunnel)) break; if (!first_tunnel) { /* * Since USB3 bandwidth is shared by all DP * tunnels under the host router USB4 port, even * if they do not begin from the host router, we * can release USB3 bandwidth just once and not * for each tunnel separately. */ first_tunnel = tunnel; ret = tb_release_unused_usb3_bandwidth(tb, first_tunnel->src_port, first_tunnel->dst_port); if (ret) { tb_tunnel_warn(tunnel, "failed to release unused bandwidth\n"); break; } } out = tunnel->dst_port; ret = tb_available_bandwidth(tb, in, out, &estimated_up, &estimated_down, true); if (ret) { tb_tunnel_warn(tunnel, "failed to re-calculate estimated bandwidth\n"); break; } /* * Estimated bandwidth includes: * - already allocated bandwidth for the DP tunnel * - available bandwidth along the path * - bandwidth allocated for USB 3.x but not used. */ if (tb_tunnel_direction_downstream(tunnel)) estimated_bw = estimated_down; else estimated_bw = estimated_up; /* * If there is reserved bandwidth for the group that is * not yet released we report that too. */ tb_tunnel_dbg(tunnel, "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n", estimated_bw, group->reserved, estimated_bw + group->reserved); if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw + group->reserved)) tb_tunnel_warn(tunnel, "failed to update estimated bandwidth\n"); } if (first_tunnel) tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port, first_tunnel->dst_port); tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index); } static void tb_recalc_estimated_bandwidth(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); int i; tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n"); for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { struct tb_bandwidth_group *group = &tcm->groups[i]; if (!list_empty(&group->ports)) tb_recalc_estimated_bandwidth_for_group(group); } tb_dbg(tb, "bandwidth re-calculation done\n"); } static bool __release_group_bandwidth(struct tb_bandwidth_group *group) { if (group->reserved) { tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index, group->reserved); group->reserved = 0; return true; } return false; } static void __configure_group_sym(struct tb_bandwidth_group *group) { struct tb_tunnel *tunnel; struct tb_port *in; if (list_empty(&group->ports)) return; /* * All the tunnels in the group go through the same USB4 links * so we find the first one here and pass the IN and OUT * adapters to tb_configure_sym() which now transitions the * links back to symmetric if bandwidth requirement < asym_threshold. * * We do this here to avoid unnecessary transitions (for example * if the graphics released bandwidth for other tunnel in the * same group). 
*/ in = list_first_entry(&group->ports, struct tb_port, group_list); tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL); if (tunnel) tb_configure_sym(group->tb, in, tunnel->dst_port, true); } static void tb_bandwidth_group_release_work(struct work_struct *work) { struct tb_bandwidth_group *group = container_of(work, typeof(*group), release_work.work); struct tb *tb = group->tb; mutex_lock(&tb->lock); if (__release_group_bandwidth(group)) tb_recalc_estimated_bandwidth(tb); __configure_group_sym(group); mutex_unlock(&tb->lock); } static void tb_init_bandwidth_groups(struct tb_cm *tcm) { int i; for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { struct tb_bandwidth_group *group = &tcm->groups[i]; group->tb = tcm_to_tb(tcm); group->index = i + 1; INIT_LIST_HEAD(&group->ports); INIT_DELAYED_WORK(&group->release_work, tb_bandwidth_group_release_work); } } static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group, struct tb_port *in) { if (!group || WARN_ON(in->group)) return; in->group = group; list_add_tail(&in->group_list, &group->ports); tb_port_dbg(in, "attached to bandwidth group %d\n", group->index); } static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm) { int i; for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { struct tb_bandwidth_group *group = &tcm->groups[i]; if (list_empty(&group->ports)) return group; } return NULL; } static struct tb_bandwidth_group * tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in, struct tb_port *out) { struct tb_bandwidth_group *group; struct tb_tunnel *tunnel; /* * Find all DP tunnels that go through all the same USB4 links * as this one. Because we always setup tunnels the same way we * can just check for the routers at both ends of the tunnels * and if they are the same we have a match. 
*/ list_for_each_entry(tunnel, &tcm->tunnel_list, list) { if (!tb_tunnel_is_dp(tunnel)) continue; if (tunnel->src_port->sw == in->sw && tunnel->dst_port->sw == out->sw) { group = tunnel->src_port->group; if (group) { tb_bandwidth_group_attach_port(group, in); return group; } } } /* Pick up next available group then */ group = tb_find_free_bandwidth_group(tcm); if (group) tb_bandwidth_group_attach_port(group, in); else tb_port_warn(in, "no available bandwidth groups\n"); return group; } static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in, struct tb_port *out) { if (usb4_dp_port_bandwidth_mode_enabled(in)) { int index, i; index = usb4_dp_port_group_id(in); for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { if (tcm->groups[i].index == index) { tb_bandwidth_group_attach_port(&tcm->groups[i], in); return; } } } tb_attach_bandwidth_group(tcm, in, out); } static void tb_detach_bandwidth_group(struct tb_port *in) { struct tb_bandwidth_group *group = in->group; if (group) { in->group = NULL; list_del_init(&in->group_list); tb_port_dbg(in, "detached from bandwidth group %d\n", group->index); /* No more tunnels so release the reserved bandwidth if any */ if (list_empty(&group->ports)) { cancel_delayed_work(&group->release_work); __release_group_bandwidth(group); } } } static void tb_discover_tunnels(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel; tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true); list_for_each_entry(tunnel, &tcm->tunnel_list, list) { if (tb_tunnel_is_pci(tunnel)) { struct tb_switch *parent = tunnel->dst_port->sw; while (parent != tunnel->src_port->sw) { parent->boot = true; parent = tb_switch_parent(parent); } } else if (tb_tunnel_is_dp(tunnel)) { struct tb_port *in = tunnel->src_port; struct tb_port *out = tunnel->dst_port; /* Keep the domain from powering down */ pm_runtime_get_sync(&in->sw->dev); pm_runtime_get_sync(&out->sw->dev); tb_discover_bandwidth_group(tcm, in, out); } } } static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) { struct tb_port *src_port, *dst_port; struct tb *tb; if (!tunnel) return; tb_tunnel_deactivate(tunnel); list_del(&tunnel->list); tb = tunnel->tb; src_port = tunnel->src_port; dst_port = tunnel->dst_port; switch (tunnel->type) { case TB_TUNNEL_DP: tb_detach_bandwidth_group(src_port); /* * In case of DP tunnel make sure the DP IN resource is * deallocated properly. */ tb_switch_dealloc_dp_resource(src_port->sw, src_port); /* * If bandwidth on a link is < asym_threshold * transition the link to symmetric. */ tb_configure_sym(tb, src_port, dst_port, true); /* Now we can allow the domain to runtime suspend again */ pm_runtime_mark_last_busy(&dst_port->sw->dev); pm_runtime_put_autosuspend(&dst_port->sw->dev); pm_runtime_mark_last_busy(&src_port->sw->dev); pm_runtime_put_autosuspend(&src_port->sw->dev); fallthrough; case TB_TUNNEL_USB3: tb_reclaim_usb3_bandwidth(tb, src_port, dst_port); break; default: /* * PCIe and DMA tunnels do not consume guaranteed * bandwidth. 
*/ break; } tb_tunnel_free(tunnel); } /* * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away */ static void tb_free_invalid_tunnels(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel; struct tb_tunnel *n; list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { if (tb_tunnel_is_invalid(tunnel)) tb_deactivate_and_free_tunnel(tunnel); } } /* * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches */ static void tb_free_unplugged_children(struct tb_switch *sw) { struct tb_port *port; tb_switch_for_each_port(sw, port) { if (!tb_port_has_remote(port)) continue; if (port->remote->sw->is_unplugged) { tb_retimer_remove_all(port); tb_remove_dp_resources(port->remote->sw); tb_switch_unconfigure_link(port->remote->sw); tb_switch_set_link_width(port->remote->sw, TB_LINK_WIDTH_SINGLE); tb_switch_remove(port->remote->sw); port->remote = NULL; if (port->dual_link_port) port->dual_link_port->remote = NULL; } else { tb_free_unplugged_children(port->remote->sw); } } } static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, const struct tb_port *port) { struct tb_port *down = NULL; /* * To keep plugging devices consistently in the same PCIe * hierarchy, do mapping here for switch downstream PCIe ports. */ if (tb_switch_is_usb4(sw)) { down = usb4_switch_map_pcie_down(sw, port); } else if (!tb_route(sw)) { int phy_port = tb_phy_port_from_link(port->port); int index; /* * Hard-coded Thunderbolt port to PCIe down port mapping * per controller. */ if (tb_switch_is_cactus_ridge(sw) || tb_switch_is_alpine_ridge(sw)) index = !phy_port ? 6 : 7; else if (tb_switch_is_falcon_ridge(sw)) index = !phy_port ? 6 : 8; else if (tb_switch_is_titan_ridge(sw)) index = !phy_port ? 8 : 9; else goto out; /* Validate the hard-coding */ if (WARN_ON(index > sw->config.max_port_number)) goto out; down = &sw->ports[index]; } if (down) { if (WARN_ON(!tb_port_is_pcie_down(down))) goto out; if (tb_pci_port_is_enabled(down)) goto out; return down; } out: return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); } static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in) { struct tb_port *host_port, *port; struct tb_cm *tcm = tb_priv(tb); host_port = tb_route(in->sw) ? tb_port_at(tb_route(in->sw), tb->root_switch) : NULL; list_for_each_entry(port, &tcm->dp_resources, list) { if (!tb_port_is_dpout(port)) continue; if (tb_port_is_enabled(port)) { tb_port_dbg(port, "DP OUT in use\n"); continue; } /* Needs to be on different routers */ if (in->sw == port->sw) { tb_port_dbg(port, "skipping DP OUT on same router\n"); continue; } tb_port_dbg(port, "DP OUT available\n"); /* * Keep the DP tunnel under the topology starting from * the same host router downstream port. */ if (host_port && tb_route(port->sw)) { struct tb_port *p; p = tb_port_at(tb_route(port->sw), tb->root_switch); if (p != host_port) continue; } return port; } return NULL; } static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in, struct tb_port *out) { int available_up, available_down, ret, link_nr; struct tb_cm *tcm = tb_priv(tb); int consumed_up, consumed_down; struct tb_tunnel *tunnel; /* * This is only applicable to links that are not bonded (so * when Thunderbolt 1 hardware is involved somewhere in the * topology). For these try to share the DP bandwidth between * the two lanes. 
*/ link_nr = 1; list_for_each_entry(tunnel, &tcm->tunnel_list, list) { if (tb_tunnel_is_dp(tunnel)) { link_nr = 0; break; } } /* * DP stream needs the domain to be active so runtime resume * both ends of the tunnel. * * This should bring the routers in the middle active as well * and keeps the domain from runtime suspending while the DP * tunnel is active. */ pm_runtime_get_sync(&in->sw->dev); pm_runtime_get_sync(&out->sw->dev); if (tb_switch_alloc_dp_resource(in->sw, in)) { tb_port_dbg(in, "no resource available for DP IN, not tunneling\n"); goto err_rpm_put; } if (!tb_attach_bandwidth_group(tcm, in, out)) goto err_dealloc_dp; /* Make all unused USB3 bandwidth available for the new DP tunnel */ ret = tb_release_unused_usb3_bandwidth(tb, in, out); if (ret) { tb_warn(tb, "failed to release unused bandwidth\n"); goto err_detach_group; } ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down, true); if (ret) goto err_reclaim_usb; tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n", available_up, available_down); tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up, available_down); if (!tunnel) { tb_port_dbg(out, "could not allocate DP tunnel\n"); goto err_reclaim_usb; } if (tb_tunnel_activate(tunnel)) { tb_port_info(out, "DP tunnel activation failed, aborting\n"); goto err_free; } /* If fail reading tunnel's consumed bandwidth, tear it down */ ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down); if (ret) goto err_deactivate; list_add_tail(&tunnel->list, &tcm->tunnel_list); tb_reclaim_usb3_bandwidth(tb, in, out); /* * Transition the links to asymmetric if the consumption exceeds * the threshold. */ tb_configure_asym(tb, in, out, consumed_up, consumed_down); /* Update the domain with the new bandwidth estimation */ tb_recalc_estimated_bandwidth(tb); /* * In case of DP tunnel exists, change host router's 1st children * TMU mode to HiFi for CL0s to work. */ tb_increase_tmu_accuracy(tunnel); return true; err_deactivate: tb_tunnel_deactivate(tunnel); err_free: tb_tunnel_free(tunnel); err_reclaim_usb: tb_reclaim_usb3_bandwidth(tb, in, out); err_detach_group: tb_detach_bandwidth_group(in); err_dealloc_dp: tb_switch_dealloc_dp_resource(in->sw, in); err_rpm_put: pm_runtime_mark_last_busy(&out->sw->dev); pm_runtime_put_autosuspend(&out->sw->dev); pm_runtime_mark_last_busy(&in->sw->dev); pm_runtime_put_autosuspend(&in->sw->dev); return false; } static void tb_tunnel_dp(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); struct tb_port *port, *in, *out; if (!tb_acpi_may_tunnel_dp()) { tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n"); return; } /* * Find pair of inactive DP IN and DP OUT adapters and then * establish a DP tunnel between them. 
*/ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n"); in = NULL; out = NULL; list_for_each_entry(port, &tcm->dp_resources, list) { if (!tb_port_is_dpin(port)) continue; if (tb_port_is_enabled(port)) { tb_port_dbg(port, "DP IN in use\n"); continue; } in = port; tb_port_dbg(in, "DP IN available\n"); out = tb_find_dp_out(tb, port); if (out) tb_tunnel_one_dp(tb, in, out); else tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n"); } if (!in) tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n"); } static void tb_enter_redrive(struct tb_port *port) { struct tb_switch *sw = port->sw; if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) return; /* * If we get hot-unplug for the DP IN port of the host router * and the DP resource is not available anymore it means there * is a monitor connected directly to the Type-C port and we are * in "redrive" mode. For this to work we cannot enter RTD3 so * we bump up the runtime PM reference count here. */ if (!tb_port_is_dpin(port)) return; if (tb_route(sw)) return; if (!tb_switch_query_dp_resource(sw, port)) { port->redrive = true; pm_runtime_get(&sw->dev); tb_port_dbg(port, "enter redrive mode, keeping powered\n"); } } static void tb_exit_redrive(struct tb_port *port) { struct tb_switch *sw = port->sw; if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) return; if (!tb_port_is_dpin(port)) return; if (tb_route(sw)) return; if (port->redrive && tb_switch_query_dp_resource(sw, port)) { port->redrive = false; pm_runtime_put(&sw->dev); tb_port_dbg(port, "exit redrive mode\n"); } } static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) { struct tb_port *in, *out; struct tb_tunnel *tunnel; if (tb_port_is_dpin(port)) { tb_port_dbg(port, "DP IN resource unavailable\n"); in = port; out = NULL; } else { tb_port_dbg(port, "DP OUT resource unavailable\n"); in = NULL; out = port; } tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out); if (tunnel) tb_deactivate_and_free_tunnel(tunnel); else tb_enter_redrive(port); list_del_init(&port->list); /* * See if there is another DP OUT port that can be used for * to create another tunnel. */ tb_recalc_estimated_bandwidth(tb); tb_tunnel_dp(tb); } static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) { struct tb_cm *tcm = tb_priv(tb); struct tb_port *p; if (tb_port_is_enabled(port)) return; list_for_each_entry(p, &tcm->dp_resources, list) { if (p == port) return; } tb_port_dbg(port, "DP %s resource available after hotplug\n", tb_port_is_dpin(port) ? "IN" : "OUT"); list_add_tail(&port->list, &tcm->dp_resources); tb_exit_redrive(port); /* Look for suitable DP IN <-> DP OUT pairs now */ tb_tunnel_dp(tb); } static void tb_disconnect_and_release_dp(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel, *n; /* * Tear down all DP tunnels and release their resources. They * will be re-established after resume based on plug events. 
*/ list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) { if (tb_tunnel_is_dp(tunnel)) tb_deactivate_and_free_tunnel(tunnel); } while (!list_empty(&tcm->dp_resources)) { struct tb_port *port; port = list_first_entry(&tcm->dp_resources, struct tb_port, list); list_del_init(&port->list); } } static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw) { struct tb_tunnel *tunnel; struct tb_port *up; up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); if (WARN_ON(!up)) return -ENODEV; tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up); if (WARN_ON(!tunnel)) return -ENODEV; tb_switch_xhci_disconnect(sw); tb_tunnel_deactivate(tunnel); list_del(&tunnel->list); tb_tunnel_free(tunnel); return 0; } static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) { struct tb_port *up, *down, *port; struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel; up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); if (!up) return 0; /* * Look up available down port. Since we are chaining it should * be found right above this switch. */ port = tb_switch_downstream_port(sw); down = tb_find_pcie_down(tb_switch_parent(sw), port); if (!down) return 0; tunnel = tb_tunnel_alloc_pci(tb, up, down); if (!tunnel) return -ENOMEM; if (tb_tunnel_activate(tunnel)) { tb_port_info(up, "PCIe tunnel activation failed, aborting\n"); tb_tunnel_free(tunnel); return -EIO; } /* * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it * here. */ if (tb_switch_pcie_l1_enable(sw)) tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n"); if (tb_switch_xhci_connect(sw)) tb_sw_warn(sw, "failed to connect xHCI\n"); list_add_tail(&tunnel->list, &tcm->tunnel_list); return 0; } static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, int transmit_path, int transmit_ring, int receive_path, int receive_ring) { struct tb_cm *tcm = tb_priv(tb); struct tb_port *nhi_port, *dst_port; struct tb_tunnel *tunnel; struct tb_switch *sw; int ret; sw = tb_to_switch(xd->dev.parent); dst_port = tb_port_at(xd->route, sw); nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); mutex_lock(&tb->lock); /* * When tunneling DMA paths the link should not enter CL states * so disable them now. 
*/ tb_disable_clx(sw); tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path, transmit_ring, receive_path, receive_ring); if (!tunnel) { ret = -ENOMEM; goto err_clx; } if (tb_tunnel_activate(tunnel)) { tb_port_info(nhi_port, "DMA tunnel activation failed, aborting\n"); ret = -EIO; goto err_free; } list_add_tail(&tunnel->list, &tcm->tunnel_list); mutex_unlock(&tb->lock); return 0; err_free: tb_tunnel_free(tunnel); err_clx: tb_enable_clx(sw); mutex_unlock(&tb->lock); return ret; } static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, int transmit_path, int transmit_ring, int receive_path, int receive_ring) { struct tb_cm *tcm = tb_priv(tb); struct tb_port *nhi_port, *dst_port; struct tb_tunnel *tunnel, *n; struct tb_switch *sw; sw = tb_to_switch(xd->dev.parent); dst_port = tb_port_at(xd->route, sw); nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { if (!tb_tunnel_is_dma(tunnel)) continue; if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port) continue; if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring, receive_path, receive_ring)) tb_deactivate_and_free_tunnel(tunnel); } /* * Try to re-enable CL states now, it is OK if this fails * because we may still have another DMA tunnel active through * the same host router USB4 downstream port. */ tb_enable_clx(sw); } static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, int transmit_path, int transmit_ring, int receive_path, int receive_ring) { if (!xd->is_unplugged) { mutex_lock(&tb->lock); __tb_disconnect_xdomain_paths(tb, xd, transmit_path, transmit_ring, receive_path, receive_ring); mutex_unlock(&tb->lock); } return 0; } /* hotplug handling */ /* * tb_handle_hotplug() - handle hotplug event * * Executes on tb->wq. 
*/ static void tb_handle_hotplug(struct work_struct *work) { struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); struct tb *tb = ev->tb; struct tb_cm *tcm = tb_priv(tb); struct tb_switch *sw; struct tb_port *port; /* Bring the domain back from sleep if it was suspended */ pm_runtime_get_sync(&tb->dev); mutex_lock(&tb->lock); if (!tcm->hotplug_active) goto out; /* during init, suspend or shutdown */ sw = tb_switch_find_by_route(tb, ev->route); if (!sw) { tb_warn(tb, "hotplug event from non existent switch %llx:%x (unplug: %d)\n", ev->route, ev->port, ev->unplug); goto out; } if (ev->port > sw->config.max_port_number) { tb_warn(tb, "hotplug event from non existent port %llx:%x (unplug: %d)\n", ev->route, ev->port, ev->unplug); goto put_sw; } port = &sw->ports[ev->port]; if (tb_is_upstream_port(port)) { tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n", ev->route, ev->port, ev->unplug); goto put_sw; } pm_runtime_get_sync(&sw->dev); if (ev->unplug) { tb_retimer_remove_all(port); if (tb_port_has_remote(port)) { tb_port_dbg(port, "switch unplugged\n"); tb_sw_set_unplugged(port->remote->sw); tb_free_invalid_tunnels(tb); tb_remove_dp_resources(port->remote->sw); tb_switch_tmu_disable(port->remote->sw); tb_switch_unconfigure_link(port->remote->sw); tb_switch_set_link_width(port->remote->sw, TB_LINK_WIDTH_SINGLE); tb_switch_remove(port->remote->sw); port->remote = NULL; if (port->dual_link_port) port->dual_link_port->remote = NULL; /* Maybe we can create another DP tunnel */ tb_recalc_estimated_bandwidth(tb); tb_tunnel_dp(tb); } else if (port->xdomain) { struct tb_xdomain *xd = tb_xdomain_get(port->xdomain); tb_port_dbg(port, "xdomain unplugged\n"); /* * Service drivers are unbound during * tb_xdomain_remove() so setting XDomain as * unplugged here prevents deadlock if they call * tb_xdomain_disable_paths(). We will tear down * all the tunnels below. 
*/ xd->is_unplugged = true; tb_xdomain_remove(xd); port->xdomain = NULL; __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1); tb_xdomain_put(xd); tb_port_unconfigure_xdomain(port); } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { tb_dp_resource_unavailable(tb, port); } else if (!port->port) { tb_sw_dbg(sw, "xHCI disconnect request\n"); tb_switch_xhci_disconnect(sw); } else { tb_port_dbg(port, "got unplug event for disconnected port, ignoring\n"); } } else if (port->remote) { tb_port_dbg(port, "got plug event for connected port, ignoring\n"); } else if (!port->port && sw->authorized) { tb_sw_dbg(sw, "xHCI connect request\n"); tb_switch_xhci_connect(sw); } else { if (tb_port_is_null(port)) { tb_port_dbg(port, "hotplug: scanning\n"); tb_scan_port(port); if (!port->remote) tb_port_dbg(port, "hotplug: no switch found\n"); } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { tb_dp_resource_available(tb, port); } } pm_runtime_mark_last_busy(&sw->dev); pm_runtime_put_autosuspend(&sw->dev); put_sw: tb_switch_put(sw); out: mutex_unlock(&tb->lock); pm_runtime_mark_last_busy(&tb->dev); pm_runtime_put_autosuspend(&tb->dev); kfree(ev); } static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, int *requested_down) { int allocated_up, allocated_down, available_up, available_down, ret; int requested_up_corrected, requested_down_corrected, granularity; int max_up, max_down, max_up_rounded, max_down_rounded; struct tb_bandwidth_group *group; struct tb *tb = tunnel->tb; struct tb_port *in, *out; bool downstream; ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down); if (ret) return ret; in = tunnel->src_port; out = tunnel->dst_port; tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n", allocated_up, allocated_down); /* * If we get rounded up request from graphics side, say HBR2 x 4 * that is 17500 instead of 17280 (this is because of the * granularity), we allow it too. Here the graphics has already * negotiated with the DPRX the maximum possible rates (which is * 17280 in this case). * * Since the link cannot go higher than 17280 we use that in our * calculations but the DP IN adapter Allocated BW write must be * the same value (17500) otherwise the adapter will mark it as * failed for graphics. */ ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down); if (ret) goto fail; ret = usb4_dp_port_granularity(in); if (ret < 0) goto fail; granularity = ret; max_up_rounded = roundup(max_up, granularity); max_down_rounded = roundup(max_down, granularity); /* * This will "fix" the request down to the maximum supported * rate * lanes if it is at the maximum rounded up level. 
*/ requested_up_corrected = *requested_up; if (requested_up_corrected == max_up_rounded) requested_up_corrected = max_up; else if (requested_up_corrected < 0) requested_up_corrected = 0; requested_down_corrected = *requested_down; if (requested_down_corrected == max_down_rounded) requested_down_corrected = max_down; else if (requested_down_corrected < 0) requested_down_corrected = 0; tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n", requested_up_corrected, requested_down_corrected); if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) || (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) { tb_tunnel_dbg(tunnel, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n", requested_up_corrected, requested_down_corrected, max_up_rounded, max_down_rounded); ret = -ENOBUFS; goto fail; } downstream = tb_tunnel_direction_downstream(tunnel); group = in->group; if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) || (*requested_down >= 0 && requested_down_corrected <= allocated_down)) { if (tunnel->bw_mode) { int reserved; /* * If requested bandwidth is less or equal than * what is currently allocated to that tunnel we * simply change the reservation of the tunnel * and add the released bandwidth for the group * for the next 10s. Then we release it for * others to use. */ if (downstream) reserved = allocated_down - *requested_down; else reserved = allocated_up - *requested_up; if (reserved > 0) { group->reserved += reserved; tb_dbg(tb, "group %d reserved %d total %d Mb/s\n", group->index, reserved, group->reserved); /* * If it was not already pending, * schedule release now. If it is then * postpone it for the next 10s (unless * it is already running in which case * the 10s already expired and we should * give the reserved back to others). */ mod_delayed_work(system_wq, &group->release_work, msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT)); } } return tb_tunnel_alloc_bandwidth(tunnel, requested_up, requested_down); } /* * More bandwidth is requested. Release all the potential * bandwidth from USB3 first. */ ret = tb_release_unused_usb3_bandwidth(tb, in, out); if (ret) goto fail; /* * Then go over all tunnels that cross the same USB4 ports (they * are also in the same group but we use the same function here * that we use with the normal bandwidth allocation). */ ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down, true); if (ret) goto reclaim; tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n", available_up, available_down, group->reserved); if ((*requested_up >= 0 && available_up + group->reserved >= requested_up_corrected) || (*requested_down >= 0 && available_down + group->reserved >= requested_down_corrected)) { int released = 0; /* * If bandwidth on a link is >= asym_threshold * transition the link to asymmetric. 
*/ ret = tb_configure_asym(tb, in, out, *requested_up, *requested_down); if (ret) { tb_configure_sym(tb, in, out, true); goto fail; } ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up, requested_down); if (ret) { tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n"); tb_configure_sym(tb, in, out, true); } if (downstream) { if (*requested_down > available_down) released = *requested_down - available_down; } else { if (*requested_up > available_up) released = *requested_up - available_up; } if (released) { group->reserved -= released; tb_dbg(tb, "group %d released %d total %d Mb/s\n", group->index, released, group->reserved); } } else { ret = -ENOBUFS; } reclaim: tb_reclaim_usb3_bandwidth(tb, in, out); fail: if (ret && ret != -ENODEV) { /* * Write back the same allocated (so no change), this * makes the DPTX request fail on graphics side. */ tb_tunnel_dbg(tunnel, "failing the request by rewriting allocated %d/%d Mb/s\n", allocated_up, allocated_down); tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down); } return ret; } static void tb_handle_dp_bandwidth_request(struct work_struct *work) { struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); int requested_bw, requested_up, requested_down, ret; struct tb_tunnel *tunnel; struct tb *tb = ev->tb; struct tb_cm *tcm = tb_priv(tb); struct tb_switch *sw; struct tb_port *in; pm_runtime_get_sync(&tb->dev); mutex_lock(&tb->lock); if (!tcm->hotplug_active) goto unlock; sw = tb_switch_find_by_route(tb, ev->route); if (!sw) { tb_warn(tb, "bandwidth request from non-existent router %llx\n", ev->route); goto unlock; } in = &sw->ports[ev->port]; if (!tb_port_is_dpin(in)) { tb_port_warn(in, "bandwidth request to non-DP IN adapter\n"); goto put_sw; } tb_port_dbg(in, "handling bandwidth allocation request\n"); tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); if (!tunnel) { tb_port_warn(in, "failed to find tunnel\n"); goto put_sw; } if (!usb4_dp_port_bandwidth_mode_enabled(in)) { if (tunnel->bw_mode) { /* * Reset the tunnel back to use the legacy * allocation. */ tunnel->bw_mode = false; tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n"); } else { tb_port_warn(in, "bandwidth allocation mode not enabled\n"); } goto put_sw; } ret = usb4_dp_port_requested_bandwidth(in); if (ret < 0) { if (ret == -ENODATA) { /* * There is no request active so this means the * BW allocation mode was enabled from graphics * side. At this point we know that the graphics * driver has read the DRPX capabilities so we * can offer an better bandwidth estimatation. 
*/ tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n"); tb_recalc_estimated_bandwidth(tb); } else { tb_port_warn(in, "failed to read requested bandwidth\n"); } goto put_sw; } requested_bw = ret; tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw); if (tb_tunnel_direction_downstream(tunnel)) { requested_up = -1; requested_down = requested_bw; } else { requested_up = requested_bw; requested_down = -1; } ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down); if (ret) { if (ret == -ENOBUFS) tb_tunnel_warn(tunnel, "not enough bandwidth available\n"); else tb_tunnel_warn(tunnel, "failed to change bandwidth allocation\n"); } else { tb_tunnel_dbg(tunnel, "bandwidth allocation changed to %d/%d Mb/s\n", requested_up, requested_down); /* Update other clients about the allocation change */ tb_recalc_estimated_bandwidth(tb); } put_sw: tb_switch_put(sw); unlock: mutex_unlock(&tb->lock); pm_runtime_mark_last_busy(&tb->dev); pm_runtime_put_autosuspend(&tb->dev); kfree(ev); } static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port) { struct tb_hotplug_event *ev; ev = kmalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return; ev->tb = tb; ev->route = route; ev->port = port; INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request); queue_work(tb->wq, &ev->work); } static void tb_handle_notification(struct tb *tb, u64 route, const struct cfg_error_pkg *error) { switch (error->error) { case TB_CFG_ERROR_PCIE_WAKE: case TB_CFG_ERROR_DP_CON_CHANGE: case TB_CFG_ERROR_DPTX_DISCOVERY: if (tb_cfg_ack_notification(tb->ctl, route, error)) tb_warn(tb, "could not ack notification on %llx\n", route); break; case TB_CFG_ERROR_DP_BW: if (tb_cfg_ack_notification(tb->ctl, route, error)) tb_warn(tb, "could not ack notification on %llx\n", route); tb_queue_dp_bandwidth_request(tb, route, error->port); break; default: /* Ignore for now */ break; } } /* * tb_schedule_hotplug_handler() - callback function for the control channel * * Delegates to tb_handle_hotplug. */ static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, const void *buf, size_t size) { const struct cfg_event_pkg *pkg = buf; u64 route = tb_cfg_get_route(&pkg->header); switch (type) { case TB_CFG_PKG_ERROR: tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf); return; case TB_CFG_PKG_EVENT: break; default: tb_warn(tb, "unexpected event %#x, ignoring\n", type); return; } if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) { tb_warn(tb, "could not ack plug event on %llx:%x\n", route, pkg->port); } tb_queue_hotplug(tb, route, pkg->port, pkg->unplug); } static void tb_stop(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel; struct tb_tunnel *n; cancel_delayed_work(&tcm->remove_work); /* tunnels are only present after everything has been initialized */ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { /* * DMA tunnels require the driver to be functional so we * tear them down. Other protocol tunnels can be left * intact. 
*/ if (tb_tunnel_is_dma(tunnel)) tb_tunnel_deactivate(tunnel); tb_tunnel_free(tunnel); } tb_switch_remove(tb->root_switch); tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ } static void tb_deinit(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); int i; /* Cancel all the release bandwidth workers */ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) cancel_delayed_work_sync(&tcm->groups[i].release_work); } static int tb_scan_finalize_switch(struct device *dev, void *data) { if (tb_is_switch(dev)) { struct tb_switch *sw = tb_to_switch(dev); /* * If we found that the switch was already setup by the * boot firmware, mark it as authorized now before we * send uevent to userspace. */ if (sw->boot) sw->authorized = 1; dev_set_uevent_suppress(dev, false); kobject_uevent(&dev->kobj, KOBJ_ADD); device_for_each_child(dev, NULL, tb_scan_finalize_switch); } return 0; } static int tb_start(struct tb *tb, bool reset) { struct tb_cm *tcm = tb_priv(tb); bool discover = true; int ret; tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); if (IS_ERR(tb->root_switch)) return PTR_ERR(tb->root_switch); /* * ICM firmware upgrade needs running firmware and in native * mode that is not available so disable firmware upgrade of the * root switch. * * However, USB4 routers support NVM firmware upgrade if they * implement the necessary router operations. */ tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch); /* All USB4 routers support runtime PM */ tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch); ret = tb_switch_configure(tb->root_switch); if (ret) { tb_switch_put(tb->root_switch); return ret; } /* Announce the switch to the world */ ret = tb_switch_add(tb->root_switch); if (ret) { tb_switch_put(tb->root_switch); return ret; } /* * To support highest CLx state, we set host router's TMU to * Normal mode. */ tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES); /* Enable TMU if it is off */ tb_switch_tmu_enable(tb->root_switch); /* * Boot firmware might have created tunnels of its own. Since we * cannot be sure they are usable for us, tear them down and * reset the ports to handle it as new hotplug for USB4 v1 * routers (for USB4 v2 and beyond we already do host reset). */ if (reset && tb_switch_is_usb4(tb->root_switch)) { discover = false; if (usb4_switch_version(tb->root_switch) == 1) tb_switch_reset(tb->root_switch); } if (discover) { /* Full scan to discover devices added before the driver was loaded. */ tb_scan_switch(tb->root_switch); /* Find out tunnels created by the boot firmware */ tb_discover_tunnels(tb); /* Add DP resources from the DP tunnels created by the boot firmware */ tb_discover_dp_resources(tb); } /* * If the boot firmware did not create USB 3.x tunnels create them * now for the whole topology. 
*/ tb_create_usb3_tunnels(tb->root_switch); /* Add DP IN resources for the root switch */ tb_add_dp_resources(tb->root_switch); /* Make the discovered switches available to the userspace */ device_for_each_child(&tb->root_switch->dev, NULL, tb_scan_finalize_switch); /* Allow tb_handle_hotplug to progress events */ tcm->hotplug_active = true; return 0; } static int tb_suspend_noirq(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); tb_dbg(tb, "suspending...\n"); tb_disconnect_and_release_dp(tb); tb_switch_suspend(tb->root_switch, false); tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ tb_dbg(tb, "suspend finished\n"); return 0; } static void tb_restore_children(struct tb_switch *sw) { struct tb_port *port; /* No need to restore if the router is already unplugged */ if (sw->is_unplugged) return; if (tb_enable_clx(sw)) tb_sw_warn(sw, "failed to re-enable CL states\n"); if (tb_enable_tmu(sw)) tb_sw_warn(sw, "failed to restore TMU configuration\n"); tb_switch_configuration_valid(sw); tb_switch_for_each_port(sw, port) { if (!tb_port_has_remote(port) && !port->xdomain) continue; if (port->remote) { tb_switch_set_link_width(port->remote->sw, port->remote->sw->link_width); tb_switch_configure_link(port->remote->sw); tb_restore_children(port->remote->sw); } else if (port->xdomain) { tb_port_configure_xdomain(port, port->xdomain); } } } static int tb_resume_noirq(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel, *n; unsigned int usb3_delay = 0; LIST_HEAD(tunnels); tb_dbg(tb, "resuming...\n"); /* * For non-USB4 hosts (Apple systems) remove any PCIe devices * the firmware might have setup. */ if (!tb_switch_is_usb4(tb->root_switch)) tb_switch_reset(tb->root_switch); tb_switch_resume(tb->root_switch, false); tb_free_invalid_tunnels(tb); tb_free_unplugged_children(tb->root_switch); tb_restore_children(tb->root_switch); /* * If we get here from suspend to disk the boot firmware or the * restore kernel might have created tunnels of its own. Since * we cannot be sure they are usable for us we find and tear * them down. */ tb_switch_discover_tunnels(tb->root_switch, &tunnels, false); list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) { if (tb_tunnel_is_usb3(tunnel)) usb3_delay = 500; tb_tunnel_deactivate(tunnel); tb_tunnel_free(tunnel); } /* Re-create our tunnels now */ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { /* USB3 requires delay before it can be re-activated */ if (tb_tunnel_is_usb3(tunnel)) { msleep(usb3_delay); /* Only need to do it once */ usb3_delay = 0; } tb_tunnel_restart(tunnel); } if (!list_empty(&tcm->tunnel_list)) { /* * the pcie links need some time to get going. * 100ms works for me... 
*/ tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n"); msleep(100); } /* Allow tb_handle_hotplug to progress events */ tcm->hotplug_active = true; tb_dbg(tb, "resume finished\n"); return 0; } static int tb_free_unplugged_xdomains(struct tb_switch *sw) { struct tb_port *port; int ret = 0; tb_switch_for_each_port(sw, port) { if (tb_is_upstream_port(port)) continue; if (port->xdomain && port->xdomain->is_unplugged) { tb_retimer_remove_all(port); tb_xdomain_remove(port->xdomain); tb_port_unconfigure_xdomain(port); port->xdomain = NULL; ret++; } else if (port->remote) { ret += tb_free_unplugged_xdomains(port->remote->sw); } } return ret; } static int tb_freeze_noirq(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); tcm->hotplug_active = false; return 0; } static int tb_thaw_noirq(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); tcm->hotplug_active = true; return 0; } static void tb_complete(struct tb *tb) { /* * Release any unplugged XDomains and if there is a case where * another domain is swapped in place of unplugged XDomain we * need to run another rescan. */ mutex_lock(&tb->lock); if (tb_free_unplugged_xdomains(tb->root_switch)) tb_scan_switch(tb->root_switch); mutex_unlock(&tb->lock); } static int tb_runtime_suspend(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); mutex_lock(&tb->lock); tb_switch_suspend(tb->root_switch, true); tcm->hotplug_active = false; mutex_unlock(&tb->lock); return 0; } static void tb_remove_work(struct work_struct *work) { struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work); struct tb *tb = tcm_to_tb(tcm); mutex_lock(&tb->lock); if (tb->root_switch) { tb_free_unplugged_children(tb->root_switch); tb_free_unplugged_xdomains(tb->root_switch); } mutex_unlock(&tb->lock); } static int tb_runtime_resume(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel, *n; mutex_lock(&tb->lock); tb_switch_resume(tb->root_switch, true); tb_free_invalid_tunnels(tb); tb_restore_children(tb->root_switch); list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) tb_tunnel_restart(tunnel); tcm->hotplug_active = true; mutex_unlock(&tb->lock); /* * Schedule cleanup of any unplugged devices. Run this in a * separate thread to avoid possible deadlock if the device * removal runtime resumes the unplugged device. */ queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50)); return 0; } static const struct tb_cm_ops tb_cm_ops = { .start = tb_start, .stop = tb_stop, .deinit = tb_deinit, .suspend_noirq = tb_suspend_noirq, .resume_noirq = tb_resume_noirq, .freeze_noirq = tb_freeze_noirq, .thaw_noirq = tb_thaw_noirq, .complete = tb_complete, .runtime_suspend = tb_runtime_suspend, .runtime_resume = tb_runtime_resume, .handle_event = tb_handle_event, .disapprove_switch = tb_disconnect_pci, .approve_switch = tb_tunnel_pci, .approve_xdomain_paths = tb_approve_xdomain_paths, .disconnect_xdomain_paths = tb_disconnect_xdomain_paths, }; /* * During suspend the Thunderbolt controller is reset and all PCIe * tunnels are lost. The NHI driver will try to reestablish all tunnels * during resume. This adds device links between the tunneled PCIe * downstream ports and the NHI so that the device core will make sure * NHI is resumed first before the rest. 
*/ static bool tb_apple_add_links(struct tb_nhi *nhi) { struct pci_dev *upstream, *pdev; bool ret; if (!x86_apple_machine) return false; switch (nhi->pdev->device) { case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: break; default: return false; } upstream = pci_upstream_bridge(nhi->pdev); while (upstream) { if (!pci_is_pcie(upstream)) return false; if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM) break; upstream = pci_upstream_bridge(upstream); } if (!upstream) return false; /* * For each hotplug downstream port, create add device link * back to NHI so that PCIe tunnels can be re-established after * sleep. */ ret = false; for_each_pci_bridge(pdev, upstream->subordinate) { const struct device_link *link; if (!pci_is_pcie(pdev)) continue; if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM || !pdev->is_hotplug_bridge) continue; link = device_link_add(&pdev->dev, &nhi->pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER | DL_FLAG_PM_RUNTIME); if (link) { dev_dbg(&nhi->pdev->dev, "created link from %s\n", dev_name(&pdev->dev)); ret = true; } else { dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n", dev_name(&pdev->dev)); } } return ret; } struct tb *tb_probe(struct tb_nhi *nhi) { struct tb_cm *tcm; struct tb *tb; tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm)); if (!tb) return NULL; if (tb_acpi_may_tunnel_pcie()) tb->security_level = TB_SECURITY_USER; else tb->security_level = TB_SECURITY_NOPCIE; tb->cm_ops = &tb_cm_ops; tcm = tb_priv(tb); INIT_LIST_HEAD(&tcm->tunnel_list); INIT_LIST_HEAD(&tcm->dp_resources); INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work); tb_init_bandwidth_groups(tcm); tb_dbg(tb, "using software connection manager\n"); /* * Device links are needed to make sure we establish tunnels * before the PCIe/USB stack is resumed so complain here if we * found them missing. */ if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi)) tb_warn(tb, "device links to tunneled native ports are missing!\n"); return tb; }
/***********************license start*************** * Author: Cavium Networks * * Contact: [email protected] * This file is part of the OCTEON SDK * * Copyright (c) 2003-2012 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ #ifndef __CVMX_AGL_DEFS_H__ #define __CVMX_AGL_DEFS_H__ #define CVMX_AGL_GMX_BAD_REG (CVMX_ADD_IO_SEG(0x00011800E0000518ull)) #define CVMX_AGL_GMX_BIST (CVMX_ADD_IO_SEG(0x00011800E0000400ull)) #define CVMX_AGL_GMX_DRV_CTL (CVMX_ADD_IO_SEG(0x00011800E00007F0ull)) #define CVMX_AGL_GMX_INF_MODE (CVMX_ADD_IO_SEG(0x00011800E00007F8ull)) #define CVMX_AGL_GMX_PRTX_CFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_ADR_CAM0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_ADR_CAM1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_ADR_CAM2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_ADR_CAM3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_ADR_CAM4(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_ADR_CAM5(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_ADR_CAM_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_ADR_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_DECISION(offset) (CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_FRM_CHK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_FRM_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_FRM_MAX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_FRM_MIN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_IFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_INT_REG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_JABBER(offset) (CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_RX_INBND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 
2048) #define CVMX_AGL_GMX_RXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_STATS_OCTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_STATS_PKTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(offset) (CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RXX_UDD_SKP(offset) (CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_RX_BP_DROPX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8) #define CVMX_AGL_GMX_RX_BP_OFFX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8) #define CVMX_AGL_GMX_RX_BP_ONX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8) #define CVMX_AGL_GMX_RX_PRT_INFO (CVMX_ADD_IO_SEG(0x00011800E00004E8ull)) #define CVMX_AGL_GMX_RX_TX_STATUS (CVMX_ADD_IO_SEG(0x00011800E00007E8ull)) #define CVMX_AGL_GMX_SMACX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_STAT_BP (CVMX_ADD_IO_SEG(0x00011800E0000520ull)) #define CVMX_AGL_GMX_TXX_APPEND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_CLK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_MIN_PKT(offset) (CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_PAUSE_TOGO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_PAUSE_ZERO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_SOFT_PAUSE(offset) (CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_STAT0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_STAT1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_STAT2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_STAT3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_STAT4(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_STAT5(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_STAT6(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 
2048) #define CVMX_AGL_GMX_TXX_STAT7(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_STAT8(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_STAT9(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TXX_THRESH(offset) (CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048) #define CVMX_AGL_GMX_TX_BP (CVMX_ADD_IO_SEG(0x00011800E00004D0ull)) #define CVMX_AGL_GMX_TX_COL_ATTEMPT (CVMX_ADD_IO_SEG(0x00011800E0000498ull)) #define CVMX_AGL_GMX_TX_IFG (CVMX_ADD_IO_SEG(0x00011800E0000488ull)) #define CVMX_AGL_GMX_TX_INT_EN (CVMX_ADD_IO_SEG(0x00011800E0000508ull)) #define CVMX_AGL_GMX_TX_INT_REG (CVMX_ADD_IO_SEG(0x00011800E0000500ull)) #define CVMX_AGL_GMX_TX_JAM (CVMX_ADD_IO_SEG(0x00011800E0000490ull)) #define CVMX_AGL_GMX_TX_LFSR (CVMX_ADD_IO_SEG(0x00011800E00004F8ull)) #define CVMX_AGL_GMX_TX_OVR_BP (CVMX_ADD_IO_SEG(0x00011800E00004C8ull)) #define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC (CVMX_ADD_IO_SEG(0x00011800E00004A0ull)) #define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE (CVMX_ADD_IO_SEG(0x00011800E00004A8ull)) #define CVMX_AGL_PRTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8) union cvmx_agl_gmx_bad_reg { uint64_t u64; struct cvmx_agl_gmx_bad_reg_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_38_63:26; uint64_t txpsh1:1; uint64_t txpop1:1; uint64_t ovrflw1:1; uint64_t txpsh:1; uint64_t txpop:1; uint64_t ovrflw:1; uint64_t reserved_27_31:5; uint64_t statovr:1; uint64_t reserved_24_25:2; uint64_t loststat:2; uint64_t reserved_4_21:18; uint64_t out_ovr:2; uint64_t reserved_0_1:2; #else uint64_t reserved_0_1:2; uint64_t out_ovr:2; uint64_t reserved_4_21:18; uint64_t loststat:2; uint64_t reserved_24_25:2; uint64_t statovr:1; uint64_t reserved_27_31:5; uint64_t ovrflw:1; uint64_t txpop:1; uint64_t txpsh:1; uint64_t ovrflw1:1; uint64_t txpop1:1; uint64_t txpsh1:1; uint64_t reserved_38_63:26; #endif } s; struct cvmx_agl_gmx_bad_reg_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_38_63:26; uint64_t txpsh1:1; uint64_t txpop1:1; uint64_t ovrflw1:1; uint64_t txpsh:1; uint64_t txpop:1; uint64_t ovrflw:1; uint64_t reserved_27_31:5; uint64_t statovr:1; uint64_t reserved_23_25:3; uint64_t loststat:1; uint64_t reserved_4_21:18; uint64_t out_ovr:2; uint64_t reserved_0_1:2; #else uint64_t reserved_0_1:2; uint64_t out_ovr:2; uint64_t reserved_4_21:18; uint64_t loststat:1; uint64_t reserved_23_25:3; uint64_t statovr:1; uint64_t reserved_27_31:5; uint64_t ovrflw:1; uint64_t txpop:1; uint64_t txpsh:1; uint64_t ovrflw1:1; uint64_t txpop1:1; uint64_t txpsh1:1; uint64_t reserved_38_63:26; #endif } cn52xx; struct cvmx_agl_gmx_bad_reg_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_35_63:29; uint64_t txpsh:1; uint64_t txpop:1; uint64_t ovrflw:1; uint64_t reserved_27_31:5; uint64_t statovr:1; uint64_t reserved_23_25:3; uint64_t loststat:1; uint64_t reserved_3_21:19; uint64_t out_ovr:1; uint64_t reserved_0_1:2; #else uint64_t reserved_0_1:2; uint64_t out_ovr:1; uint64_t reserved_3_21:19; uint64_t loststat:1; uint64_t reserved_23_25:3; uint64_t statovr:1; uint64_t reserved_27_31:5; uint64_t ovrflw:1; uint64_t txpop:1; uint64_t txpsh:1; uint64_t reserved_35_63:29; #endif } cn56xx; }; union cvmx_agl_gmx_bist { uint64_t u64; struct cvmx_agl_gmx_bist_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_25_63:39; uint64_t status:25; #else uint64_t 
status:25; uint64_t reserved_25_63:39; #endif } s; struct cvmx_agl_gmx_bist_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; uint64_t status:10; #else uint64_t status:10; uint64_t reserved_10_63:54; #endif } cn52xx; }; union cvmx_agl_gmx_drv_ctl { uint64_t u64; struct cvmx_agl_gmx_drv_ctl_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_49_63:15; uint64_t byp_en1:1; uint64_t reserved_45_47:3; uint64_t pctl1:5; uint64_t reserved_37_39:3; uint64_t nctl1:5; uint64_t reserved_17_31:15; uint64_t byp_en:1; uint64_t reserved_13_15:3; uint64_t pctl:5; uint64_t reserved_5_7:3; uint64_t nctl:5; #else uint64_t nctl:5; uint64_t reserved_5_7:3; uint64_t pctl:5; uint64_t reserved_13_15:3; uint64_t byp_en:1; uint64_t reserved_17_31:15; uint64_t nctl1:5; uint64_t reserved_37_39:3; uint64_t pctl1:5; uint64_t reserved_45_47:3; uint64_t byp_en1:1; uint64_t reserved_49_63:15; #endif } s; struct cvmx_agl_gmx_drv_ctl_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_17_63:47; uint64_t byp_en:1; uint64_t reserved_13_15:3; uint64_t pctl:5; uint64_t reserved_5_7:3; uint64_t nctl:5; #else uint64_t nctl:5; uint64_t reserved_5_7:3; uint64_t pctl:5; uint64_t reserved_13_15:3; uint64_t byp_en:1; uint64_t reserved_17_63:47; #endif } cn56xx; }; union cvmx_agl_gmx_inf_mode { uint64_t u64; struct cvmx_agl_gmx_inf_mode_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_2_63:62; uint64_t en:1; uint64_t reserved_0_0:1; #else uint64_t reserved_0_0:1; uint64_t en:1; uint64_t reserved_2_63:62; #endif } s; }; union cvmx_agl_gmx_prtx_cfg { uint64_t u64; struct cvmx_agl_gmx_prtx_cfg_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_14_63:50; uint64_t tx_idle:1; uint64_t rx_idle:1; uint64_t reserved_9_11:3; uint64_t speed_msb:1; uint64_t reserved_7_7:1; uint64_t burst:1; uint64_t tx_en:1; uint64_t rx_en:1; uint64_t slottime:1; uint64_t duplex:1; uint64_t speed:1; uint64_t en:1; #else uint64_t en:1; uint64_t speed:1; uint64_t duplex:1; uint64_t slottime:1; uint64_t rx_en:1; uint64_t tx_en:1; uint64_t burst:1; uint64_t reserved_7_7:1; uint64_t speed_msb:1; uint64_t reserved_9_11:3; uint64_t rx_idle:1; uint64_t tx_idle:1; uint64_t reserved_14_63:50; #endif } s; struct cvmx_agl_gmx_prtx_cfg_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_6_63:58; uint64_t tx_en:1; uint64_t rx_en:1; uint64_t slottime:1; uint64_t duplex:1; uint64_t speed:1; uint64_t en:1; #else uint64_t en:1; uint64_t speed:1; uint64_t duplex:1; uint64_t slottime:1; uint64_t rx_en:1; uint64_t tx_en:1; uint64_t reserved_6_63:58; #endif } cn52xx; }; union cvmx_agl_gmx_rxx_adr_cam0 { uint64_t u64; struct cvmx_agl_gmx_rxx_adr_cam0_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t adr:64; #else uint64_t adr:64; #endif } s; }; union cvmx_agl_gmx_rxx_adr_cam1 { uint64_t u64; struct cvmx_agl_gmx_rxx_adr_cam1_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t adr:64; #else uint64_t adr:64; #endif } s; }; union cvmx_agl_gmx_rxx_adr_cam2 { uint64_t u64; struct cvmx_agl_gmx_rxx_adr_cam2_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t adr:64; #else uint64_t adr:64; #endif } s; }; union cvmx_agl_gmx_rxx_adr_cam3 { uint64_t u64; struct cvmx_agl_gmx_rxx_adr_cam3_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t adr:64; #else uint64_t adr:64; #endif } s; }; union cvmx_agl_gmx_rxx_adr_cam4 { uint64_t u64; struct cvmx_agl_gmx_rxx_adr_cam4_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t adr:64; #else uint64_t adr:64; #endif } s; }; union cvmx_agl_gmx_rxx_adr_cam5 { uint64_t u64; struct cvmx_agl_gmx_rxx_adr_cam5_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t adr:64; #else uint64_t adr:64; #endif } s; 
}; union cvmx_agl_gmx_rxx_adr_cam_en { uint64_t u64; struct cvmx_agl_gmx_rxx_adr_cam_en_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_8_63:56; uint64_t en:8; #else uint64_t en:8; uint64_t reserved_8_63:56; #endif } s; }; union cvmx_agl_gmx_rxx_adr_ctl { uint64_t u64; struct cvmx_agl_gmx_rxx_adr_ctl_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_4_63:60; uint64_t cam_mode:1; uint64_t mcst:2; uint64_t bcst:1; #else uint64_t bcst:1; uint64_t mcst:2; uint64_t cam_mode:1; uint64_t reserved_4_63:60; #endif } s; }; union cvmx_agl_gmx_rxx_decision { uint64_t u64; struct cvmx_agl_gmx_rxx_decision_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_5_63:59; uint64_t cnt:5; #else uint64_t cnt:5; uint64_t reserved_5_63:59; #endif } s; }; union cvmx_agl_gmx_rxx_frm_chk { uint64_t u64; struct cvmx_agl_gmx_rxx_frm_chk_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; uint64_t niberr:1; uint64_t skperr:1; uint64_t rcverr:1; uint64_t lenerr:1; uint64_t alnerr:1; uint64_t fcserr:1; uint64_t jabber:1; uint64_t maxerr:1; uint64_t carext:1; uint64_t minerr:1; #else uint64_t minerr:1; uint64_t carext:1; uint64_t maxerr:1; uint64_t jabber:1; uint64_t fcserr:1; uint64_t alnerr:1; uint64_t lenerr:1; uint64_t rcverr:1; uint64_t skperr:1; uint64_t niberr:1; uint64_t reserved_10_63:54; #endif } s; struct cvmx_agl_gmx_rxx_frm_chk_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_9_63:55; uint64_t skperr:1; uint64_t rcverr:1; uint64_t lenerr:1; uint64_t alnerr:1; uint64_t fcserr:1; uint64_t jabber:1; uint64_t maxerr:1; uint64_t reserved_1_1:1; uint64_t minerr:1; #else uint64_t minerr:1; uint64_t reserved_1_1:1; uint64_t maxerr:1; uint64_t jabber:1; uint64_t fcserr:1; uint64_t alnerr:1; uint64_t lenerr:1; uint64_t rcverr:1; uint64_t skperr:1; uint64_t reserved_9_63:55; #endif } cn52xx; }; union cvmx_agl_gmx_rxx_frm_ctl { uint64_t u64; struct cvmx_agl_gmx_rxx_frm_ctl_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_13_63:51; uint64_t ptp_mode:1; uint64_t reserved_11_11:1; uint64_t null_dis:1; uint64_t pre_align:1; uint64_t pad_len:1; uint64_t vlan_len:1; uint64_t pre_free:1; uint64_t ctl_smac:1; uint64_t ctl_mcst:1; uint64_t ctl_bck:1; uint64_t ctl_drp:1; uint64_t pre_strp:1; uint64_t pre_chk:1; #else uint64_t pre_chk:1; uint64_t pre_strp:1; uint64_t ctl_drp:1; uint64_t ctl_bck:1; uint64_t ctl_mcst:1; uint64_t ctl_smac:1; uint64_t pre_free:1; uint64_t vlan_len:1; uint64_t pad_len:1; uint64_t pre_align:1; uint64_t null_dis:1; uint64_t reserved_11_11:1; uint64_t ptp_mode:1; uint64_t reserved_13_63:51; #endif } s; struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; uint64_t pre_align:1; uint64_t pad_len:1; uint64_t vlan_len:1; uint64_t pre_free:1; uint64_t ctl_smac:1; uint64_t ctl_mcst:1; uint64_t ctl_bck:1; uint64_t ctl_drp:1; uint64_t pre_strp:1; uint64_t pre_chk:1; #else uint64_t pre_chk:1; uint64_t pre_strp:1; uint64_t ctl_drp:1; uint64_t ctl_bck:1; uint64_t ctl_mcst:1; uint64_t ctl_smac:1; uint64_t pre_free:1; uint64_t vlan_len:1; uint64_t pad_len:1; uint64_t pre_align:1; uint64_t reserved_10_63:54; #endif } cn52xx; }; union cvmx_agl_gmx_rxx_frm_max { uint64_t u64; struct cvmx_agl_gmx_rxx_frm_max_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; uint64_t len:16; #else uint64_t len:16; uint64_t reserved_16_63:48; #endif } s; }; union cvmx_agl_gmx_rxx_frm_min { uint64_t u64; struct cvmx_agl_gmx_rxx_frm_min_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; uint64_t len:16; #else uint64_t len:16; uint64_t reserved_16_63:48; 
#endif } s; }; union cvmx_agl_gmx_rxx_ifg { uint64_t u64; struct cvmx_agl_gmx_rxx_ifg_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_4_63:60; uint64_t ifg:4; #else uint64_t ifg:4; uint64_t reserved_4_63:60; #endif } s; }; union cvmx_agl_gmx_rxx_int_en { uint64_t u64; struct cvmx_agl_gmx_rxx_int_en_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; uint64_t pause_drp:1; uint64_t phy_dupx:1; uint64_t phy_spd:1; uint64_t phy_link:1; uint64_t ifgerr:1; uint64_t coldet:1; uint64_t falerr:1; uint64_t rsverr:1; uint64_t pcterr:1; uint64_t ovrerr:1; uint64_t niberr:1; uint64_t skperr:1; uint64_t rcverr:1; uint64_t lenerr:1; uint64_t alnerr:1; uint64_t fcserr:1; uint64_t jabber:1; uint64_t maxerr:1; uint64_t carext:1; uint64_t minerr:1; #else uint64_t minerr:1; uint64_t carext:1; uint64_t maxerr:1; uint64_t jabber:1; uint64_t fcserr:1; uint64_t alnerr:1; uint64_t lenerr:1; uint64_t rcverr:1; uint64_t skperr:1; uint64_t niberr:1; uint64_t ovrerr:1; uint64_t pcterr:1; uint64_t rsverr:1; uint64_t falerr:1; uint64_t coldet:1; uint64_t ifgerr:1; uint64_t phy_link:1; uint64_t phy_spd:1; uint64_t phy_dupx:1; uint64_t pause_drp:1; uint64_t reserved_20_63:44; #endif } s; struct cvmx_agl_gmx_rxx_int_en_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; uint64_t pause_drp:1; uint64_t reserved_16_18:3; uint64_t ifgerr:1; uint64_t coldet:1; uint64_t falerr:1; uint64_t rsverr:1; uint64_t pcterr:1; uint64_t ovrerr:1; uint64_t reserved_9_9:1; uint64_t skperr:1; uint64_t rcverr:1; uint64_t lenerr:1; uint64_t alnerr:1; uint64_t fcserr:1; uint64_t jabber:1; uint64_t maxerr:1; uint64_t reserved_1_1:1; uint64_t minerr:1; #else uint64_t minerr:1; uint64_t reserved_1_1:1; uint64_t maxerr:1; uint64_t jabber:1; uint64_t fcserr:1; uint64_t alnerr:1; uint64_t lenerr:1; uint64_t rcverr:1; uint64_t skperr:1; uint64_t reserved_9_9:1; uint64_t ovrerr:1; uint64_t pcterr:1; uint64_t rsverr:1; uint64_t falerr:1; uint64_t coldet:1; uint64_t ifgerr:1; uint64_t reserved_16_18:3; uint64_t pause_drp:1; uint64_t reserved_20_63:44; #endif } cn52xx; }; union cvmx_agl_gmx_rxx_int_reg { uint64_t u64; struct cvmx_agl_gmx_rxx_int_reg_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; uint64_t pause_drp:1; uint64_t phy_dupx:1; uint64_t phy_spd:1; uint64_t phy_link:1; uint64_t ifgerr:1; uint64_t coldet:1; uint64_t falerr:1; uint64_t rsverr:1; uint64_t pcterr:1; uint64_t ovrerr:1; uint64_t niberr:1; uint64_t skperr:1; uint64_t rcverr:1; uint64_t lenerr:1; uint64_t alnerr:1; uint64_t fcserr:1; uint64_t jabber:1; uint64_t maxerr:1; uint64_t carext:1; uint64_t minerr:1; #else uint64_t minerr:1; uint64_t carext:1; uint64_t maxerr:1; uint64_t jabber:1; uint64_t fcserr:1; uint64_t alnerr:1; uint64_t lenerr:1; uint64_t rcverr:1; uint64_t skperr:1; uint64_t niberr:1; uint64_t ovrerr:1; uint64_t pcterr:1; uint64_t rsverr:1; uint64_t falerr:1; uint64_t coldet:1; uint64_t ifgerr:1; uint64_t phy_link:1; uint64_t phy_spd:1; uint64_t phy_dupx:1; uint64_t pause_drp:1; uint64_t reserved_20_63:44; #endif } s; struct cvmx_agl_gmx_rxx_int_reg_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_20_63:44; uint64_t pause_drp:1; uint64_t reserved_16_18:3; uint64_t ifgerr:1; uint64_t coldet:1; uint64_t falerr:1; uint64_t rsverr:1; uint64_t pcterr:1; uint64_t ovrerr:1; uint64_t reserved_9_9:1; uint64_t skperr:1; uint64_t rcverr:1; uint64_t lenerr:1; uint64_t alnerr:1; uint64_t fcserr:1; uint64_t jabber:1; uint64_t maxerr:1; uint64_t reserved_1_1:1; uint64_t minerr:1; #else uint64_t minerr:1; uint64_t reserved_1_1:1; uint64_t 
maxerr:1; uint64_t jabber:1; uint64_t fcserr:1; uint64_t alnerr:1; uint64_t lenerr:1; uint64_t rcverr:1; uint64_t skperr:1; uint64_t reserved_9_9:1; uint64_t ovrerr:1; uint64_t pcterr:1; uint64_t rsverr:1; uint64_t falerr:1; uint64_t coldet:1; uint64_t ifgerr:1; uint64_t reserved_16_18:3; uint64_t pause_drp:1; uint64_t reserved_20_63:44; #endif } cn52xx; }; union cvmx_agl_gmx_rxx_jabber { uint64_t u64; struct cvmx_agl_gmx_rxx_jabber_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; uint64_t cnt:16; #else uint64_t cnt:16; uint64_t reserved_16_63:48; #endif } s; }; union cvmx_agl_gmx_rxx_pause_drop_time { uint64_t u64; struct cvmx_agl_gmx_rxx_pause_drop_time_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; uint64_t status:16; #else uint64_t status:16; uint64_t reserved_16_63:48; #endif } s; }; union cvmx_agl_gmx_rxx_rx_inbnd { uint64_t u64; struct cvmx_agl_gmx_rxx_rx_inbnd_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_4_63:60; uint64_t duplex:1; uint64_t speed:2; uint64_t status:1; #else uint64_t status:1; uint64_t speed:2; uint64_t duplex:1; uint64_t reserved_4_63:60; #endif } s; }; union cvmx_agl_gmx_rxx_stats_ctl { uint64_t u64; struct cvmx_agl_gmx_rxx_stats_ctl_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_1_63:63; uint64_t rd_clr:1; #else uint64_t rd_clr:1; uint64_t reserved_1_63:63; #endif } s; }; union cvmx_agl_gmx_rxx_stats_octs { uint64_t u64; struct cvmx_agl_gmx_rxx_stats_octs_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_48_63:16; uint64_t cnt:48; #else uint64_t cnt:48; uint64_t reserved_48_63:16; #endif } s; }; union cvmx_agl_gmx_rxx_stats_octs_ctl { uint64_t u64; struct cvmx_agl_gmx_rxx_stats_octs_ctl_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_48_63:16; uint64_t cnt:48; #else uint64_t cnt:48; uint64_t reserved_48_63:16; #endif } s; }; union cvmx_agl_gmx_rxx_stats_octs_dmac { uint64_t u64; struct cvmx_agl_gmx_rxx_stats_octs_dmac_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_48_63:16; uint64_t cnt:48; #else uint64_t cnt:48; uint64_t reserved_48_63:16; #endif } s; }; union cvmx_agl_gmx_rxx_stats_octs_drp { uint64_t u64; struct cvmx_agl_gmx_rxx_stats_octs_drp_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_48_63:16; uint64_t cnt:48; #else uint64_t cnt:48; uint64_t reserved_48_63:16; #endif } s; }; union cvmx_agl_gmx_rxx_stats_pkts { uint64_t u64; struct cvmx_agl_gmx_rxx_stats_pkts_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; uint64_t cnt:32; #else uint64_t cnt:32; uint64_t reserved_32_63:32; #endif } s; }; union cvmx_agl_gmx_rxx_stats_pkts_bad { uint64_t u64; struct cvmx_agl_gmx_rxx_stats_pkts_bad_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; uint64_t cnt:32; #else uint64_t cnt:32; uint64_t reserved_32_63:32; #endif } s; }; union cvmx_agl_gmx_rxx_stats_pkts_ctl { uint64_t u64; struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; uint64_t cnt:32; #else uint64_t cnt:32; uint64_t reserved_32_63:32; #endif } s; }; union cvmx_agl_gmx_rxx_stats_pkts_dmac { uint64_t u64; struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; uint64_t cnt:32; #else uint64_t cnt:32; uint64_t reserved_32_63:32; #endif } s; }; union cvmx_agl_gmx_rxx_stats_pkts_drp { uint64_t u64; struct cvmx_agl_gmx_rxx_stats_pkts_drp_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; uint64_t cnt:32; #else uint64_t cnt:32; uint64_t reserved_32_63:32; #endif } s; }; union cvmx_agl_gmx_rxx_udd_skp { uint64_t u64; struct 
cvmx_agl_gmx_rxx_udd_skp_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_9_63:55; uint64_t fcssel:1; uint64_t reserved_7_7:1; uint64_t len:7; #else uint64_t len:7; uint64_t reserved_7_7:1; uint64_t fcssel:1; uint64_t reserved_9_63:55; #endif } s; }; union cvmx_agl_gmx_rx_bp_dropx { uint64_t u64; struct cvmx_agl_gmx_rx_bp_dropx_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_6_63:58; uint64_t mark:6; #else uint64_t mark:6; uint64_t reserved_6_63:58; #endif } s; }; union cvmx_agl_gmx_rx_bp_offx { uint64_t u64; struct cvmx_agl_gmx_rx_bp_offx_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_6_63:58; uint64_t mark:6; #else uint64_t mark:6; uint64_t reserved_6_63:58; #endif } s; }; union cvmx_agl_gmx_rx_bp_onx { uint64_t u64; struct cvmx_agl_gmx_rx_bp_onx_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_9_63:55; uint64_t mark:9; #else uint64_t mark:9; uint64_t reserved_9_63:55; #endif } s; }; union cvmx_agl_gmx_rx_prt_info { uint64_t u64; struct cvmx_agl_gmx_rx_prt_info_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_18_63:46; uint64_t drop:2; uint64_t reserved_2_15:14; uint64_t commit:2; #else uint64_t commit:2; uint64_t reserved_2_15:14; uint64_t drop:2; uint64_t reserved_18_63:46; #endif } s; struct cvmx_agl_gmx_rx_prt_info_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_17_63:47; uint64_t drop:1; uint64_t reserved_1_15:15; uint64_t commit:1; #else uint64_t commit:1; uint64_t reserved_1_15:15; uint64_t drop:1; uint64_t reserved_17_63:47; #endif } cn56xx; }; union cvmx_agl_gmx_rx_tx_status { uint64_t u64; struct cvmx_agl_gmx_rx_tx_status_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_6_63:58; uint64_t tx:2; uint64_t reserved_2_3:2; uint64_t rx:2; #else uint64_t rx:2; uint64_t reserved_2_3:2; uint64_t tx:2; uint64_t reserved_6_63:58; #endif } s; struct cvmx_agl_gmx_rx_tx_status_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_5_63:59; uint64_t tx:1; uint64_t reserved_1_3:3; uint64_t rx:1; #else uint64_t rx:1; uint64_t reserved_1_3:3; uint64_t tx:1; uint64_t reserved_5_63:59; #endif } cn56xx; }; union cvmx_agl_gmx_smacx { uint64_t u64; struct cvmx_agl_gmx_smacx_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_48_63:16; uint64_t smac:48; #else uint64_t smac:48; uint64_t reserved_48_63:16; #endif } s; }; union cvmx_agl_gmx_stat_bp { uint64_t u64; struct cvmx_agl_gmx_stat_bp_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_17_63:47; uint64_t bp:1; uint64_t cnt:16; #else uint64_t cnt:16; uint64_t bp:1; uint64_t reserved_17_63:47; #endif } s; }; union cvmx_agl_gmx_txx_append { uint64_t u64; struct cvmx_agl_gmx_txx_append_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_4_63:60; uint64_t force_fcs:1; uint64_t fcs:1; uint64_t pad:1; uint64_t preamble:1; #else uint64_t preamble:1; uint64_t pad:1; uint64_t fcs:1; uint64_t force_fcs:1; uint64_t reserved_4_63:60; #endif } s; }; union cvmx_agl_gmx_txx_clk { uint64_t u64; struct cvmx_agl_gmx_txx_clk_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_6_63:58; uint64_t clk_cnt:6; #else uint64_t clk_cnt:6; uint64_t reserved_6_63:58; #endif } s; }; union cvmx_agl_gmx_txx_ctl { uint64_t u64; struct cvmx_agl_gmx_txx_ctl_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_2_63:62; uint64_t xsdef_en:1; uint64_t xscol_en:1; #else uint64_t xscol_en:1; uint64_t xsdef_en:1; uint64_t reserved_2_63:62; #endif } s; }; union cvmx_agl_gmx_txx_min_pkt { uint64_t u64; struct cvmx_agl_gmx_txx_min_pkt_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_8_63:56; uint64_t min_size:8; #else uint64_t min_size:8; uint64_t reserved_8_63:56; #endif } 
s; }; union cvmx_agl_gmx_txx_pause_pkt_interval { uint64_t u64; struct cvmx_agl_gmx_txx_pause_pkt_interval_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; uint64_t interval:16; #else uint64_t interval:16; uint64_t reserved_16_63:48; #endif } s; }; union cvmx_agl_gmx_txx_pause_pkt_time { uint64_t u64; struct cvmx_agl_gmx_txx_pause_pkt_time_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; uint64_t time:16; #else uint64_t time:16; uint64_t reserved_16_63:48; #endif } s; }; union cvmx_agl_gmx_txx_pause_togo { uint64_t u64; struct cvmx_agl_gmx_txx_pause_togo_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; uint64_t time:16; #else uint64_t time:16; uint64_t reserved_16_63:48; #endif } s; }; union cvmx_agl_gmx_txx_pause_zero { uint64_t u64; struct cvmx_agl_gmx_txx_pause_zero_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_1_63:63; uint64_t send:1; #else uint64_t send:1; uint64_t reserved_1_63:63; #endif } s; }; union cvmx_agl_gmx_txx_soft_pause { uint64_t u64; struct cvmx_agl_gmx_txx_soft_pause_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; uint64_t time:16; #else uint64_t time:16; uint64_t reserved_16_63:48; #endif } s; }; union cvmx_agl_gmx_txx_stat0 { uint64_t u64; struct cvmx_agl_gmx_txx_stat0_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t xsdef:32; uint64_t xscol:32; #else uint64_t xscol:32; uint64_t xsdef:32; #endif } s; }; union cvmx_agl_gmx_txx_stat1 { uint64_t u64; struct cvmx_agl_gmx_txx_stat1_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t scol:32; uint64_t mcol:32; #else uint64_t mcol:32; uint64_t scol:32; #endif } s; }; union cvmx_agl_gmx_txx_stat2 { uint64_t u64; struct cvmx_agl_gmx_txx_stat2_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_48_63:16; uint64_t octs:48; #else uint64_t octs:48; uint64_t reserved_48_63:16; #endif } s; }; union cvmx_agl_gmx_txx_stat3 { uint64_t u64; struct cvmx_agl_gmx_txx_stat3_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_32_63:32; uint64_t pkts:32; #else uint64_t pkts:32; uint64_t reserved_32_63:32; #endif } s; }; union cvmx_agl_gmx_txx_stat4 { uint64_t u64; struct cvmx_agl_gmx_txx_stat4_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t hist1:32; uint64_t hist0:32; #else uint64_t hist0:32; uint64_t hist1:32; #endif } s; }; union cvmx_agl_gmx_txx_stat5 { uint64_t u64; struct cvmx_agl_gmx_txx_stat5_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t hist3:32; uint64_t hist2:32; #else uint64_t hist2:32; uint64_t hist3:32; #endif } s; }; union cvmx_agl_gmx_txx_stat6 { uint64_t u64; struct cvmx_agl_gmx_txx_stat6_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t hist5:32; uint64_t hist4:32; #else uint64_t hist4:32; uint64_t hist5:32; #endif } s; }; union cvmx_agl_gmx_txx_stat7 { uint64_t u64; struct cvmx_agl_gmx_txx_stat7_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t hist7:32; uint64_t hist6:32; #else uint64_t hist6:32; uint64_t hist7:32; #endif } s; }; union cvmx_agl_gmx_txx_stat8 { uint64_t u64; struct cvmx_agl_gmx_txx_stat8_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t mcst:32; uint64_t bcst:32; #else uint64_t bcst:32; uint64_t mcst:32; #endif } s; }; union cvmx_agl_gmx_txx_stat9 { uint64_t u64; struct cvmx_agl_gmx_txx_stat9_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t undflw:32; uint64_t ctl:32; #else uint64_t ctl:32; uint64_t undflw:32; #endif } s; }; union cvmx_agl_gmx_txx_stats_ctl { uint64_t u64; struct cvmx_agl_gmx_txx_stats_ctl_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_1_63:63; uint64_t rd_clr:1; #else uint64_t rd_clr:1; uint64_t reserved_1_63:63; #endif } s; }; union cvmx_agl_gmx_txx_thresh { uint64_t u64; struct 
cvmx_agl_gmx_txx_thresh_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_6_63:58; uint64_t cnt:6; #else uint64_t cnt:6; uint64_t reserved_6_63:58; #endif } s; }; union cvmx_agl_gmx_tx_bp { uint64_t u64; struct cvmx_agl_gmx_tx_bp_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_2_63:62; uint64_t bp:2; #else uint64_t bp:2; uint64_t reserved_2_63:62; #endif } s; struct cvmx_agl_gmx_tx_bp_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_1_63:63; uint64_t bp:1; #else uint64_t bp:1; uint64_t reserved_1_63:63; #endif } cn56xx; }; union cvmx_agl_gmx_tx_col_attempt { uint64_t u64; struct cvmx_agl_gmx_tx_col_attempt_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_5_63:59; uint64_t limit:5; #else uint64_t limit:5; uint64_t reserved_5_63:59; #endif } s; }; union cvmx_agl_gmx_tx_ifg { uint64_t u64; struct cvmx_agl_gmx_tx_ifg_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_8_63:56; uint64_t ifg2:4; uint64_t ifg1:4; #else uint64_t ifg1:4; uint64_t ifg2:4; uint64_t reserved_8_63:56; #endif } s; }; union cvmx_agl_gmx_tx_int_en { uint64_t u64; struct cvmx_agl_gmx_tx_int_en_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_22_63:42; uint64_t ptp_lost:2; uint64_t reserved_18_19:2; uint64_t late_col:2; uint64_t reserved_14_15:2; uint64_t xsdef:2; uint64_t reserved_10_11:2; uint64_t xscol:2; uint64_t reserved_4_7:4; uint64_t undflw:2; uint64_t reserved_1_1:1; uint64_t pko_nxa:1; #else uint64_t pko_nxa:1; uint64_t reserved_1_1:1; uint64_t undflw:2; uint64_t reserved_4_7:4; uint64_t xscol:2; uint64_t reserved_10_11:2; uint64_t xsdef:2; uint64_t reserved_14_15:2; uint64_t late_col:2; uint64_t reserved_18_19:2; uint64_t ptp_lost:2; uint64_t reserved_22_63:42; #endif } s; struct cvmx_agl_gmx_tx_int_en_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_18_63:46; uint64_t late_col:2; uint64_t reserved_14_15:2; uint64_t xsdef:2; uint64_t reserved_10_11:2; uint64_t xscol:2; uint64_t reserved_4_7:4; uint64_t undflw:2; uint64_t reserved_1_1:1; uint64_t pko_nxa:1; #else uint64_t pko_nxa:1; uint64_t reserved_1_1:1; uint64_t undflw:2; uint64_t reserved_4_7:4; uint64_t xscol:2; uint64_t reserved_10_11:2; uint64_t xsdef:2; uint64_t reserved_14_15:2; uint64_t late_col:2; uint64_t reserved_18_63:46; #endif } cn52xx; struct cvmx_agl_gmx_tx_int_en_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_17_63:47; uint64_t late_col:1; uint64_t reserved_13_15:3; uint64_t xsdef:1; uint64_t reserved_9_11:3; uint64_t xscol:1; uint64_t reserved_3_7:5; uint64_t undflw:1; uint64_t reserved_1_1:1; uint64_t pko_nxa:1; #else uint64_t pko_nxa:1; uint64_t reserved_1_1:1; uint64_t undflw:1; uint64_t reserved_3_7:5; uint64_t xscol:1; uint64_t reserved_9_11:3; uint64_t xsdef:1; uint64_t reserved_13_15:3; uint64_t late_col:1; uint64_t reserved_17_63:47; #endif } cn56xx; }; union cvmx_agl_gmx_tx_int_reg { uint64_t u64; struct cvmx_agl_gmx_tx_int_reg_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_22_63:42; uint64_t ptp_lost:2; uint64_t reserved_18_19:2; uint64_t late_col:2; uint64_t reserved_14_15:2; uint64_t xsdef:2; uint64_t reserved_10_11:2; uint64_t xscol:2; uint64_t reserved_4_7:4; uint64_t undflw:2; uint64_t reserved_1_1:1; uint64_t pko_nxa:1; #else uint64_t pko_nxa:1; uint64_t reserved_1_1:1; uint64_t undflw:2; uint64_t reserved_4_7:4; uint64_t xscol:2; uint64_t reserved_10_11:2; uint64_t xsdef:2; uint64_t reserved_14_15:2; uint64_t late_col:2; uint64_t reserved_18_19:2; uint64_t ptp_lost:2; uint64_t reserved_22_63:42; #endif } s; struct cvmx_agl_gmx_tx_int_reg_cn52xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t 
reserved_18_63:46; uint64_t late_col:2; uint64_t reserved_14_15:2; uint64_t xsdef:2; uint64_t reserved_10_11:2; uint64_t xscol:2; uint64_t reserved_4_7:4; uint64_t undflw:2; uint64_t reserved_1_1:1; uint64_t pko_nxa:1; #else uint64_t pko_nxa:1; uint64_t reserved_1_1:1; uint64_t undflw:2; uint64_t reserved_4_7:4; uint64_t xscol:2; uint64_t reserved_10_11:2; uint64_t xsdef:2; uint64_t reserved_14_15:2; uint64_t late_col:2; uint64_t reserved_18_63:46; #endif } cn52xx; struct cvmx_agl_gmx_tx_int_reg_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_17_63:47; uint64_t late_col:1; uint64_t reserved_13_15:3; uint64_t xsdef:1; uint64_t reserved_9_11:3; uint64_t xscol:1; uint64_t reserved_3_7:5; uint64_t undflw:1; uint64_t reserved_1_1:1; uint64_t pko_nxa:1; #else uint64_t pko_nxa:1; uint64_t reserved_1_1:1; uint64_t undflw:1; uint64_t reserved_3_7:5; uint64_t xscol:1; uint64_t reserved_9_11:3; uint64_t xsdef:1; uint64_t reserved_13_15:3; uint64_t late_col:1; uint64_t reserved_17_63:47; #endif } cn56xx; }; union cvmx_agl_gmx_tx_jam { uint64_t u64; struct cvmx_agl_gmx_tx_jam_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_8_63:56; uint64_t jam:8; #else uint64_t jam:8; uint64_t reserved_8_63:56; #endif } s; }; union cvmx_agl_gmx_tx_lfsr { uint64_t u64; struct cvmx_agl_gmx_tx_lfsr_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; uint64_t lfsr:16; #else uint64_t lfsr:16; uint64_t reserved_16_63:48; #endif } s; }; union cvmx_agl_gmx_tx_ovr_bp { uint64_t u64; struct cvmx_agl_gmx_tx_ovr_bp_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_10_63:54; uint64_t en:2; uint64_t reserved_6_7:2; uint64_t bp:2; uint64_t reserved_2_3:2; uint64_t ign_full:2; #else uint64_t ign_full:2; uint64_t reserved_2_3:2; uint64_t bp:2; uint64_t reserved_6_7:2; uint64_t en:2; uint64_t reserved_10_63:54; #endif } s; struct cvmx_agl_gmx_tx_ovr_bp_cn56xx { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_9_63:55; uint64_t en:1; uint64_t reserved_5_7:3; uint64_t bp:1; uint64_t reserved_1_3:3; uint64_t ign_full:1; #else uint64_t ign_full:1; uint64_t reserved_1_3:3; uint64_t bp:1; uint64_t reserved_5_7:3; uint64_t en:1; uint64_t reserved_9_63:55; #endif } cn56xx; }; union cvmx_agl_gmx_tx_pause_pkt_dmac { uint64_t u64; struct cvmx_agl_gmx_tx_pause_pkt_dmac_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_48_63:16; uint64_t dmac:48; #else uint64_t dmac:48; uint64_t reserved_48_63:16; #endif } s; }; union cvmx_agl_gmx_tx_pause_pkt_type { uint64_t u64; struct cvmx_agl_gmx_tx_pause_pkt_type_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t reserved_16_63:48; uint64_t type:16; #else uint64_t type:16; uint64_t reserved_16_63:48; #endif } s; }; union cvmx_agl_prtx_ctl { uint64_t u64; struct cvmx_agl_prtx_ctl_s { #ifdef __BIG_ENDIAN_BITFIELD uint64_t drv_byp:1; uint64_t reserved_62_62:1; uint64_t cmp_pctl:6; uint64_t reserved_54_55:2; uint64_t cmp_nctl:6; uint64_t reserved_46_47:2; uint64_t drv_pctl:6; uint64_t reserved_38_39:2; uint64_t drv_nctl:6; uint64_t reserved_29_31:3; uint64_t clk_set:5; uint64_t clkrx_byp:1; uint64_t reserved_21_22:2; uint64_t clkrx_set:5; uint64_t clktx_byp:1; uint64_t reserved_13_14:2; uint64_t clktx_set:5; uint64_t reserved_5_7:3; uint64_t dllrst:1; uint64_t comp:1; uint64_t enable:1; uint64_t clkrst:1; uint64_t mode:1; #else uint64_t mode:1; uint64_t clkrst:1; uint64_t enable:1; uint64_t comp:1; uint64_t dllrst:1; uint64_t reserved_5_7:3; uint64_t clktx_set:5; uint64_t reserved_13_14:2; uint64_t clktx_byp:1; uint64_t clkrx_set:5; uint64_t reserved_21_22:2; uint64_t clkrx_byp:1; uint64_t clk_set:5; 
uint64_t reserved_29_31:3; uint64_t drv_nctl:6; uint64_t reserved_38_39:2; uint64_t drv_pctl:6; uint64_t reserved_46_47:2; uint64_t cmp_nctl:6; uint64_t reserved_54_55:2; uint64_t cmp_pctl:6; uint64_t reserved_62_62:1; uint64_t drv_byp:1; #endif } s; }; #endif
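/*
 * Illustrative sketch, not part of the Cavium SDK header above: the register
 * unions it defines are meant to be overlaid on a raw 64-bit CSR value so
 * that individual fields can be read by name instead of with hand-written
 * shift/mask code. The raw value is assumed to have been read elsewhere
 * (e.g. from CVMX_AGL_GMX_PRTX_CFG(port)); the helper name is hypothetical.
 */
static inline int cvmx_agl_prt_is_enabled(uint64_t raw_prtx_cfg)
{
	union cvmx_agl_gmx_prtx_cfg cfg;

	cfg.u64 = raw_prtx_cfg;		/* overlay the raw register value */
	return cfg.s.en;		/* port enable bit from the common layout */
}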
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 */

#include <type_support.h>
#include "system_global.h"
#include "ibuf_ctrl_global.h"

const u32 N_IBUF_CTRL_PROCS[N_IBUF_CTRL_ID] = {
	8,	/* IBUF_CTRL0_ID supports at most 8 processes */
	4,	/* IBUF_CTRL1_ID supports at most 4 processes */
	4	/* IBUF_CTRL2_ID supports at most 4 processes */
};
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmdebug.h>
#include <linux/highmem.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
#include <linux/kasan.h>

bool _page_poisoning_enabled_early;
EXPORT_SYMBOL(_page_poisoning_enabled_early);
DEFINE_STATIC_KEY_FALSE(_page_poisoning_enabled);
EXPORT_SYMBOL(_page_poisoning_enabled);

static int __init early_page_poison_param(char *buf)
{
	return kstrtobool(buf, &_page_poisoning_enabled_early);
}
early_param("page_poison", early_page_poison_param);

static void poison_page(struct page *page)
{
	void *addr = kmap_local_page(page);

	/* KASAN still thinks the page is in-use, so skip it. */
	kasan_disable_current();
	memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
	kasan_enable_current();
	kunmap_local(addr);
}

void __kernel_poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}

static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

static void check_poison_mem(struct page *page, unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
	dump_page(page, "pagealloc: corrupted page details");
}

static void unpoison_page(struct page *page)
{
	void *addr;

	addr = kmap_local_page(page);
	kasan_disable_current();
	/*
	 * Page poisoning when enabled poisons each and every page
	 * that is freed to buddy. Thus no extra check is done to
	 * see if a page was poisoned.
	 */
	check_poison_mem(page, kasan_reset_tag(addr), PAGE_SIZE);
	kasan_enable_current();
	kunmap_local(addr);
}

void __kernel_unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}

#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing; all work is done via poison pages */
}
#endif
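/*
 * Illustrative sketch (standalone user-space C, not kernel code): the
 * single_bit_flip() helper above uses the classic power-of-two test.
 * XOR-ing the observed byte with the poison pattern leaves exactly the
 * flipped bits set; a non-zero result with only one bit set means a single
 * bit flipped. EXAMPLE_POISON is a stand-in for the kernel's PAGE_POISON.
 */
#include <stdio.h>

#define EXAMPLE_POISON 0xaa

static int is_single_bit_flip(unsigned char seen)
{
	unsigned char error = seen ^ EXAMPLE_POISON;

	/* non-zero and a power of two => exactly one bit differs */
	return error && !(error & (error - 1));
}

int main(void)
{
	printf("%d\n", is_single_bit_flip(0xab));	/* 1: only bit 0 flipped */
	printf("%d\n", is_single_bit_flip(0x00));	/* 0: several bits differ */
	return 0;
}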
/* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * */ #include "qmgr.h" static void nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq) { spin_lock(&msgq->lock); msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg); } static void nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit) { struct nvkm_falcon *falcon = msgq->qmgr->falcon; if (commit) nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position); spin_unlock(&msgq->lock); } bool nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq) { u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg); u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg); return head == tail; } static int nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size) { struct nvkm_falcon *falcon = msgq->qmgr->falcon; u32 head, tail, available; head = nvkm_falcon_rd32(falcon, msgq->head_reg); /* has the buffer looped? 
*/ if (head < msgq->position) msgq->position = msgq->offset; tail = msgq->position; available = head - tail; if (size > available) { FLCNQ_ERR(msgq, "requested %d bytes, but only %d available", size, available); return -EINVAL; } nvkm_falcon_pio_rd(falcon, 0, DMEM, tail, data, 0, size); msgq->position += ALIGN(size, QUEUE_ALIGNMENT); return 0; } static int nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr) { int ret = 0; nvkm_falcon_msgq_open(msgq); if (nvkm_falcon_msgq_empty(msgq)) goto close; ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE); if (ret) { FLCNQ_ERR(msgq, "failed to read message header"); goto close; } if (hdr->size > MSG_BUF_SIZE) { FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size); ret = -ENOSPC; goto close; } if (hdr->size > HDR_SIZE) { u32 read_size = hdr->size - HDR_SIZE; ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size); if (ret) { FLCNQ_ERR(msgq, "failed to read message data"); goto close; } } ret = 1; close: nvkm_falcon_msgq_close(msgq, (ret >= 0)); return ret; } static int nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr) { struct nvkm_falcon_qmgr_seq *seq; seq = &msgq->qmgr->seq.id[hdr->seq_id]; if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) { FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id); return -EINVAL; } if (seq->state == SEQ_STATE_USED) { if (seq->callback) seq->result = seq->callback(seq->priv, hdr); } if (seq->async) { nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq); return 0; } complete_all(&seq->done); return 0; } void nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq) { /* * We are invoked from a worker thread, so normally we have plenty of * stack space to work with. */ u8 msg_buffer[MSG_BUF_SIZE]; struct nvfw_falcon_msg *hdr = (void *)msg_buffer; while (nvkm_falcon_msgq_read(msgq, hdr) > 0) nvkm_falcon_msgq_exec(msgq, hdr); } int nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq, void *data, u32 size) { struct nvkm_falcon *falcon = msgq->qmgr->falcon; struct nvfw_falcon_msg *hdr = data; int ret; msgq->head_reg = falcon->func->msgq.head; msgq->tail_reg = falcon->func->msgq.tail; msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail); nvkm_falcon_msgq_open(msgq); ret = nvkm_falcon_msgq_pop(msgq, data, size); if (ret == 0 && hdr->size != size) { FLCN_ERR(falcon, "unexpected init message size %d vs %d", hdr->size, size); ret = -EINVAL; } nvkm_falcon_msgq_close(msgq, ret == 0); return ret; } void nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq, u32 index, u32 offset, u32 size) { const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func; msgq->head_reg = func->msgq.head + index * func->msgq.stride; msgq->tail_reg = func->msgq.tail + index * func->msgq.stride; msgq->offset = offset; FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x", index, msgq->offset, size); } void nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq) { struct nvkm_falcon_msgq *msgq = *pmsgq; if (msgq) { kfree(*pmsgq); *pmsgq = NULL; } } int nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name, struct nvkm_falcon_msgq **pmsgq) { struct nvkm_falcon_msgq *msgq = *pmsgq; if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL))) return -ENOMEM; msgq->qmgr = qmgr; msgq->name = name; spin_lock_init(&msgq->lock); return 0; }
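/*
 * Illustrative sketch (standalone model, not driver code): the wrap handling
 * in nvkm_falcon_msgq_pop() above. The message buffer is linear and starts
 * at 'offset'; when the firmware's write pointer (head) wraps back to the
 * start of the buffer it becomes smaller than the reader's position, which
 * is the cue to reset the reader to 'offset' before computing how many
 * bytes are available to pop.
 */
static unsigned int msgq_bytes_available(unsigned int head, unsigned int *position,
					 unsigned int offset)
{
	if (head < *position)		/* writer wrapped around */
		*position = offset;	/* restart reading at the buffer start */
	return head - *position;	/* 0 means the queue is empty, as in nvkm_falcon_msgq_empty() */
}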
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dmub_hw_lock_mgr.h" #include "dc_dmub_srv.h" #include "dc_types.h" #include "core_types.h" void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv, bool lock, union dmub_hw_lock_flags *hw_locks, struct dmub_hw_lock_inst_flags *inst_flags) { union dmub_rb_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.lock_hw.header.type = DMUB_CMD__HW_LOCK; cmd.lock_hw.header.sub_type = 0; cmd.lock_hw.header.payload_bytes = sizeof(struct dmub_cmd_lock_hw_data); cmd.lock_hw.lock_hw_data.client = HW_LOCK_CLIENT_DRIVER; cmd.lock_hw.lock_hw_data.lock = lock; cmd.lock_hw.lock_hw_data.hw_locks.u8All = hw_locks->u8All; memcpy(&cmd.lock_hw.lock_hw_data.inst_flags, inst_flags, sizeof(struct dmub_hw_lock_inst_flags)); if (!lock) cmd.lock_hw.lock_hw_data.should_release = 1; dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_cmd_lock_hw hw_lock_cmd) { union dmub_inbox0_data_register data = { 0 }; data.inbox0_cmd_lock_hw = hw_lock_cmd; dc_dmub_srv_clear_inbox0_ack(dmub_srv); dc_dmub_srv_send_inbox0_cmd(dmub_srv, data); dc_dmub_srv_wait_for_inbox0_ack(dmub_srv); } bool should_use_dmub_lock(struct dc_link *link) { if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) return true; if (link->replay_settings.replay_feature_enabled) return true; return false; }
// SPDX-License-Identifier: GPL-2.0+ /* * f_audio.c -- USB Audio class function driver * * Copyright (C) 2008 Bryan Wu <[email protected]> * Copyright (C) 2008 Analog Devices, Inc */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/atomic.h> #include "u_uac1_legacy.h" static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value); static int generic_get_cmd(struct usb_audio_control *con, u8 cmd); /* * DESCRIPTORS ... most are static, but strings and full * configuration descriptors are built on demand. */ /* * We have two interfaces- AudioControl and AudioStreaming * TODO: only supcard playback currently */ #define F_AUDIO_AC_INTERFACE 0 #define F_AUDIO_AS_INTERFACE 1 #define F_AUDIO_NUM_INTERFACES 1 /* B.3.1 Standard AC Interface Descriptor */ static struct usb_interface_descriptor ac_interface_desc = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, .bNumEndpoints = 0, .bInterfaceClass = USB_CLASS_AUDIO, .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, }; /* * The number of AudioStreaming and MIDIStreaming interfaces * in the Audio Interface Collection */ DECLARE_UAC_AC_HEADER_DESCRIPTOR(1); #define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES) /* 1 input terminal, 1 output terminal and 1 feature unit */ #define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \ + UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0)) /* B.3.2 Class-Specific AC Interface Descriptor */ static struct uac1_ac_header_descriptor_1 ac_header_desc = { .bLength = UAC_DT_AC_HEADER_LENGTH, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = UAC_HEADER, .bcdADC = cpu_to_le16(0x0100), .wTotalLength = cpu_to_le16(UAC_DT_TOTAL_LENGTH), .bInCollection = F_AUDIO_NUM_INTERFACES, .baInterfaceNr = { /* Interface number of the first AudioStream interface */ [0] = 1, } }; #define INPUT_TERMINAL_ID 1 static struct uac_input_terminal_descriptor input_terminal_desc = { .bLength = UAC_DT_INPUT_TERMINAL_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = UAC_INPUT_TERMINAL, .bTerminalID = INPUT_TERMINAL_ID, .wTerminalType = UAC_TERMINAL_STREAMING, .bAssocTerminal = 0, .wChannelConfig = 0x3, }; DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0); #define FEATURE_UNIT_ID 2 static struct uac_feature_unit_descriptor_0 feature_unit_desc = { .bLength = UAC_DT_FEATURE_UNIT_SIZE(0), .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = UAC_FEATURE_UNIT, .bUnitID = FEATURE_UNIT_ID, .bSourceID = INPUT_TERMINAL_ID, .bControlSize = 2, .bmaControls[0] = (UAC_FU_MUTE | UAC_FU_VOLUME), }; static struct usb_audio_control mute_control = { .list = LIST_HEAD_INIT(mute_control.list), .name = "Mute Control", .type = UAC_FU_MUTE, /* Todo: add real Mute control code */ .set = generic_set_cmd, .get = generic_get_cmd, }; static struct usb_audio_control volume_control = { .list = LIST_HEAD_INIT(volume_control.list), .name = "Volume Control", .type = UAC_FU_VOLUME, /* Todo: add real Volume control code */ .set = generic_set_cmd, .get = generic_get_cmd, }; static struct usb_audio_control_selector feature_unit = { .list = LIST_HEAD_INIT(feature_unit.list), .id = FEATURE_UNIT_ID, .name = "Mute & Volume Control", .type = UAC_FEATURE_UNIT, .desc = (struct usb_descriptor_header *)&feature_unit_desc, }; #define OUTPUT_TERMINAL_ID 3 static struct uac1_output_terminal_descriptor output_terminal_desc = { .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, 
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL, .bTerminalID = OUTPUT_TERMINAL_ID, .wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER, .bAssocTerminal = FEATURE_UNIT_ID, .bSourceID = FEATURE_UNIT_ID, }; /* B.4.1 Standard AS Interface Descriptor */ static struct usb_interface_descriptor as_interface_alt_0_desc = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, .bAlternateSetting = 0, .bNumEndpoints = 0, .bInterfaceClass = USB_CLASS_AUDIO, .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING, }; static struct usb_interface_descriptor as_interface_alt_1_desc = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, .bAlternateSetting = 1, .bNumEndpoints = 1, .bInterfaceClass = USB_CLASS_AUDIO, .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING, }; /* B.4.2 Class-Specific AS Interface Descriptor */ static struct uac1_as_header_descriptor as_header_desc = { .bLength = UAC_DT_AS_HEADER_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = UAC_AS_GENERAL, .bTerminalLink = INPUT_TERMINAL_ID, .bDelay = 1, .wFormatTag = UAC_FORMAT_TYPE_I_PCM, }; DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1); static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = { .bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1), .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = UAC_FORMAT_TYPE, .bFormatType = UAC_FORMAT_TYPE_I, .bSubframeSize = 2, .bBitResolution = 16, .bSamFreqType = 1, }; /* Standard ISO OUT Endpoint Descriptor */ static struct usb_endpoint_descriptor as_out_ep_desc = { .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_SYNC_ADAPTIVE | USB_ENDPOINT_XFER_ISOC, .wMaxPacketSize = cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE), .bInterval = 4, }; /* Class-specific AS ISO OUT Endpoint Descriptor */ static struct uac_iso_endpoint_descriptor as_iso_out_desc = { .bLength = UAC_ISO_ENDPOINT_DESC_SIZE, .bDescriptorType = USB_DT_CS_ENDPOINT, .bDescriptorSubtype = UAC_EP_GENERAL, .bmAttributes = 1, .bLockDelayUnits = 1, .wLockDelay = cpu_to_le16(1), }; static struct usb_descriptor_header *f_audio_desc[] = { (struct usb_descriptor_header *)&ac_interface_desc, (struct usb_descriptor_header *)&ac_header_desc, (struct usb_descriptor_header *)&input_terminal_desc, (struct usb_descriptor_header *)&output_terminal_desc, (struct usb_descriptor_header *)&feature_unit_desc, (struct usb_descriptor_header *)&as_interface_alt_0_desc, (struct usb_descriptor_header *)&as_interface_alt_1_desc, (struct usb_descriptor_header *)&as_header_desc, (struct usb_descriptor_header *)&as_type_i_desc, (struct usb_descriptor_header *)&as_out_ep_desc, (struct usb_descriptor_header *)&as_iso_out_desc, NULL, }; enum { STR_AC_IF, STR_INPUT_TERMINAL, STR_INPUT_TERMINAL_CH_NAMES, STR_FEAT_DESC_0, STR_OUTPUT_TERMINAL, STR_AS_IF_ALT0, STR_AS_IF_ALT1, }; static struct usb_string strings_uac1[] = { [STR_AC_IF].s = "AC Interface", [STR_INPUT_TERMINAL].s = "Input terminal", [STR_INPUT_TERMINAL_CH_NAMES].s = "Channels", [STR_FEAT_DESC_0].s = "Volume control & mute", [STR_OUTPUT_TERMINAL].s = "Output terminal", [STR_AS_IF_ALT0].s = "AS Interface", [STR_AS_IF_ALT1].s = "AS Interface", { }, }; static struct usb_gadget_strings str_uac1 = { .language = 0x0409, /* en-us */ .strings = strings_uac1, }; static struct usb_gadget_strings *uac1_strings[] = { &str_uac1, NULL, }; /* * This function is an ALSA sound card following USB Audio Class Spec 1.0. 
*/ /*-------------------------------------------------------------------------*/ struct f_audio_buf { u8 *buf; int actual; struct list_head list; }; static struct f_audio_buf *f_audio_buffer_alloc(int buf_size) { struct f_audio_buf *copy_buf; copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC); if (!copy_buf) return ERR_PTR(-ENOMEM); copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC); if (!copy_buf->buf) { kfree(copy_buf); return ERR_PTR(-ENOMEM); } return copy_buf; } static void f_audio_buffer_free(struct f_audio_buf *audio_buf) { kfree(audio_buf->buf); kfree(audio_buf); } /*-------------------------------------------------------------------------*/ struct f_audio { struct gaudio card; u8 ac_intf, ac_alt; u8 as_intf, as_alt; /* endpoints handle full and/or high speeds */ struct usb_ep *out_ep; spinlock_t lock; struct f_audio_buf *copy_buf; struct work_struct playback_work; struct list_head play_queue; /* Control Set command */ struct list_head cs; u8 set_cmd; struct usb_audio_control *set_con; }; static inline struct f_audio *func_to_audio(struct usb_function *f) { return container_of(f, struct f_audio, card.func); } /*-------------------------------------------------------------------------*/ static void f_audio_playback_work(struct work_struct *data) { struct f_audio *audio = container_of(data, struct f_audio, playback_work); struct f_audio_buf *play_buf; spin_lock_irq(&audio->lock); if (list_empty(&audio->play_queue)) { spin_unlock_irq(&audio->lock); return; } play_buf = list_first_entry(&audio->play_queue, struct f_audio_buf, list); list_del(&play_buf->list); spin_unlock_irq(&audio->lock); u_audio_playback(&audio->card, play_buf->buf, play_buf->actual); f_audio_buffer_free(play_buf); } static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req) { struct f_audio *audio = req->context; struct usb_composite_dev *cdev = audio->card.func.config->cdev; struct f_audio_buf *copy_buf = audio->copy_buf; struct f_uac1_legacy_opts *opts; int audio_buf_size; int err; opts = container_of(audio->card.func.fi, struct f_uac1_legacy_opts, func_inst); audio_buf_size = opts->audio_buf_size; if (!copy_buf) return -EINVAL; /* Copy buffer is full, add it to the play_queue */ if (audio_buf_size - copy_buf->actual < req->actual) { spin_lock_irq(&audio->lock); list_add_tail(&copy_buf->list, &audio->play_queue); spin_unlock_irq(&audio->lock); schedule_work(&audio->playback_work); copy_buf = f_audio_buffer_alloc(audio_buf_size); if (IS_ERR(copy_buf)) return -ENOMEM; } memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual); copy_buf->actual += req->actual; audio->copy_buf = copy_buf; err = usb_ep_queue(ep, req, GFP_ATOMIC); if (err) ERROR(cdev, "%s queue req: %d\n", ep->name, err); return 0; } static void f_audio_complete(struct usb_ep *ep, struct usb_request *req) { struct f_audio *audio = req->context; int status = req->status; u32 data = 0; struct usb_ep *out_ep = audio->out_ep; switch (status) { case 0: /* normal completion? 
*/ if (ep == out_ep) f_audio_out_ep_complete(ep, req); else if (audio->set_con) { memcpy(&data, req->buf, req->length); audio->set_con->set(audio->set_con, audio->set_cmd, le16_to_cpu(data)); audio->set_con = NULL; } break; default: break; } } static int audio_set_intf_req(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct f_audio *audio = func_to_audio(f); struct usb_composite_dev *cdev = f->config->cdev; struct usb_request *req = cdev->req; u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF); u16 len = le16_to_cpu(ctrl->wLength); u16 w_value = le16_to_cpu(ctrl->wValue); u8 con_sel = (w_value >> 8) & 0xFF; u8 cmd = (ctrl->bRequest & 0x0F); struct usb_audio_control_selector *cs; struct usb_audio_control *con; DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n", ctrl->bRequest, w_value, len, id); list_for_each_entry(cs, &audio->cs, list) { if (cs->id == id) { list_for_each_entry(con, &cs->control, list) { if (con->type == con_sel) { audio->set_con = con; break; } } break; } } audio->set_cmd = cmd; req->context = audio; req->complete = f_audio_complete; return len; } static int audio_get_intf_req(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct f_audio *audio = func_to_audio(f); struct usb_composite_dev *cdev = f->config->cdev; struct usb_request *req = cdev->req; int value = -EOPNOTSUPP; u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF); u16 len = le16_to_cpu(ctrl->wLength); u16 w_value = le16_to_cpu(ctrl->wValue); u8 con_sel = (w_value >> 8) & 0xFF; u8 cmd = (ctrl->bRequest & 0x0F); struct usb_audio_control_selector *cs; struct usb_audio_control *con; DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n", ctrl->bRequest, w_value, len, id); list_for_each_entry(cs, &audio->cs, list) { if (cs->id == id) { list_for_each_entry(con, &cs->control, list) { if (con->type == con_sel && con->get) { value = con->get(con, cmd); break; } } break; } } req->context = audio; req->complete = f_audio_complete; len = min_t(size_t, sizeof(value), len); memcpy(req->buf, &value, len); return len; } static int audio_set_endpoint_req(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct usb_composite_dev *cdev = f->config->cdev; int value = -EOPNOTSUPP; u16 ep = le16_to_cpu(ctrl->wIndex); u16 len = le16_to_cpu(ctrl->wLength); u16 w_value = le16_to_cpu(ctrl->wValue); DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n", ctrl->bRequest, w_value, len, ep); switch (ctrl->bRequest) { case UAC_SET_CUR: value = len; break; case UAC_SET_MIN: break; case UAC_SET_MAX: break; case UAC_SET_RES: break; case UAC_SET_MEM: break; default: break; } return value; } static int audio_get_endpoint_req(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct usb_composite_dev *cdev = f->config->cdev; int value = -EOPNOTSUPP; u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF); u16 len = le16_to_cpu(ctrl->wLength); u16 w_value = le16_to_cpu(ctrl->wValue); DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n", ctrl->bRequest, w_value, len, ep); switch (ctrl->bRequest) { case UAC_GET_CUR: case UAC_GET_MIN: case UAC_GET_MAX: case UAC_GET_RES: value = len; break; case UAC_GET_MEM: break; default: break; } return value; } static int f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct usb_composite_dev *cdev = f->config->cdev; struct usb_request *req = cdev->req; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); 
/* composite driver infrastructure handles everything; interface * activation uses set_alt(). */ switch (ctrl->bRequestType) { case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE: value = audio_set_intf_req(f, ctrl); break; case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE: value = audio_get_intf_req(f, ctrl); break; case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT: value = audio_set_endpoint_req(f, ctrl); break; case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT: value = audio_get_endpoint_req(f, ctrl); break; default: ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); } /* respond with data transfer or status phase? */ if (value >= 0) { DBG(cdev, "audio req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); req->zero = 0; req->length = value; value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); if (value < 0) ERROR(cdev, "audio response on err %d\n", value); } /* device either stalls (value < 0) or reports success */ return value; } static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_audio *audio = func_to_audio(f); struct usb_composite_dev *cdev = f->config->cdev; struct usb_ep *out_ep = audio->out_ep; struct usb_request *req; struct f_uac1_legacy_opts *opts; int req_buf_size, req_count, audio_buf_size; int i = 0, err = 0; DBG(cdev, "intf %d, alt %d\n", intf, alt); opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst); req_buf_size = opts->req_buf_size; req_count = opts->req_count; audio_buf_size = opts->audio_buf_size; /* No i/f has more than 2 alt settings */ if (alt > 1) { ERROR(cdev, "%s:%d Error!\n", __func__, __LINE__); return -EINVAL; } if (intf == audio->ac_intf) { /* Control I/f has only 1 AltSetting - 0 */ if (alt) { ERROR(cdev, "%s:%d Error!\n", __func__, __LINE__); return -EINVAL; } return 0; } else if (intf == audio->as_intf) { if (alt == 1) { err = config_ep_by_speed(cdev->gadget, f, out_ep); if (err) return err; usb_ep_enable(out_ep); audio->copy_buf = f_audio_buffer_alloc(audio_buf_size); if (IS_ERR(audio->copy_buf)) return -ENOMEM; /* * allocate a bunch of read buffers * and queue them all at once. 
*/ for (i = 0; i < req_count && err == 0; i++) { req = usb_ep_alloc_request(out_ep, GFP_ATOMIC); if (req) { req->buf = kzalloc(req_buf_size, GFP_ATOMIC); if (req->buf) { req->length = req_buf_size; req->context = audio; req->complete = f_audio_complete; err = usb_ep_queue(out_ep, req, GFP_ATOMIC); if (err) ERROR(cdev, "%s queue req: %d\n", out_ep->name, err); } else err = -ENOMEM; } else err = -ENOMEM; } } else { struct f_audio_buf *copy_buf = audio->copy_buf; if (copy_buf) { list_add_tail(&copy_buf->list, &audio->play_queue); schedule_work(&audio->playback_work); } } audio->as_alt = alt; } return err; } static int f_audio_get_alt(struct usb_function *f, unsigned intf) { struct f_audio *audio = func_to_audio(f); struct usb_composite_dev *cdev = f->config->cdev; if (intf == audio->ac_intf) return audio->ac_alt; else if (intf == audio->as_intf) return audio->as_alt; else ERROR(cdev, "%s:%d Invalid Interface %d!\n", __func__, __LINE__, intf); return -EINVAL; } static void f_audio_disable(struct usb_function *f) { return; } /*-------------------------------------------------------------------------*/ static void f_audio_build_desc(struct f_audio *audio) { struct gaudio *card = &audio->card; u8 *sam_freq; int rate; /* Set channel numbers */ input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card); as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card); /* Set sample rates */ rate = u_audio_get_playback_rate(card); sam_freq = as_type_i_desc.tSamFreq[0]; memcpy(sam_freq, &rate, 3); /* Todo: Set Sample bits and other parameters */ return; } /* audio function driver setup/binding */ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct f_audio *audio = func_to_audio(f); struct usb_string *us; int status; struct usb_ep *ep = NULL; struct f_uac1_legacy_opts *audio_opts; audio_opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst); audio->card.gadget = c->cdev->gadget; /* set up ASLA audio devices */ if (!audio_opts->bound) { status = gaudio_setup(&audio->card); if (status < 0) return status; audio_opts->bound = true; } us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1)); if (IS_ERR(us)) return PTR_ERR(us); ac_interface_desc.iInterface = us[STR_AC_IF].id; input_terminal_desc.iTerminal = us[STR_INPUT_TERMINAL].id; input_terminal_desc.iChannelNames = us[STR_INPUT_TERMINAL_CH_NAMES].id; feature_unit_desc.iFeature = us[STR_FEAT_DESC_0].id; output_terminal_desc.iTerminal = us[STR_OUTPUT_TERMINAL].id; as_interface_alt_0_desc.iInterface = us[STR_AS_IF_ALT0].id; as_interface_alt_1_desc.iInterface = us[STR_AS_IF_ALT1].id; f_audio_build_desc(audio); /* allocate instance-specific interface IDs, and patch descriptors */ status = usb_interface_id(c, f); if (status < 0) goto fail; ac_interface_desc.bInterfaceNumber = status; audio->ac_intf = status; audio->ac_alt = 0; status = usb_interface_id(c, f); if (status < 0) goto fail; as_interface_alt_0_desc.bInterfaceNumber = status; as_interface_alt_1_desc.bInterfaceNumber = status; audio->as_intf = status; audio->as_alt = 0; status = -ENODEV; /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc); if (!ep) goto fail; audio->out_ep = ep; audio->out_ep->desc = &as_out_ep_desc; /* copy descriptors, and track endpoint copies */ status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL, NULL); if (status) goto fail; return 0; fail: gaudio_cleanup(&audio->card); return status; } 
/*-------------------------------------------------------------------------*/ static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value) { con->data[cmd] = value; return 0; } static int generic_get_cmd(struct usb_audio_control *con, u8 cmd) { return con->data[cmd]; } /* Todo: add more control selectors dynamically */ static int control_selector_init(struct f_audio *audio) { INIT_LIST_HEAD(&audio->cs); list_add(&feature_unit.list, &audio->cs); INIT_LIST_HEAD(&feature_unit.control); list_add(&mute_control.list, &feature_unit.control); list_add(&volume_control.list, &feature_unit.control); volume_control.data[UAC__CUR] = 0xffc0; volume_control.data[UAC__MIN] = 0xe3a0; volume_control.data[UAC__MAX] = 0xfff0; volume_control.data[UAC__RES] = 0x0030; return 0; } static inline struct f_uac1_legacy_opts *to_f_uac1_opts(struct config_item *item) { return container_of(to_config_group(item), struct f_uac1_legacy_opts, func_inst.group); } static void f_uac1_attr_release(struct config_item *item) { struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item); usb_put_function_instance(&opts->func_inst); } static struct configfs_item_operations f_uac1_item_ops = { .release = f_uac1_attr_release, }; #define UAC1_INT_ATTRIBUTE(name) \ static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \ char *page) \ { \ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item); \ int result; \ \ mutex_lock(&opts->lock); \ result = sprintf(page, "%u\n", opts->name); \ mutex_unlock(&opts->lock); \ \ return result; \ } \ \ static ssize_t f_uac1_opts_##name##_store(struct config_item *item, \ const char *page, size_t len) \ { \ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item); \ int ret; \ u32 num; \ \ mutex_lock(&opts->lock); \ if (opts->refcnt) { \ ret = -EBUSY; \ goto end; \ } \ \ ret = kstrtou32(page, 0, &num); \ if (ret) \ goto end; \ \ opts->name = num; \ ret = len; \ \ end: \ mutex_unlock(&opts->lock); \ return ret; \ } \ \ CONFIGFS_ATTR(f_uac1_opts_, name) UAC1_INT_ATTRIBUTE(req_buf_size); UAC1_INT_ATTRIBUTE(req_count); UAC1_INT_ATTRIBUTE(audio_buf_size); #define UAC1_STR_ATTRIBUTE(name) \ static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \ char *page) \ { \ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item); \ int result; \ \ mutex_lock(&opts->lock); \ result = sprintf(page, "%s\n", opts->name); \ mutex_unlock(&opts->lock); \ \ return result; \ } \ \ static ssize_t f_uac1_opts_##name##_store(struct config_item *item, \ const char *page, size_t len) \ { \ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item); \ int ret = -EBUSY; \ char *tmp; \ \ mutex_lock(&opts->lock); \ if (opts->refcnt) \ goto end; \ \ tmp = kstrndup(page, len, GFP_KERNEL); \ if (!tmp) { \ ret = -ENOMEM; \ goto end; \ } \ if (opts->name##_alloc) \ kfree(opts->name); \ opts->name##_alloc = true; \ opts->name = tmp; \ ret = len; \ \ end: \ mutex_unlock(&opts->lock); \ return ret; \ } \ \ CONFIGFS_ATTR(f_uac1_opts_, name) UAC1_STR_ATTRIBUTE(fn_play); UAC1_STR_ATTRIBUTE(fn_cap); UAC1_STR_ATTRIBUTE(fn_cntl); static struct configfs_attribute *f_uac1_attrs[] = { &f_uac1_opts_attr_req_buf_size, &f_uac1_opts_attr_req_count, &f_uac1_opts_attr_audio_buf_size, &f_uac1_opts_attr_fn_play, &f_uac1_opts_attr_fn_cap, &f_uac1_opts_attr_fn_cntl, NULL, }; static const struct config_item_type f_uac1_func_type = { .ct_item_ops = &f_uac1_item_ops, .ct_attrs = f_uac1_attrs, .ct_owner = THIS_MODULE, }; static void f_audio_free_inst(struct usb_function_instance *f) { struct f_uac1_legacy_opts *opts; opts = container_of(f,
struct f_uac1_legacy_opts, func_inst); if (opts->fn_play_alloc) kfree(opts->fn_play); if (opts->fn_cap_alloc) kfree(opts->fn_cap); if (opts->fn_cntl_alloc) kfree(opts->fn_cntl); kfree(opts); } static struct usb_function_instance *f_audio_alloc_inst(void) { struct f_uac1_legacy_opts *opts; opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) return ERR_PTR(-ENOMEM); mutex_init(&opts->lock); opts->func_inst.free_func_inst = f_audio_free_inst; config_group_init_type_name(&opts->func_inst.group, "", &f_uac1_func_type); opts->req_buf_size = UAC1_OUT_EP_MAX_PACKET_SIZE; opts->req_count = UAC1_REQ_COUNT; opts->audio_buf_size = UAC1_AUDIO_BUF_SIZE; opts->fn_play = FILE_PCM_PLAYBACK; opts->fn_cap = FILE_PCM_CAPTURE; opts->fn_cntl = FILE_CONTROL; return &opts->func_inst; } static void f_audio_free(struct usb_function *f) { struct f_audio *audio = func_to_audio(f); struct f_uac1_legacy_opts *opts; gaudio_cleanup(&audio->card); opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst); kfree(audio); mutex_lock(&opts->lock); --opts->refcnt; mutex_unlock(&opts->lock); } static void f_audio_unbind(struct usb_configuration *c, struct usb_function *f) { usb_free_all_descriptors(f); } static struct usb_function *f_audio_alloc(struct usb_function_instance *fi) { struct f_audio *audio; struct f_uac1_legacy_opts *opts; /* allocate and initialize one new instance */ audio = kzalloc(sizeof(*audio), GFP_KERNEL); if (!audio) return ERR_PTR(-ENOMEM); audio->card.func.name = "g_audio"; opts = container_of(fi, struct f_uac1_legacy_opts, func_inst); mutex_lock(&opts->lock); ++opts->refcnt; mutex_unlock(&opts->lock); INIT_LIST_HEAD(&audio->play_queue); spin_lock_init(&audio->lock); audio->card.func.bind = f_audio_bind; audio->card.func.unbind = f_audio_unbind; audio->card.func.set_alt = f_audio_set_alt; audio->card.func.get_alt = f_audio_get_alt; audio->card.func.setup = f_audio_setup; audio->card.func.disable = f_audio_disable; audio->card.func.free_func = f_audio_free; control_selector_init(audio); INIT_WORK(&audio->playback_work, f_audio_playback_work); return &audio->card.func; } DECLARE_USB_FUNCTION_INIT(uac1_legacy, f_audio_alloc_inst, f_audio_alloc); MODULE_DESCRIPTION("USB Audio class function driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Bryan Wu");
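/*
 * Illustrative sketch, not part of the driver above (the helper name is
 * hypothetical): UAC 1.0 encodes volume as a signed value in 1/256 dB steps,
 * so the defaults programmed by control_selector_init() decode to
 * CUR = 0xffc0 -> -0.25 dB, MIN = 0xe3a0 -> -28.375 dB,
 * MAX = 0xfff0 -> -0.0625 dB and RES = 0x0030 -> 0.1875 dB.
 */
static inline int uac1_volume_to_millidb(u16 raw)
{
	s16 steps = (s16)raw;	/* signed 8.8 fixed point, 1/256 dB per step */

	return ((int)steps * 1000) / 256;	/* e.g. 0xe3a0 -> -28375 mdB */
}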
/* SPDX-License-Identifier: GPL-2.0 * * Copyright 2016-2018 HabanaLabs, Ltd. * All Rights Reserved. * */ /************************************ ** This is an auto-generated file ** ** DO NOT EDIT BELOW ** ************************************/ #ifndef ASIC_REG_TPC7_CMDQ_REGS_H_ #define ASIC_REG_TPC7_CMDQ_REGS_H_ /* ***************************************** * TPC7_CMDQ (Prototype: CMDQ) ***************************************** */ #define mmTPC7_CMDQ_GLBL_CFG0 0xFC9000 #define mmTPC7_CMDQ_GLBL_CFG1 0xFC9004 #define mmTPC7_CMDQ_GLBL_PROT 0xFC9008 #define mmTPC7_CMDQ_GLBL_ERR_CFG 0xFC900C #define mmTPC7_CMDQ_GLBL_ERR_ADDR_LO 0xFC9010 #define mmTPC7_CMDQ_GLBL_ERR_ADDR_HI 0xFC9014 #define mmTPC7_CMDQ_GLBL_ERR_WDATA 0xFC9018 #define mmTPC7_CMDQ_GLBL_SECURE_PROPS 0xFC901C #define mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS 0xFC9020 #define mmTPC7_CMDQ_GLBL_STS0 0xFC9024 #define mmTPC7_CMDQ_GLBL_STS1 0xFC9028 #define mmTPC7_CMDQ_CQ_CFG0 0xFC90B0 #define mmTPC7_CMDQ_CQ_CFG1 0xFC90B4 #define mmTPC7_CMDQ_CQ_ARUSER 0xFC90B8 #define mmTPC7_CMDQ_CQ_PTR_LO 0xFC90C0 #define mmTPC7_CMDQ_CQ_PTR_HI 0xFC90C4 #define mmTPC7_CMDQ_CQ_TSIZE 0xFC90C8 #define mmTPC7_CMDQ_CQ_CTL 0xFC90CC #define mmTPC7_CMDQ_CQ_PTR_LO_STS 0xFC90D4 #define mmTPC7_CMDQ_CQ_PTR_HI_STS 0xFC90D8 #define mmTPC7_CMDQ_CQ_TSIZE_STS 0xFC90DC #define mmTPC7_CMDQ_CQ_CTL_STS 0xFC90E0 #define mmTPC7_CMDQ_CQ_STS0 0xFC90E4 #define mmTPC7_CMDQ_CQ_STS1 0xFC90E8 #define mmTPC7_CMDQ_CQ_RD_RATE_LIM_EN 0xFC90F0 #define mmTPC7_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xFC90F4 #define mmTPC7_CMDQ_CQ_RD_RATE_LIM_SAT 0xFC90F8 #define mmTPC7_CMDQ_CQ_RD_RATE_LIM_TOUT 0xFC90FC #define mmTPC7_CMDQ_CQ_IFIFO_CNT 0xFC9108 #define mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_LO 0xFC9120 #define mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_HI 0xFC9124 #define mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_LO 0xFC9128 #define mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_HI 0xFC912C #define mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_LO 0xFC9130 #define mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_HI 0xFC9134 #define mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_LO 0xFC9138 #define mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_HI 0xFC913C #define mmTPC7_CMDQ_CP_LDMA_TSIZE_OFFSET 0xFC9140 #define mmTPC7_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xFC9144 #define mmTPC7_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xFC9148 #define mmTPC7_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xFC914C #define mmTPC7_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xFC9150 #define mmTPC7_CMDQ_CP_LDMA_COMMIT_OFFSET 0xFC9154 #define mmTPC7_CMDQ_CP_FENCE0_RDATA 0xFC9158 #define mmTPC7_CMDQ_CP_FENCE1_RDATA 0xFC915C #define mmTPC7_CMDQ_CP_FENCE2_RDATA 0xFC9160 #define mmTPC7_CMDQ_CP_FENCE3_RDATA 0xFC9164 #define mmTPC7_CMDQ_CP_FENCE0_CNT 0xFC9168 #define mmTPC7_CMDQ_CP_FENCE1_CNT 0xFC916C #define mmTPC7_CMDQ_CP_FENCE2_CNT 0xFC9170 #define mmTPC7_CMDQ_CP_FENCE3_CNT 0xFC9174 #define mmTPC7_CMDQ_CP_STS 0xFC9178 #define mmTPC7_CMDQ_CP_CURRENT_INST_LO 0xFC917C #define mmTPC7_CMDQ_CP_CURRENT_INST_HI 0xFC9180 #define mmTPC7_CMDQ_CP_BARRIER_CFG 0xFC9184 #define mmTPC7_CMDQ_CP_DBG_0 0xFC9188 #define mmTPC7_CMDQ_CQ_BUF_ADDR 0xFC9308 #define mmTPC7_CMDQ_CQ_BUF_RDATA 0xFC930C #endif /* ASIC_REG_TPC7_CMDQ_REGS_H_ */
// SPDX-License-Identifier: GPL-2.0-or-later /* * Freescale UPM NAND driver. * * Copyright © 2007-2008 MontaVista Software, Inc. * * Author: Anton Vorontsov <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/mtd/rawnand.h> #include <linux/mtd/partitions.h> #include <linux/mtd/mtd.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/slab.h> #include <asm/fsl_lbc.h> struct fsl_upm_nand { struct nand_controller base; struct device *dev; struct nand_chip chip; struct fsl_upm upm; uint8_t upm_addr_offset; uint8_t upm_cmd_offset; void __iomem *io_base; struct gpio_desc *rnb_gpio[NAND_MAX_CHIPS]; uint32_t mchip_offsets[NAND_MAX_CHIPS]; uint32_t mchip_count; uint32_t mchip_number; }; static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo) { return container_of(mtd_to_nand(mtdinfo), struct fsl_upm_nand, chip); } static int fun_chip_init(struct fsl_upm_nand *fun, const struct device_node *upm_np, const struct resource *io_res) { struct mtd_info *mtd = nand_to_mtd(&fun->chip); int ret; struct device_node *flash_np; fun->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; fun->chip.ecc.algo = NAND_ECC_ALGO_HAMMING; fun->chip.controller = &fun->base; mtd->dev.parent = fun->dev; flash_np = of_get_next_child(upm_np, NULL); if (!flash_np) return -ENODEV; nand_set_flash_node(&fun->chip, flash_np); mtd->name = devm_kasprintf(fun->dev, GFP_KERNEL, "0x%llx.%pOFn", (u64)io_res->start, flash_np); if (!mtd->name) { ret = -ENOMEM; goto err; } ret = nand_scan(&fun->chip, fun->mchip_count); if (ret) goto err; ret = mtd_device_register(mtd, NULL, 0); err: of_node_put(flash_np); return ret; } static int func_exec_instr(struct nand_chip *chip, const struct nand_op_instr *instr) { struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip)); u32 mar, reg_offs = fun->mchip_offsets[fun->mchip_number]; unsigned int i; const u8 *out; u8 *in; switch (instr->type) { case NAND_OP_CMD_INSTR: fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset); mar = (instr->ctx.cmd.opcode << (32 - fun->upm.width)) | reg_offs; fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar); fsl_upm_end_pattern(&fun->upm); return 0; case NAND_OP_ADDR_INSTR: fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset); for (i = 0; i < instr->ctx.addr.naddrs; i++) { mar = (instr->ctx.addr.addrs[i] << (32 - fun->upm.width)) | reg_offs; fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar); } fsl_upm_end_pattern(&fun->upm); return 0; case NAND_OP_DATA_IN_INSTR: in = instr->ctx.data.buf.in; for (i = 0; i < instr->ctx.data.len; i++) in[i] = in_8(fun->io_base + reg_offs); return 0; case NAND_OP_DATA_OUT_INSTR: out = instr->ctx.data.buf.out; for (i = 0; i < instr->ctx.data.len; i++) out_8(fun->io_base + reg_offs, out[i]); return 0; case NAND_OP_WAITRDY_INSTR: if (!fun->rnb_gpio[fun->mchip_number]) return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms); return nand_gpio_waitrdy(chip, fun->rnb_gpio[fun->mchip_number], instr->ctx.waitrdy.timeout_ms); default: return -EINVAL; } return 0; } static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) { struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip)); unsigned int i; int ret; if (op->cs >= NAND_MAX_CHIPS) return -EINVAL; if (check_only) return 0; fun->mchip_number = op->cs; for (i = 0; i < op->ninstrs; i++) { ret = func_exec_instr(chip, &op->instrs[i]); if (ret) return ret; if (op->instrs[i].delay_ns) 
ndelay(op->instrs[i].delay_ns); } return 0; } static const struct nand_controller_ops fun_ops = { .exec_op = fun_exec_op, }; static int fun_probe(struct platform_device *ofdev) { struct fsl_upm_nand *fun; struct resource *io_res; const __be32 *prop; int ret; int size; int i; fun = devm_kzalloc(&ofdev->dev, sizeof(*fun), GFP_KERNEL); if (!fun) return -ENOMEM; fun->io_base = devm_platform_get_and_ioremap_resource(ofdev, 0, &io_res); if (IS_ERR(fun->io_base)) return PTR_ERR(fun->io_base); ret = fsl_upm_find(io_res->start, &fun->upm); if (ret) { dev_err(&ofdev->dev, "can't find UPM\n"); return ret; } prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset", &size); if (!prop || size != sizeof(uint32_t)) { dev_err(&ofdev->dev, "can't get UPM address offset\n"); return -EINVAL; } fun->upm_addr_offset = *prop; prop = of_get_property(ofdev->dev.of_node, "fsl,upm-cmd-offset", &size); if (!prop || size != sizeof(uint32_t)) { dev_err(&ofdev->dev, "can't get UPM command offset\n"); return -EINVAL; } fun->upm_cmd_offset = *prop; prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-line-cs-offsets", &size); if (prop && (size / sizeof(uint32_t)) > 0) { fun->mchip_count = size / sizeof(uint32_t); if (fun->mchip_count >= NAND_MAX_CHIPS) { dev_err(&ofdev->dev, "too many chips\n"); return -EINVAL; } for (i = 0; i < fun->mchip_count; i++) fun->mchip_offsets[i] = be32_to_cpu(prop[i]); } else { fun->mchip_count = 1; } for (i = 0; i < fun->mchip_count; i++) { fun->rnb_gpio[i] = devm_gpiod_get_index_optional(&ofdev->dev, NULL, i, GPIOD_IN); if (IS_ERR(fun->rnb_gpio[i])) { dev_err(&ofdev->dev, "RNB gpio #%d is invalid\n", i); return PTR_ERR(fun->rnb_gpio[i]); } } nand_controller_init(&fun->base); fun->base.ops = &fun_ops; fun->dev = &ofdev->dev; ret = fun_chip_init(fun, ofdev->dev.of_node, io_res); if (ret) return ret; dev_set_drvdata(&ofdev->dev, fun); return 0; } static void fun_remove(struct platform_device *ofdev) { struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev); struct nand_chip *chip = &fun->chip; struct mtd_info *mtd = nand_to_mtd(chip); int ret; ret = mtd_device_unregister(mtd); WARN_ON(ret); nand_cleanup(chip); } static const struct of_device_id of_fun_match[] = { { .compatible = "fsl,upm-nand" }, {}, }; MODULE_DEVICE_TABLE(of, of_fun_match); static struct platform_driver of_fun_driver = { .driver = { .name = "fsl,upm-nand", .of_match_table = of_fun_match, }, .probe = fun_probe, .remove = fun_remove, }; module_platform_driver(of_fun_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Anton Vorontsov <[email protected]>"); MODULE_DESCRIPTION("Driver for NAND chips working through Freescale " "LocalBus User-Programmable Machine");
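/*
 * Illustrative sketch, not part of the driver above (the helper is
 * hypothetical): func_exec_instr() must place the command or address byte on
 * the most significant lines of the UPM MAR register, which is what the
 * "<< (32 - fun->upm.width)" shift does. For an 8-bit UPM, sending
 * NAND_CMD_READID (0x90) to a chip whose mchip_offsets[] entry is 0 yields
 * mar == 0x90000000.
 */
static u32 fun_example_mar(u8 byte, unsigned int upm_width, u32 reg_offs)
{
	return ((u32)byte << (32 - upm_width)) | reg_offs;
}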
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef FWH_LOCK_H #define FWH_LOCK_H enum fwh_lock_state { FWH_UNLOCKED = 0, FWH_DENY_WRITE = 1, FWH_IMMUTABLE = 2, FWH_DENY_READ = 4, }; struct fwh_xxlock_thunk { enum fwh_lock_state val; flstate_t state; }; #define FWH_XXLOCK_ONEBLOCK_LOCK ((struct fwh_xxlock_thunk){ FWH_DENY_WRITE, FL_LOCKING}) #define FWH_XXLOCK_ONEBLOCK_UNLOCK ((struct fwh_xxlock_thunk){ FWH_UNLOCKED, FL_UNLOCKING}) /* * This locking/unlocking is specific to firmware hub parts. Only one * is known that supports the Intel command set. Firmware * hub parts cannot be interleaved as they are on the LPC bus * so this code has not been tested with interleaved chips, * and will likely fail in that context. */ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) { struct cfi_private *cfi = map->fldrv_priv; struct fwh_xxlock_thunk *xxlt = (struct fwh_xxlock_thunk *)thunk; int ret; /* Refuse the operation if we cannot look behind the chip */ if (chip->start < 0x400000) { pr_debug( "MTD %s(): chip->start: %lx wanted >= 0x400000\n", __func__, chip->start ); return -EIO; } /* * lock block registers: * - on 64k boundaries and * - bit 1 set high * - block lock registers are 4MiB lower - overflow subtract (danger) * * The address manipulation is first done on the logical address * which is 0 at the start of the chip, and then the offset of * the individual chip is added to it. In any other order, a weird * map offset could cause problems. */ adr = (adr & ~0xffffUL) | 0x2; adr += chip->start - 0x400000; /* * This is easy because these are writes to registers and not writes * to flash memory - that means that we don't have to check status * and timeout. */ mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr, FL_LOCKING); if (ret) { mutex_unlock(&chip->mutex); return ret; } chip->oldstate = chip->state; chip->state = xxlt->state; map_write(map, CMD(xxlt->val), adr); /* Done and happy. */ chip->state = chip->oldstate; put_chip(map, chip, adr); mutex_unlock(&chip->mutex); return 0; } static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len) { int ret; ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len, (void *)&FWH_XXLOCK_ONEBLOCK_LOCK); return ret; } static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len) { int ret; ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len, (void *)&FWH_XXLOCK_ONEBLOCK_UNLOCK); return ret; } static void fixup_use_fwh_lock(struct mtd_info *mtd) { printk(KERN_NOTICE "using fwh lock/unlock method\n"); /* Setup for the chips with the fwh lock method */ mtd->_lock = fwh_lock_varsize; mtd->_unlock = fwh_unlock_varsize; } #endif /* FWH_LOCK_H */
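/*
 * Illustrative sketch, not part of the header above (the helper is
 * hypothetical): with the address arithmetic used in fwh_xxlock_oneblock(),
 * a chip mapped at start == 0x400000 with a block at chip-relative offset
 * 0x30000 has its data at map offset 0x430000, while its block-lock register
 * is written at map offset 0x30002, i.e. 4 MiB lower with bit 1 set.
 */
static unsigned long fwh_example_lock_reg(unsigned long adr, unsigned long start)
{
	return ((adr & ~0xffffUL) | 0x2) + start - 0x400000;
}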
// SPDX-License-Identifier: GPL-2.0 #include <linux/seq_file.h> #include <linux/debugfs.h> #include "nitrox_csr.h" #include "nitrox_debugfs.h" #include "nitrox_dev.h" static int firmware_show(struct seq_file *s, void *v) { struct nitrox_device *ndev = s->private; seq_printf(s, "Version: %s\n", ndev->hw.fw_name[0]); seq_printf(s, "Version: %s\n", ndev->hw.fw_name[1]); return 0; } DEFINE_SHOW_ATTRIBUTE(firmware); static int device_show(struct seq_file *s, void *v) { struct nitrox_device *ndev = s->private; seq_printf(s, "NITROX [%d]\n", ndev->idx); seq_printf(s, " Part Name: %s\n", ndev->hw.partname); seq_printf(s, " Frequency: %d MHz\n", ndev->hw.freq); seq_printf(s, " Device ID: 0x%0x\n", ndev->hw.device_id); seq_printf(s, " Revision ID: 0x%0x\n", ndev->hw.revision_id); seq_printf(s, " Cores: [AE=%u SE=%u ZIP=%u]\n", ndev->hw.ae_cores, ndev->hw.se_cores, ndev->hw.zip_cores); return 0; } DEFINE_SHOW_ATTRIBUTE(device); static int stats_show(struct seq_file *s, void *v) { struct nitrox_device *ndev = s->private; seq_printf(s, "NITROX [%d] Request Statistics\n", ndev->idx); seq_printf(s, " Posted: %llu\n", (u64)atomic64_read(&ndev->stats.posted)); seq_printf(s, " Completed: %llu\n", (u64)atomic64_read(&ndev->stats.completed)); seq_printf(s, " Dropped: %llu\n", (u64)atomic64_read(&ndev->stats.dropped)); return 0; } DEFINE_SHOW_ATTRIBUTE(stats); void nitrox_debugfs_exit(struct nitrox_device *ndev) { debugfs_remove_recursive(ndev->debugfs_dir); ndev->debugfs_dir = NULL; } void nitrox_debugfs_init(struct nitrox_device *ndev) { struct dentry *dir; dir = debugfs_create_dir(KBUILD_MODNAME, NULL); ndev->debugfs_dir = dir; debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops); debugfs_create_file("device", 0400, dir, ndev, &device_fops); debugfs_create_file("stats", 0400, dir, ndev, &stats_fops); }
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __NET_GEN_STATS_H #define __NET_GEN_STATS_H #include <linux/gen_stats.h> #include <linux/socket.h> #include <linux/rtnetlink.h> #include <linux/pkt_sched.h> /* Throughput stats. * Must be initialized beforehand with gnet_stats_basic_sync_init(). * * If no reads can ever occur parallel to writes (e.g. stack-allocated * bstats), then the internal stat values can be written to and read * from directly. Otherwise, use _bstats_set/update() for writes and * gnet_stats_add_basic() for reads. */ struct gnet_stats_basic_sync { u64_stats_t bytes; u64_stats_t packets; struct u64_stats_sync syncp; } __aligned(2 * sizeof(u64)); struct net_rate_estimator; struct gnet_dump { spinlock_t * lock; struct sk_buff * skb; struct nlattr * tail; /* Backward compatibility */ int compat_tc_stats; int compat_xstats; int padattr; void * xstats; int xstats_len; struct tc_stats tc_stats; }; void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b); int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, struct gnet_dump *d, int padattr); int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type, int xstats_type, spinlock_t *lock, struct gnet_dump *d, int padattr); int gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_sync __percpu *cpu, struct gnet_stats_basic_sync *b, bool running); void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats, struct gnet_stats_basic_sync __percpu *cpu, struct gnet_stats_basic_sync *b, bool running); int gnet_stats_copy_basic_hw(struct gnet_dump *d, struct gnet_stats_basic_sync __percpu *cpu, struct gnet_stats_basic_sync *b, bool running); int gnet_stats_copy_rate_est(struct gnet_dump *d, struct net_rate_estimator __rcu **ptr); int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue __percpu *cpu_q, struct gnet_stats_queue *q, __u32 qlen); void gnet_stats_add_queue(struct gnet_stats_queue *qstats, const struct gnet_stats_queue __percpu *cpu_q, const struct gnet_stats_queue *q); int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); int gnet_stats_finish_copy(struct gnet_dump *d); int gen_new_estimator(struct gnet_stats_basic_sync *bstats, struct gnet_stats_basic_sync __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, spinlock_t *lock, bool running, struct nlattr *opt); void gen_kill_estimator(struct net_rate_estimator __rcu **ptr); int gen_replace_estimator(struct gnet_stats_basic_sync *bstats, struct gnet_stats_basic_sync __percpu *cpu_bstats, struct net_rate_estimator __rcu **ptr, spinlock_t *lock, bool running, struct nlattr *opt); bool gen_estimator_active(struct net_rate_estimator __rcu **ptr); bool gen_estimator_read(struct net_rate_estimator __rcu **ptr, struct gnet_stats_rate_est64 *sample); #endif
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ */ &am33xx_pinmux { cpsw_default: cpsw-default-pins { pinctrl-single,pins = < /* Slave 1 */ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txen.rgmii1_tctl */ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd3.rgmii1_td3 */ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd2.rgmii1_td2 */ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd1.rgmii1_td1 */ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd0.rgmii1_td0 */ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txclk.rgmii1_tclk */ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxclk.rgmii1_rclk */ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd3.rgmii1_rd3 */ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd2.rgmii1_rd2 */ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd1.rgmii1_rd1 */ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd0.rgmii1_rd0 */ >; }; cpsw_sleep: cpsw-sleep-pins { pinctrl-single,pins = < /* Slave 1 reset value */ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7) >; }; usb_hub_ctrl: usb-hub-ctrl-pins { pinctrl-single,pins = < AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_OUTPUT_PULLUP, MUX_MODE7) /* rmii1_refclk.gpio0_29 */ >; }; }; &mac_sw { pinctrl-0 = <&cpsw_default>; pinctrl-1 = <&cpsw_sleep>; }; &cpsw_port1 { phy-mode = "rgmii-id"; }; &i2c0 { usb2512b: usb-hub@2c { pinctrl-names = "default"; pinctrl-0 = <&usb_hub_ctrl>; compatible = "microchip,usb2512b"; reg = <0x2c>; reset-gpios = <&gpio0 29 GPIO_ACTIVE_LOW>; }; };
// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Copyright (c) 2021 BayLibre SAS * Author: Neil Armstrong <[email protected]> */ /dts-v1/; #include "meson-sm1-bananapi.dtsi" #include <dt-bindings/sound/meson-g12a-toacodec.h> #include <dt-bindings/sound/meson-g12a-tohdmitx.h> / { compatible = "bananapi,bpi-m5", "amlogic,sm1"; model = "Banana Pi BPI-M5"; /* TOFIX: handle CVBS_DET on SARADC channel 0 */ cvbs-connector { compatible = "composite-video-connector"; port { cvbs_connector_in: endpoint { remote-endpoint = <&cvbs_vdac_out>; }; }; }; sound { compatible = "amlogic,axg-sound-card"; model = "BPI-M5"; audio-widgets = "Line", "Lineout"; audio-aux-devs = <&tdmout_b>, <&tdmout_c>, <&tdmin_a>, <&tdmin_b>, <&tdmin_c>; audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1", "TDMOUT_B IN 1", "FRDDR_B OUT 1", "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT", "TDMOUT_C IN 0", "FRDDR_A OUT 2", "TDMOUT_C IN 1", "FRDDR_B OUT 2", "TDMOUT_C IN 2", "FRDDR_C OUT 2", "TDM_C Playback", "TDMOUT_C OUT", "TDMIN_A IN 4", "TDM_B Loopback", "TDMIN_B IN 4", "TDM_B Loopback", "TDMIN_C IN 4", "TDM_B Loopback", "TDMIN_A IN 5", "TDM_C Loopback", "TDMIN_B IN 5", "TDM_C Loopback", "TDMIN_C IN 5", "TDM_C Loopback", "TODDR_A IN 0", "TDMIN_A OUT", "TODDR_B IN 0", "TDMIN_A OUT", "TODDR_C IN 0", "TDMIN_A OUT", "TODDR_A IN 1", "TDMIN_B OUT", "TODDR_B IN 1", "TDMIN_B OUT", "TODDR_C IN 1", "TDMIN_B OUT", "TODDR_A IN 2", "TDMIN_C OUT", "TODDR_B IN 2", "TDMIN_C OUT", "TODDR_C IN 2", "TDMIN_C OUT", "Lineout", "ACODEC LOLP", "Lineout", "ACODEC LORP"; clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; assigned-clock-parents = <0>, <0>, <0>; assigned-clock-rates = <294912000>, <270950400>, <393216000>; dai-link-0 { sound-dai = <&frddr_a>; }; dai-link-1 { sound-dai = <&frddr_b>; }; dai-link-2 { sound-dai = <&frddr_c>; }; dai-link-3 { sound-dai = <&toddr_a>; }; dai-link-4 { sound-dai = <&toddr_b>; }; dai-link-5 { sound-dai = <&toddr_c>; }; /* 8ch hdmi interface */ dai-link-6 { sound-dai = <&tdmif_b>; dai-format = "i2s"; dai-tdm-slot-tx-mask-0 = <1 1>; dai-tdm-slot-tx-mask-1 = <1 1>; dai-tdm-slot-tx-mask-2 = <1 1>; dai-tdm-slot-tx-mask-3 = <1 1>; mclk-fs = <256>; codec-0 { sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>; }; codec-1 { sound-dai = <&toacodec TOACODEC_IN_B>; }; }; /* i2s jack output interface */ dai-link-7 { sound-dai = <&tdmif_c>; dai-format = "i2s"; dai-tdm-slot-tx-mask-0 = <1 1>; mclk-fs = <256>; codec-0 { sound-dai = <&tohdmitx TOHDMITX_I2S_IN_C>; }; codec-1 { sound-dai = <&toacodec TOACODEC_IN_C>; }; }; /* hdmi glue */ dai-link-8 { sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>; codec { sound-dai = <&hdmi_tx>; }; }; /* acodec glue */ dai-link-9 { sound-dai = <&toacodec TOACODEC_OUT>; codec { sound-dai = <&acodec>; }; }; }; }; &acodec { AVDD-supply = <&vddao_1v8>; status = "okay"; }; &clkc_audio { status = "okay"; }; &cvbs_vdac_port { cvbs_vdac_out: endpoint { remote-endpoint = <&cvbs_connector_in>; }; }; &frddr_a { status = "okay"; }; &frddr_b { status = "okay"; }; &frddr_c { status = "okay"; }; &tdmif_b { status = "okay"; }; &tdmif_c { status = "okay"; }; &tdmin_a { status = "okay"; }; &tdmin_b { status = "okay"; }; &tdmin_c { status = "okay"; }; &tdmout_b { status = "okay"; }; &tdmout_c { status = "okay"; }; &toacodec { status = "okay"; }; &tohdmitx { status = "okay"; }; &toddr_a { status = "okay"; }; &toddr_b { status = "okay"; }; &toddr_c { status = "okay"; };
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * This is "two-way" interface, acting as the SHIM layer between driver * and PHY layer. The driver can optionally call this translation layer * to do some preprocessing, then reach PHY. On the PHY->driver direction, * all calls go through this layer since PHY doesn't have access to the * driver's brcms_hardware pointer. */ #include <linux/slab.h> #include <net/mac80211.h> #include "main.h" #include "mac80211_if.h" #include "phy_shim.h" /* PHY SHIM module specific state */ struct phy_shim_info { struct brcms_hardware *wlc_hw; /* pointer to main wlc_hw structure */ struct brcms_c_info *wlc; /* pointer to main wlc structure */ struct brcms_info *wl; /* pointer to os-specific private state */ }; struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw, struct brcms_info *wl, struct brcms_c_info *wlc) { struct phy_shim_info *physhim; physhim = kzalloc(sizeof(*physhim), GFP_ATOMIC); if (!physhim) return NULL; physhim->wlc_hw = wlc_hw; physhim->wlc = wlc; physhim->wl = wl; return physhim; } void wlc_phy_shim_detach(struct phy_shim_info *physhim) { kfree(physhim); } struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim, void (*fn)(void *pi), void *arg, const char *name) { return (struct wlapi_timer *) brcms_init_timer(physhim->wl, fn, arg, name); } void wlapi_free_timer(struct wlapi_timer *t) { brcms_free_timer((struct brcms_timer *)t); } void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic) { brcms_add_timer((struct brcms_timer *)t, ms, periodic); } bool wlapi_del_timer(struct wlapi_timer *t) { return brcms_del_timer((struct brcms_timer *)t); } void wlapi_intrson(struct phy_shim_info *physhim) { brcms_intrson(physhim->wl); } u32 wlapi_intrsoff(struct phy_shim_info *physhim) { return brcms_intrsoff(physhim->wl); } void wlapi_intrsrestore(struct phy_shim_info *physhim, u32 macintmask) { brcms_intrsrestore(physhim->wl, macintmask); } void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset, u16 v) { brcms_b_write_shm(physhim->wlc_hw, offset, v); } u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset) { return brcms_b_read_shm(physhim->wlc_hw, offset); } void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx, u16 mask, u16 val, int bands) { brcms_b_mhf(physhim->wlc_hw, idx, mask, val, bands); } void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags) { brcms_b_corereset(physhim->wlc_hw, flags); } void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim) { brcms_c_suspend_mac_and_wait(physhim->wlc); } void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode) { brcms_b_switch_macfreq(physhim->wlc_hw, spurmode); } void wlapi_enable_mac(struct phy_shim_info *physhim) { brcms_c_enable_mac(physhim->wlc); } void wlapi_bmac_mctrl(struct phy_shim_info 
*physhim, u32 mask, u32 val) { brcms_b_mctrl(physhim->wlc_hw, mask, val); } void wlapi_bmac_phy_reset(struct phy_shim_info *physhim) { brcms_b_phy_reset(physhim->wlc_hw); } void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw) { brcms_b_bw_set(physhim->wlc_hw, bw); } u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim) { return brcms_b_get_txant(physhim->wlc_hw); } void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk) { brcms_b_phyclk_fgc(physhim->wlc_hw, clk); } void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk) { brcms_b_macphyclk_set(physhim->wlc_hw, clk); } void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on) { brcms_b_core_phypll_ctl(physhim->wlc_hw, on); } void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim) { brcms_b_core_phypll_reset(physhim->wlc_hw); } void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *physhim) { brcms_c_ucode_wake_override_set(physhim->wlc_hw, BRCMS_WAKE_OVERRIDE_PHYREG); } void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *physhim) { brcms_c_ucode_wake_override_clear(physhim->wlc_hw, BRCMS_WAKE_OVERRIDE_PHYREG); } void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int offset, int len, void *buf) { brcms_b_write_template_ram(physhim->wlc_hw, offset, len, buf); } u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim, u8 rate) { return brcms_b_rate_shm_offset(physhim->wlc_hw, rate); } void wlapi_ucode_sample_init(struct phy_shim_info *physhim) { } void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint offset, void *buf, int len, u32 sel) { brcms_b_copyfrom_objmem(physhim->wlc_hw, offset, buf, len, sel); } void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint offset, const void *buf, int l, u32 sel) { brcms_b_copyto_objmem(physhim->wlc_hw, offset, buf, l, sel); }
// SPDX-License-Identifier: GPL-2.0-or-later /* * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC * * Copyright (C) 2013-2015 Corentin LABBE <[email protected]> * * This file add support for MD5 and SHA1. * * You could find the datasheet in Documentation/arch/arm/sunxi.rst */ #include "sun4i-ss.h" #include <linux/unaligned.h> #include <linux/scatterlist.h> /* This is a totally arbitrary value */ #define SS_TIMEOUT 100 int sun4i_hash_crainit(struct crypto_tfm *tfm) { struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); struct sun4i_ss_alg_template *algt; int err; memset(op, 0, sizeof(struct sun4i_tfm_ctx)); algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); op->ss = algt->ss; err = pm_runtime_resume_and_get(op->ss->dev); if (err < 0) return err; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct sun4i_req_ctx)); return 0; } void sun4i_hash_craexit(struct crypto_tfm *tfm) { struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); pm_runtime_put(op->ss->dev); } /* sun4i_hash_init: initialize request context */ int sun4i_hash_init(struct ahash_request *areq) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); struct sun4i_ss_alg_template *algt; memset(op, 0, sizeof(struct sun4i_req_ctx)); algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); op->mode = algt->mode; return 0; } int sun4i_hash_export_md5(struct ahash_request *areq, void *out) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); struct md5_state *octx = out; int i; octx->byte_count = op->byte_count + op->len; memcpy(octx->block, op->buf, op->len); if (op->byte_count) { for (i = 0; i < 4; i++) octx->hash[i] = op->hash[i]; } else { octx->hash[0] = SHA1_H0; octx->hash[1] = SHA1_H1; octx->hash[2] = SHA1_H2; octx->hash[3] = SHA1_H3; } return 0; } int sun4i_hash_import_md5(struct ahash_request *areq, const void *in) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); const struct md5_state *ictx = in; int i; sun4i_hash_init(areq); op->byte_count = ictx->byte_count & ~0x3F; op->len = ictx->byte_count & 0x3F; memcpy(op->buf, ictx->block, op->len); for (i = 0; i < 4; i++) op->hash[i] = ictx->hash[i]; return 0; } int sun4i_hash_export_sha1(struct ahash_request *areq, void *out) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); struct sha1_state *octx = out; int i; octx->count = op->byte_count + op->len; memcpy(octx->buffer, op->buf, op->len); if (op->byte_count) { for (i = 0; i < 5; i++) octx->state[i] = op->hash[i]; } else { octx->state[0] = SHA1_H0; octx->state[1] = SHA1_H1; octx->state[2] = SHA1_H2; octx->state[3] = SHA1_H3; octx->state[4] = SHA1_H4; } return 0; } int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); const struct sha1_state *ictx = in; int i; sun4i_hash_init(areq); op->byte_count = ictx->count & ~0x3F; op->len = ictx->count & 0x3F; memcpy(op->buf, ictx->buffer, op->len); for (i = 0; i < 5; i++) op->hash[i] = ictx->state[i]; return 0; } #define SS_HASH_UPDATE 1 #define SS_HASH_FINAL 2 /* * sun4i_hash_update: update hash engine * * Could be used for both SHA1 and MD5 * Write data by step of 32bits and put then in the SS. * * Since we cannot leave partial data and hash state in the engine, * we need to get the hash state at the end of this function. 
* We can get the hash state every 64 bytes * * So the first work is to get the number of bytes to write to SS modulo 64 * The extra bytes will go to a temporary buffer op->buf storing op->len bytes * * So at the begin of update() * if op->len + areq->nbytes < 64 * => all data will be written to wait buffer (op->buf) and end=0 * if not, write all data from op->buf to the device and position end to * complete to 64bytes * * example 1: * update1 60o => op->len=60 * update2 60o => need one more word to have 64 bytes * end=4 * so write all data from op->buf and one word of SGs * write remaining data in op->buf * final state op->len=56 */ static int sun4i_hash(struct ahash_request *areq) { /* * i is the total bytes read from SGs, to be compared to areq->nbytes * i is important because we cannot rely on SG length since the sum of * SG->length could be greater than areq->nbytes * * end is the position when we need to stop writing to the device, * to be compared to i * * in_i: advancement in the current SG */ unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo; unsigned int in_i = 0; u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0; struct sun4i_req_ctx *op = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); struct sun4i_ss_ctx *ss = tfmctx->ss; struct sun4i_ss_alg_template *algt; struct scatterlist *in_sg = areq->src; struct sg_mapping_iter mi; int in_r, err = 0; size_t copied = 0; u32 wb = 0; dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x", __func__, crypto_tfm_alg_name(areq->base.tfm), op->byte_count, areq->nbytes, op->mode, op->len, op->hash[0]); if (unlikely(!areq->nbytes) && !(op->flags & SS_HASH_FINAL)) return 0; /* protect against overflow */ if (unlikely(areq->nbytes > UINT_MAX - op->len)) { dev_err(ss->dev, "Cannot process too large request\n"); return -EINVAL; } if (op->len + areq->nbytes < 64 && !(op->flags & SS_HASH_FINAL)) { /* linearize data to op->buf */ copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), op->buf + op->len, areq->nbytes, 0); op->len += copied; return 0; } spin_lock_bh(&ss->slock); /* * if some data have been processed before, * we need to restore the partial hash state */ if (op->byte_count) { ivmode = SS_IV_ARBITRARY; for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) writel(op->hash[i], ss->base + SS_IV0 + i * 4); } /* Enable the device */ writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL); if (!(op->flags & SS_HASH_UPDATE)) goto hash_final; /* start of handling data */ if (!(op->flags & SS_HASH_FINAL)) { end = ((areq->nbytes + op->len) / 64) * 64 - op->len; if (end > areq->nbytes || areq->nbytes - end > 63) { dev_err(ss->dev, "ERROR: Bound error %u %u\n", end, areq->nbytes); err = -EINVAL; goto release_ss; } } else { /* Since we have the flag final, we can go up to modulo 4 */ if (areq->nbytes < 4) end = 0; else end = ((areq->nbytes + op->len) / 4) * 4 - op->len; } /* TODO if SGlen % 4 and !op->len then DMA */ i = 1; while (in_sg && i == 1) { if (in_sg->length % 4) i = 0; in_sg = sg_next(in_sg); } if (i == 1 && !op->len && areq->nbytes) dev_dbg(ss->dev, "We can DMA\n"); i = 0; sg_miter_start(&mi, areq->src, sg_nents(areq->src), SG_MITER_FROM_SG | SG_MITER_ATOMIC); sg_miter_next(&mi); in_i = 0; do { /* * we need to linearize in two case: * - the buffer is already used * - the SG does not have enough byte remaining ( < 4) */ if (op->len || (mi.length - 
in_i) < 4) { /* * if we have entered here we have two reason to stop * - the buffer is full * - reach the end */ while (op->len < 64 && i < end) { /* how many bytes we can read from current SG */ in_r = min(end - i, 64 - op->len); in_r = min_t(size_t, mi.length - in_i, in_r); memcpy(op->buf + op->len, mi.addr + in_i, in_r); op->len += in_r; i += in_r; in_i += in_r; if (in_i == mi.length) { sg_miter_next(&mi); in_i = 0; } } if (op->len > 3 && !(op->len % 4)) { /* write buf to the device */ writesl(ss->base + SS_RXFIFO, op->buf, op->len / 4); op->byte_count += op->len; op->len = 0; } } if (mi.length - in_i > 3 && i < end) { /* how many bytes we can read from current SG */ in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i); in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r); /* how many bytes we can write in the device*/ todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4); writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo); op->byte_count += todo * 4; i += todo * 4; in_i += todo * 4; rx_cnt -= todo; if (!rx_cnt) { spaces = readl(ss->base + SS_FCSR); rx_cnt = SS_RXFIFO_SPACES(spaces); } if (in_i == mi.length) { sg_miter_next(&mi); in_i = 0; } } } while (i < end); /* * Now we have written to the device all that we can, * store the remaining bytes in op->buf */ if ((areq->nbytes - i) < 64) { while (i < areq->nbytes && in_i < mi.length && op->len < 64) { /* how many bytes we can read from current SG */ in_r = min(areq->nbytes - i, 64 - op->len); in_r = min_t(size_t, mi.length - in_i, in_r); memcpy(op->buf + op->len, mi.addr + in_i, in_r); op->len += in_r; i += in_r; in_i += in_r; if (in_i == mi.length) { sg_miter_next(&mi); in_i = 0; } } } sg_miter_stop(&mi); /* * End of data process * Now if we have the flag final go to finalize part * If not, store the partial hash */ if (op->flags & SS_HASH_FINAL) goto hash_final; writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL); i = 0; do { v = readl(ss->base + SS_CTL); i++; } while (i < SS_TIMEOUT && (v & SS_DATA_END)); if (unlikely(i >= SS_TIMEOUT)) { dev_err_ratelimited(ss->dev, "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", i, SS_TIMEOUT, v, areq->nbytes); err = -EIO; goto release_ss; } /* * The datasheet isn't very clear about when to retrieve the digest. The * bit SS_DATA_END is cleared when the engine has processed the data and * when the digest is computed *but* it doesn't mean the digest is * available in the digest registers. Hence the delay to be sure we can * read it. */ ndelay(1); for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) op->hash[i] = readl(ss->base + SS_MD0 + i * 4); goto release_ss; /* * hash_final: finalize hashing operation * * If we have some remaining bytes, we write them. * Then ask the SS for finalizing the hashing operation * * I do not check RX FIFO size in this function since the size is 32 * after each enabling and this function neither write more than 32 words. * If we come from the update part, we cannot have more than * 3 remaining bytes to write and SS is fast enough to not care about it. 
*/ hash_final: if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) { algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); algt->stat_req++; } /* write the remaining words of the wait buffer */ if (op->len) { nwait = op->len / 4; if (nwait) { writesl(ss->base + SS_RXFIFO, op->buf, nwait); op->byte_count += 4 * nwait; } nbw = op->len - 4 * nwait; if (nbw) { wb = le32_to_cpup((__le32 *)(op->buf + nwait * 4)); wb &= GENMASK((nbw * 8) - 1, 0); op->byte_count += nbw; } } /* write the remaining bytes of the nbw buffer */ wb |= ((1 << 7) << (nbw * 8)); ((__le32 *)bf)[j++] = cpu_to_le32(wb); /* * number of space to pad to obtain 64o minus 8(size) minus 4 (final 1) * I take the operations from other MD5/SHA1 implementations */ /* last block size */ fill = 64 - (op->byte_count % 64); min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32)); /* if we can't fill all data, jump to the next 64 block */ if (fill < min_fill) fill += 64; j += (fill - min_fill) / sizeof(u32); /* write the length of data */ if (op->mode == SS_OP_SHA1) { __be64 *bits = (__be64 *)&bf[j]; *bits = cpu_to_be64(op->byte_count << 3); j += 2; } else { __le64 *bits = (__le64 *)&bf[j]; *bits = cpu_to_le64(op->byte_count << 3); j += 2; } writesl(ss->base + SS_RXFIFO, bf, j); /* Tell the SS to stop the hashing */ writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL); /* * Wait for SS to finish the hash. * The timeout could happen only in case of bad overclocking * or driver bug. */ i = 0; do { v = readl(ss->base + SS_CTL); i++; } while (i < SS_TIMEOUT && (v & SS_DATA_END)); if (unlikely(i >= SS_TIMEOUT)) { dev_err_ratelimited(ss->dev, "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", i, SS_TIMEOUT, v, areq->nbytes); err = -EIO; goto release_ss; } /* * The datasheet isn't very clear about when to retrieve the digest. The * bit SS_DATA_END is cleared when the engine has processed the data and * when the digest is computed *but* it doesn't mean the digest is * available in the digest registers. Hence the delay to be sure we can * read it. */ ndelay(1); /* Get the hash from the device */ if (op->mode == SS_OP_SHA1) { for (i = 0; i < 5; i++) { v = readl(ss->base + SS_MD0 + i * 4); if (ss->variant->sha1_in_be) put_unaligned_le32(v, areq->result + i * 4); else put_unaligned_be32(v, areq->result + i * 4); } } else { for (i = 0; i < 4; i++) { v = readl(ss->base + SS_MD0 + i * 4); put_unaligned_le32(v, areq->result + i * 4); } } release_ss: writel(0, ss->base + SS_CTL); spin_unlock_bh(&ss->slock); return err; } int sun4i_hash_final(struct ahash_request *areq) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); op->flags = SS_HASH_FINAL; return sun4i_hash(areq); } int sun4i_hash_update(struct ahash_request *areq) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); op->flags = SS_HASH_UPDATE; return sun4i_hash(areq); } /* sun4i_hash_finup: finalize hashing operation after an update */ int sun4i_hash_finup(struct ahash_request *areq) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); op->flags = SS_HASH_UPDATE | SS_HASH_FINAL; return sun4i_hash(areq); } /* combo of init/update/final functions */ int sun4i_hash_digest(struct ahash_request *areq) { int err; struct sun4i_req_ctx *op = ahash_request_ctx(areq); err = sun4i_hash_init(areq); if (err) return err; op->flags = SS_HASH_UPDATE | SS_HASH_FINAL; return sun4i_hash(areq); }
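/*
 * Illustrative sketch, not part of the driver above (the helper is
 * hypothetical): number of 32-bit words the hash_final path above pushes to
 * the FIFO as padding. For example, with 3 tail bytes left in op->buf
 * (nwait == 0, nbw == 3) so that op->byte_count % 64 == 3 at this point,
 * fill == 61 and min_fill == 8: one marker word, 13 zero words and the two
 * length words are written, 64 bytes in total, which closes the block.
 */
static unsigned int sun4i_hash_example_pad_words(unsigned long long byte_count,
						 unsigned int nbw)
{
	unsigned int fill = 64 - (byte_count % 64);
	unsigned int min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));

	if (fill < min_fill)
		fill += 64;

	/* marker word + zero padding + 64-bit message length */
	return 1 + (fill - min_fill) / 4 + 2;
}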
// SPDX-License-Identifier: GPL-2.0-or-later /* * Hisilicon Hi3620 clock driver * * Copyright (c) 2012-2013 Hisilicon Limited. * Copyright (c) 2012-2013 Linaro Limited. * * Author: Haojian Zhuang <[email protected]> * Xin Li <[email protected]> */ #include <linux/kernel.h> #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/slab.h> #include <dt-bindings/clock/hi3620-clock.h> #include "clk.h" /* clock parent list */ static const char *const timer0_mux_p[] __initconst = { "osc32k", "timerclk01", }; static const char *const timer1_mux_p[] __initconst = { "osc32k", "timerclk01", }; static const char *const timer2_mux_p[] __initconst = { "osc32k", "timerclk23", }; static const char *const timer3_mux_p[] __initconst = { "osc32k", "timerclk23", }; static const char *const timer4_mux_p[] __initconst = { "osc32k", "timerclk45", }; static const char *const timer5_mux_p[] __initconst = { "osc32k", "timerclk45", }; static const char *const timer6_mux_p[] __initconst = { "osc32k", "timerclk67", }; static const char *const timer7_mux_p[] __initconst = { "osc32k", "timerclk67", }; static const char *const timer8_mux_p[] __initconst = { "osc32k", "timerclk89", }; static const char *const timer9_mux_p[] __initconst = { "osc32k", "timerclk89", }; static const char *const uart0_mux_p[] __initconst = { "osc26m", "pclk", }; static const char *const uart1_mux_p[] __initconst = { "osc26m", "pclk", }; static const char *const uart2_mux_p[] __initconst = { "osc26m", "pclk", }; static const char *const uart3_mux_p[] __initconst = { "osc26m", "pclk", }; static const char *const uart4_mux_p[] __initconst = { "osc26m", "pclk", }; static const char *const spi0_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", }; static const char *const spi1_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", }; static const char *const spi2_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", }; /* share axi parent */ static const char *const saxi_mux_p[] __initconst = { "armpll3", "armpll2", }; static const char *const pwm0_mux_p[] __initconst = { "osc32k", "osc26m", }; static const char *const pwm1_mux_p[] __initconst = { "osc32k", "osc26m", }; static const char *const sd_mux_p[] __initconst = { "armpll2", "armpll3", }; static const char *const mmc1_mux_p[] __initconst = { "armpll2", "armpll3", }; static const char *const mmc1_mux2_p[] __initconst = { "osc26m", "mmc1_div", }; static const char *const g2d_mux_p[] __initconst = { "armpll2", "armpll3", }; static const char *const venc_mux_p[] __initconst = { "armpll2", "armpll3", }; static const char *const vdec_mux_p[] __initconst = { "armpll2", "armpll3", }; static const char *const vpp_mux_p[] __initconst = { "armpll2", "armpll3", }; static const char *const edc0_mux_p[] __initconst = { "armpll2", "armpll3", }; static const char *const ldi0_mux_p[] __initconst = { "armpll2", "armpll4", "armpll3", "armpll5", }; static const char *const edc1_mux_p[] __initconst = { "armpll2", "armpll3", }; static const char *const ldi1_mux_p[] __initconst = { "armpll2", "armpll4", "armpll3", "armpll5", }; static const char *const rclk_hsic_p[] __initconst = { "armpll3", "armpll2", }; static const char *const mmc2_mux_p[] __initconst = { "armpll2", "armpll3", }; static const char *const mmc3_mux_p[] __initconst = { "armpll2", "armpll3", }; /* fixed rate clocks */ static struct hisi_fixed_rate_clock hi3620_fixed_rate_clks[] __initdata = { { HI3620_OSC32K, "osc32k", NULL, 0, 32768, }, { HI3620_OSC26M, "osc26m", NULL, 0, 26000000, }, { 
HI3620_PCLK, "pclk", NULL, 0, 26000000, }, { HI3620_PLL_ARM0, "armpll0", NULL, 0, 1600000000, }, { HI3620_PLL_ARM1, "armpll1", NULL, 0, 1600000000, }, { HI3620_PLL_PERI, "armpll2", NULL, 0, 1440000000, }, { HI3620_PLL_USB, "armpll3", NULL, 0, 1440000000, }, { HI3620_PLL_HDMI, "armpll4", NULL, 0, 1188000000, }, { HI3620_PLL_GPU, "armpll5", NULL, 0, 1300000000, }, }; /* fixed factor clocks */ static struct hisi_fixed_factor_clock hi3620_fixed_factor_clks[] __initdata = { { HI3620_RCLK_TCXO, "rclk_tcxo", "osc26m", 1, 4, 0, }, { HI3620_RCLK_CFGAXI, "rclk_cfgaxi", "armpll2", 1, 30, 0, }, { HI3620_RCLK_PICO, "rclk_pico", "hsic_div", 1, 40, 0, }, }; static struct hisi_mux_clock hi3620_mux_clks[] __initdata = { { HI3620_TIMER0_MUX, "timer0_mux", timer0_mux_p, ARRAY_SIZE(timer0_mux_p), CLK_SET_RATE_PARENT, 0, 15, 2, 0, }, { HI3620_TIMER1_MUX, "timer1_mux", timer1_mux_p, ARRAY_SIZE(timer1_mux_p), CLK_SET_RATE_PARENT, 0, 17, 2, 0, }, { HI3620_TIMER2_MUX, "timer2_mux", timer2_mux_p, ARRAY_SIZE(timer2_mux_p), CLK_SET_RATE_PARENT, 0, 19, 2, 0, }, { HI3620_TIMER3_MUX, "timer3_mux", timer3_mux_p, ARRAY_SIZE(timer3_mux_p), CLK_SET_RATE_PARENT, 0, 21, 2, 0, }, { HI3620_TIMER4_MUX, "timer4_mux", timer4_mux_p, ARRAY_SIZE(timer4_mux_p), CLK_SET_RATE_PARENT, 0x18, 0, 2, 0, }, { HI3620_TIMER5_MUX, "timer5_mux", timer5_mux_p, ARRAY_SIZE(timer5_mux_p), CLK_SET_RATE_PARENT, 0x18, 2, 2, 0, }, { HI3620_TIMER6_MUX, "timer6_mux", timer6_mux_p, ARRAY_SIZE(timer6_mux_p), CLK_SET_RATE_PARENT, 0x18, 4, 2, 0, }, { HI3620_TIMER7_MUX, "timer7_mux", timer7_mux_p, ARRAY_SIZE(timer7_mux_p), CLK_SET_RATE_PARENT, 0x18, 6, 2, 0, }, { HI3620_TIMER8_MUX, "timer8_mux", timer8_mux_p, ARRAY_SIZE(timer8_mux_p), CLK_SET_RATE_PARENT, 0x18, 8, 2, 0, }, { HI3620_TIMER9_MUX, "timer9_mux", timer9_mux_p, ARRAY_SIZE(timer9_mux_p), CLK_SET_RATE_PARENT, 0x18, 10, 2, 0, }, { HI3620_UART0_MUX, "uart0_mux", uart0_mux_p, ARRAY_SIZE(uart0_mux_p), CLK_SET_RATE_PARENT, 0x100, 7, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_UART1_MUX, "uart1_mux", uart1_mux_p, ARRAY_SIZE(uart1_mux_p), CLK_SET_RATE_PARENT, 0x100, 8, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_UART2_MUX, "uart2_mux", uart2_mux_p, ARRAY_SIZE(uart2_mux_p), CLK_SET_RATE_PARENT, 0x100, 9, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_UART3_MUX, "uart3_mux", uart3_mux_p, ARRAY_SIZE(uart3_mux_p), CLK_SET_RATE_PARENT, 0x100, 10, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_UART4_MUX, "uart4_mux", uart4_mux_p, ARRAY_SIZE(uart4_mux_p), CLK_SET_RATE_PARENT, 0x100, 11, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_SPI0_MUX, "spi0_mux", spi0_mux_p, ARRAY_SIZE(spi0_mux_p), CLK_SET_RATE_PARENT, 0x100, 12, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_SPI1_MUX, "spi1_mux", spi1_mux_p, ARRAY_SIZE(spi1_mux_p), CLK_SET_RATE_PARENT, 0x100, 13, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_SPI2_MUX, "spi2_mux", spi2_mux_p, ARRAY_SIZE(spi2_mux_p), CLK_SET_RATE_PARENT, 0x100, 14, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_SAXI_MUX, "saxi_mux", saxi_mux_p, ARRAY_SIZE(saxi_mux_p), CLK_SET_RATE_PARENT, 0x100, 15, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_PWM0_MUX, "pwm0_mux", pwm0_mux_p, ARRAY_SIZE(pwm0_mux_p), CLK_SET_RATE_PARENT, 0x104, 10, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_PWM1_MUX, "pwm1_mux", pwm1_mux_p, ARRAY_SIZE(pwm1_mux_p), CLK_SET_RATE_PARENT, 0x104, 11, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_SD_MUX, "sd_mux", sd_mux_p, ARRAY_SIZE(sd_mux_p), CLK_SET_RATE_PARENT, 0x108, 4, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_MMC1_MUX, "mmc1_mux", mmc1_mux_p, ARRAY_SIZE(mmc1_mux_p), CLK_SET_RATE_PARENT, 0x108, 9, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_MMC1_MUX2, "mmc1_mux2", mmc1_mux2_p, 
ARRAY_SIZE(mmc1_mux2_p), CLK_SET_RATE_PARENT, 0x108, 10, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_G2D_MUX, "g2d_mux", g2d_mux_p, ARRAY_SIZE(g2d_mux_p), CLK_SET_RATE_PARENT, 0x10c, 5, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_VENC_MUX, "venc_mux", venc_mux_p, ARRAY_SIZE(venc_mux_p), CLK_SET_RATE_PARENT, 0x10c, 11, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_VDEC_MUX, "vdec_mux", vdec_mux_p, ARRAY_SIZE(vdec_mux_p), CLK_SET_RATE_PARENT, 0x110, 5, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_VPP_MUX, "vpp_mux", vpp_mux_p, ARRAY_SIZE(vpp_mux_p), CLK_SET_RATE_PARENT, 0x110, 11, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_EDC0_MUX, "edc0_mux", edc0_mux_p, ARRAY_SIZE(edc0_mux_p), CLK_SET_RATE_PARENT, 0x114, 6, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_LDI0_MUX, "ldi0_mux", ldi0_mux_p, ARRAY_SIZE(ldi0_mux_p), CLK_SET_RATE_PARENT, 0x114, 13, 2, CLK_MUX_HIWORD_MASK, }, { HI3620_EDC1_MUX, "edc1_mux", edc1_mux_p, ARRAY_SIZE(edc1_mux_p), CLK_SET_RATE_PARENT, 0x118, 6, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_LDI1_MUX, "ldi1_mux", ldi1_mux_p, ARRAY_SIZE(ldi1_mux_p), CLK_SET_RATE_PARENT, 0x118, 14, 2, CLK_MUX_HIWORD_MASK, }, { HI3620_RCLK_HSIC, "rclk_hsic", rclk_hsic_p, ARRAY_SIZE(rclk_hsic_p), CLK_SET_RATE_PARENT, 0x130, 2, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_MMC2_MUX, "mmc2_mux", mmc2_mux_p, ARRAY_SIZE(mmc2_mux_p), CLK_SET_RATE_PARENT, 0x140, 4, 1, CLK_MUX_HIWORD_MASK, }, { HI3620_MMC3_MUX, "mmc3_mux", mmc3_mux_p, ARRAY_SIZE(mmc3_mux_p), CLK_SET_RATE_PARENT, 0x140, 9, 1, CLK_MUX_HIWORD_MASK, }, }; static struct hisi_divider_clock hi3620_div_clks[] __initdata = { { HI3620_SHAREAXI_DIV, "saxi_div", "saxi_mux", 0, 0x100, 0, 5, CLK_DIVIDER_HIWORD_MASK, NULL, }, { HI3620_CFGAXI_DIV, "cfgaxi_div", "saxi_div", 0, 0x100, 5, 2, CLK_DIVIDER_HIWORD_MASK, NULL, }, { HI3620_SD_DIV, "sd_div", "sd_mux", 0, 0x108, 0, 4, CLK_DIVIDER_HIWORD_MASK, NULL, }, { HI3620_MMC1_DIV, "mmc1_div", "mmc1_mux", 0, 0x108, 5, 4, CLK_DIVIDER_HIWORD_MASK, NULL, }, { HI3620_HSIC_DIV, "hsic_div", "rclk_hsic", 0, 0x130, 0, 2, CLK_DIVIDER_HIWORD_MASK, NULL, }, { HI3620_MMC2_DIV, "mmc2_div", "mmc2_mux", 0, 0x140, 0, 4, CLK_DIVIDER_HIWORD_MASK, NULL, }, { HI3620_MMC3_DIV, "mmc3_div", "mmc3_mux", 0, 0x140, 5, 4, CLK_DIVIDER_HIWORD_MASK, NULL, }, }; static struct hisi_gate_clock hi3620_separated_gate_clks[] __initdata = { { HI3620_TIMERCLK01, "timerclk01", "timer_rclk01", CLK_SET_RATE_PARENT, 0x20, 0, 0, }, { HI3620_TIMER_RCLK01, "timer_rclk01", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x20, 1, 0, }, { HI3620_TIMERCLK23, "timerclk23", "timer_rclk23", CLK_SET_RATE_PARENT, 0x20, 2, 0, }, { HI3620_TIMER_RCLK23, "timer_rclk23", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x20, 3, 0, }, { HI3620_RTCCLK, "rtcclk", "pclk", CLK_SET_RATE_PARENT, 0x20, 5, 0, }, { HI3620_KPC_CLK, "kpc_clk", "pclk", CLK_SET_RATE_PARENT, 0x20, 6, 0, }, { HI3620_GPIOCLK0, "gpioclk0", "pclk", CLK_SET_RATE_PARENT, 0x20, 8, 0, }, { HI3620_GPIOCLK1, "gpioclk1", "pclk", CLK_SET_RATE_PARENT, 0x20, 9, 0, }, { HI3620_GPIOCLK2, "gpioclk2", "pclk", CLK_SET_RATE_PARENT, 0x20, 10, 0, }, { HI3620_GPIOCLK3, "gpioclk3", "pclk", CLK_SET_RATE_PARENT, 0x20, 11, 0, }, { HI3620_GPIOCLK4, "gpioclk4", "pclk", CLK_SET_RATE_PARENT, 0x20, 12, 0, }, { HI3620_GPIOCLK5, "gpioclk5", "pclk", CLK_SET_RATE_PARENT, 0x20, 13, 0, }, { HI3620_GPIOCLK6, "gpioclk6", "pclk", CLK_SET_RATE_PARENT, 0x20, 14, 0, }, { HI3620_GPIOCLK7, "gpioclk7", "pclk", CLK_SET_RATE_PARENT, 0x20, 15, 0, }, { HI3620_GPIOCLK8, "gpioclk8", "pclk", CLK_SET_RATE_PARENT, 0x20, 16, 0, }, { HI3620_GPIOCLK9, "gpioclk9", "pclk", CLK_SET_RATE_PARENT, 0x20, 17, 0, }, { HI3620_GPIOCLK10, "gpioclk10", "pclk", 
CLK_SET_RATE_PARENT, 0x20, 18, 0, }, { HI3620_GPIOCLK11, "gpioclk11", "pclk", CLK_SET_RATE_PARENT, 0x20, 19, 0, }, { HI3620_GPIOCLK12, "gpioclk12", "pclk", CLK_SET_RATE_PARENT, 0x20, 20, 0, }, { HI3620_GPIOCLK13, "gpioclk13", "pclk", CLK_SET_RATE_PARENT, 0x20, 21, 0, }, { HI3620_GPIOCLK14, "gpioclk14", "pclk", CLK_SET_RATE_PARENT, 0x20, 22, 0, }, { HI3620_GPIOCLK15, "gpioclk15", "pclk", CLK_SET_RATE_PARENT, 0x20, 23, 0, }, { HI3620_GPIOCLK16, "gpioclk16", "pclk", CLK_SET_RATE_PARENT, 0x20, 24, 0, }, { HI3620_GPIOCLK17, "gpioclk17", "pclk", CLK_SET_RATE_PARENT, 0x20, 25, 0, }, { HI3620_GPIOCLK18, "gpioclk18", "pclk", CLK_SET_RATE_PARENT, 0x20, 26, 0, }, { HI3620_GPIOCLK19, "gpioclk19", "pclk", CLK_SET_RATE_PARENT, 0x20, 27, 0, }, { HI3620_GPIOCLK20, "gpioclk20", "pclk", CLK_SET_RATE_PARENT, 0x20, 28, 0, }, { HI3620_GPIOCLK21, "gpioclk21", "pclk", CLK_SET_RATE_PARENT, 0x20, 29, 0, }, { HI3620_DPHY0_CLK, "dphy0_clk", "osc26m", CLK_SET_RATE_PARENT, 0x30, 15, 0, }, { HI3620_DPHY1_CLK, "dphy1_clk", "osc26m", CLK_SET_RATE_PARENT, 0x30, 16, 0, }, { HI3620_DPHY2_CLK, "dphy2_clk", "osc26m", CLK_SET_RATE_PARENT, 0x30, 17, 0, }, { HI3620_USBPHY_CLK, "usbphy_clk", "rclk_pico", CLK_SET_RATE_PARENT, 0x30, 24, 0, }, { HI3620_ACP_CLK, "acp_clk", "rclk_cfgaxi", CLK_SET_RATE_PARENT, 0x30, 28, 0, }, { HI3620_TIMERCLK45, "timerclk45", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x40, 3, 0, }, { HI3620_TIMERCLK67, "timerclk67", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x40, 4, 0, }, { HI3620_TIMERCLK89, "timerclk89", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x40, 5, 0, }, { HI3620_PWMCLK0, "pwmclk0", "pwm0_mux", CLK_SET_RATE_PARENT, 0x40, 7, 0, }, { HI3620_PWMCLK1, "pwmclk1", "pwm1_mux", CLK_SET_RATE_PARENT, 0x40, 8, 0, }, { HI3620_UARTCLK0, "uartclk0", "uart0_mux", CLK_SET_RATE_PARENT, 0x40, 16, 0, }, { HI3620_UARTCLK1, "uartclk1", "uart1_mux", CLK_SET_RATE_PARENT, 0x40, 17, 0, }, { HI3620_UARTCLK2, "uartclk2", "uart2_mux", CLK_SET_RATE_PARENT, 0x40, 18, 0, }, { HI3620_UARTCLK3, "uartclk3", "uart3_mux", CLK_SET_RATE_PARENT, 0x40, 19, 0, }, { HI3620_UARTCLK4, "uartclk4", "uart4_mux", CLK_SET_RATE_PARENT, 0x40, 20, 0, }, { HI3620_SPICLK0, "spiclk0", "spi0_mux", CLK_SET_RATE_PARENT, 0x40, 21, 0, }, { HI3620_SPICLK1, "spiclk1", "spi1_mux", CLK_SET_RATE_PARENT, 0x40, 22, 0, }, { HI3620_SPICLK2, "spiclk2", "spi2_mux", CLK_SET_RATE_PARENT, 0x40, 23, 0, }, { HI3620_I2CCLK0, "i2cclk0", "pclk", CLK_SET_RATE_PARENT, 0x40, 24, 0, }, { HI3620_I2CCLK1, "i2cclk1", "pclk", CLK_SET_RATE_PARENT, 0x40, 25, 0, }, { HI3620_SCI_CLK, "sci_clk", "osc26m", CLK_SET_RATE_PARENT, 0x40, 26, 0, }, { HI3620_I2CCLK2, "i2cclk2", "pclk", CLK_SET_RATE_PARENT, 0x40, 28, 0, }, { HI3620_I2CCLK3, "i2cclk3", "pclk", CLK_SET_RATE_PARENT, 0x40, 29, 0, }, { HI3620_DDRC_PER_CLK, "ddrc_per_clk", "rclk_cfgaxi", CLK_SET_RATE_PARENT, 0x50, 9, 0, }, { HI3620_DMAC_CLK, "dmac_clk", "rclk_cfgaxi", CLK_SET_RATE_PARENT, 0x50, 10, 0, }, { HI3620_USB2DVC_CLK, "usb2dvc_clk", "rclk_cfgaxi", CLK_SET_RATE_PARENT, 0x50, 17, 0, }, { HI3620_SD_CLK, "sd_clk", "sd_div", CLK_SET_RATE_PARENT, 0x50, 20, 0, }, { HI3620_MMC_CLK1, "mmc_clk1", "mmc1_mux2", CLK_SET_RATE_PARENT, 0x50, 21, 0, }, { HI3620_MMC_CLK2, "mmc_clk2", "mmc2_div", CLK_SET_RATE_PARENT, 0x50, 22, 0, }, { HI3620_MMC_CLK3, "mmc_clk3", "mmc3_div", CLK_SET_RATE_PARENT, 0x50, 23, 0, }, { HI3620_MCU_CLK, "mcu_clk", "acp_clk", CLK_SET_RATE_PARENT, 0x50, 24, 0, }, }; static void __init hi3620_clk_init(struct device_node *np) { struct hisi_clock_data *clk_data; clk_data = hisi_clk_init(np, HI3620_NR_CLKS); if (!clk_data) return; 
hisi_clk_register_fixed_rate(hi3620_fixed_rate_clks, ARRAY_SIZE(hi3620_fixed_rate_clks), clk_data); hisi_clk_register_fixed_factor(hi3620_fixed_factor_clks, ARRAY_SIZE(hi3620_fixed_factor_clks), clk_data); hisi_clk_register_mux(hi3620_mux_clks, ARRAY_SIZE(hi3620_mux_clks), clk_data); hisi_clk_register_divider(hi3620_div_clks, ARRAY_SIZE(hi3620_div_clks), clk_data); hisi_clk_register_gate_sep(hi3620_separated_gate_clks, ARRAY_SIZE(hi3620_separated_gate_clks), clk_data); } CLK_OF_DECLARE(hi3620_clk, "hisilicon,hi3620-clock", hi3620_clk_init); struct hisi_mmc_clock { unsigned int id; const char *name; const char *parent_name; unsigned long flags; u32 clken_reg; u32 clken_bit; u32 div_reg; u32 div_off; u32 div_bits; u32 drv_reg; u32 drv_off; u32 drv_bits; u32 sam_reg; u32 sam_off; u32 sam_bits; }; struct clk_mmc { struct clk_hw hw; u32 id; void __iomem *clken_reg; u32 clken_bit; void __iomem *div_reg; u32 div_off; u32 div_bits; void __iomem *drv_reg; u32 drv_off; u32 drv_bits; void __iomem *sam_reg; u32 sam_off; u32 sam_bits; }; #define to_mmc(_hw) container_of(_hw, struct clk_mmc, hw) static struct hisi_mmc_clock hi3620_mmc_clks[] __initdata = { { HI3620_SD_CIUCLK, "sd_bclk1", "sd_clk", CLK_SET_RATE_PARENT, 0x1f8, 0, 0x1f8, 1, 3, 0x1f8, 4, 4, 0x1f8, 8, 4}, { HI3620_MMC_CIUCLK1, "mmc_bclk1", "mmc_clk1", CLK_SET_RATE_PARENT, 0x1f8, 12, 0x1f8, 13, 3, 0x1f8, 16, 4, 0x1f8, 20, 4}, { HI3620_MMC_CIUCLK2, "mmc_bclk2", "mmc_clk2", CLK_SET_RATE_PARENT, 0x1f8, 24, 0x1f8, 25, 3, 0x1f8, 28, 4, 0x1fc, 0, 4}, { HI3620_MMC_CIUCLK3, "mmc_bclk3", "mmc_clk3", CLK_SET_RATE_PARENT, 0x1fc, 4, 0x1fc, 5, 3, 0x1fc, 8, 4, 0x1fc, 12, 4}, }; static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { switch (parent_rate) { case 26000000: return 13000000; case 180000000: return 25000000; case 360000000: return 50000000; case 720000000: return 100000000; case 1440000000: return 180000000; default: return parent_rate; } } static int mmc_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { struct clk_mmc *mclk = to_mmc(hw); if ((req->rate <= 13000000) && (mclk->id == HI3620_MMC_CIUCLK1)) { req->rate = 13000000; req->best_parent_rate = 26000000; } else if (req->rate <= 26000000) { req->rate = 25000000; req->best_parent_rate = 180000000; } else if (req->rate <= 52000000) { req->rate = 50000000; req->best_parent_rate = 360000000; } else if (req->rate <= 100000000) { req->rate = 100000000; req->best_parent_rate = 720000000; } else { /* max is 180M */ req->rate = 180000000; req->best_parent_rate = 1440000000; } return -EINVAL; } static u32 mmc_clk_delay(u32 val, u32 para, u32 off, u32 len) { u32 i; for (i = 0; i < len; i++) { if (para % 2) val |= 1 << (off + i); else val &= ~(1 << (off + i)); para = para >> 1; } return val; } static int mmc_clk_set_timing(struct clk_hw *hw, unsigned long rate) { struct clk_mmc *mclk = to_mmc(hw); unsigned long flags; u32 sam, drv, div, val; static DEFINE_SPINLOCK(mmc_clk_lock); switch (rate) { case 13000000: sam = 3; drv = 1; div = 1; break; case 25000000: sam = 13; drv = 6; div = 6; break; case 50000000: sam = 3; drv = 6; div = 6; break; case 100000000: sam = 6; drv = 4; div = 6; break; case 180000000: sam = 6; drv = 4; div = 7; break; default: return -EINVAL; } spin_lock_irqsave(&mmc_clk_lock, flags); val = readl_relaxed(mclk->clken_reg); val &= ~(1 << mclk->clken_bit); writel_relaxed(val, mclk->clken_reg); val = readl_relaxed(mclk->sam_reg); val = mmc_clk_delay(val, sam, mclk->sam_off, mclk->sam_bits); writel_relaxed(val, mclk->sam_reg); val = 
readl_relaxed(mclk->drv_reg); val = mmc_clk_delay(val, drv, mclk->drv_off, mclk->drv_bits); writel_relaxed(val, mclk->drv_reg); val = readl_relaxed(mclk->div_reg); val = mmc_clk_delay(val, div, mclk->div_off, mclk->div_bits); writel_relaxed(val, mclk->div_reg); val = readl_relaxed(mclk->clken_reg); val |= 1 << mclk->clken_bit; writel_relaxed(val, mclk->clken_reg); spin_unlock_irqrestore(&mmc_clk_lock, flags); return 0; } static int mmc_clk_prepare(struct clk_hw *hw) { struct clk_mmc *mclk = to_mmc(hw); unsigned long rate; if (mclk->id == HI3620_MMC_CIUCLK1) rate = 13000000; else rate = 25000000; return mmc_clk_set_timing(hw, rate); } static int mmc_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { return mmc_clk_set_timing(hw, rate); } static const struct clk_ops clk_mmc_ops = { .prepare = mmc_clk_prepare, .determine_rate = mmc_clk_determine_rate, .set_rate = mmc_clk_set_rate, .recalc_rate = mmc_clk_recalc_rate, }; static struct clk *hisi_register_clk_mmc(struct hisi_mmc_clock *mmc_clk, void __iomem *base, struct device_node *np) { struct clk_mmc *mclk; struct clk *clk; struct clk_init_data init; mclk = kzalloc(sizeof(*mclk), GFP_KERNEL); if (!mclk) return ERR_PTR(-ENOMEM); init.name = mmc_clk->name; init.ops = &clk_mmc_ops; init.flags = mmc_clk->flags; init.parent_names = (mmc_clk->parent_name ? &mmc_clk->parent_name : NULL); init.num_parents = (mmc_clk->parent_name ? 1 : 0); mclk->hw.init = &init; mclk->id = mmc_clk->id; mclk->clken_reg = base + mmc_clk->clken_reg; mclk->clken_bit = mmc_clk->clken_bit; mclk->div_reg = base + mmc_clk->div_reg; mclk->div_off = mmc_clk->div_off; mclk->div_bits = mmc_clk->div_bits; mclk->drv_reg = base + mmc_clk->drv_reg; mclk->drv_off = mmc_clk->drv_off; mclk->drv_bits = mmc_clk->drv_bits; mclk->sam_reg = base + mmc_clk->sam_reg; mclk->sam_off = mmc_clk->sam_off; mclk->sam_bits = mmc_clk->sam_bits; clk = clk_register(NULL, &mclk->hw); if (WARN_ON(IS_ERR(clk))) kfree(mclk); return clk; } static void __init hi3620_mmc_clk_init(struct device_node *node) { void __iomem *base; int i, num = ARRAY_SIZE(hi3620_mmc_clks); struct clk_onecell_data *clk_data; if (!node) { pr_err("failed to find pctrl node in DTS\n"); return; } base = of_iomap(node, 0); if (!base) { pr_err("failed to map pctrl\n"); return; } clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); if (WARN_ON(!clk_data)) return; clk_data->clks = kcalloc(num, sizeof(*clk_data->clks), GFP_KERNEL); if (!clk_data->clks) { kfree(clk_data); return; } for (i = 0; i < num; i++) { struct hisi_mmc_clock *mmc_clk = &hi3620_mmc_clks[i]; clk_data->clks[mmc_clk->id] = hisi_register_clk_mmc(mmc_clk, base, node); } clk_data->clk_num = num; of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); } CLK_OF_DECLARE(hi3620_mmc_clk, "hisilicon,hi3620-mmc-clock", hi3620_mmc_clk_init);
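/*
 * Editor's note: illustrative sketch only, not part of the driver above.
 * The clocks registered by hi3620_clk_init() (for example the "uartclk0"
 * gate) are consumed through the common clock framework; a peripheral
 * driver would typically do something like the snippet below.  The device
 * and the clock lookup are assumptions, resolved in practice through the
 * board device tree.
 */
#if 0	/* sketch, never compiled */
#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_consumer_probe_sketch(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);	/* clock named in the DT node */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);		/* ungates e.g. "uartclk0" */
	if (ret)
		return ret;

	dev_info(&pdev->dev, "clock rate: %lu Hz\n", clk_get_rate(clk));
	return 0;
}
#endif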
/* SPDX-License-Identifier: GPL-2.0 */ /* * Xilinx Zynq MPSoC Firmware layer * * Copyright (C) 2014-2018 Xilinx, Inc. * */ #ifndef _DT_BINDINGS_CLK_ZYNQMP_H #define _DT_BINDINGS_CLK_ZYNQMP_H #define IOPLL 0 #define RPLL 1 #define APLL 2 #define DPLL 3 #define VPLL 4 #define IOPLL_TO_FPD 5 #define RPLL_TO_FPD 6 #define APLL_TO_LPD 7 #define DPLL_TO_LPD 8 #define VPLL_TO_LPD 9 #define ACPU 10 #define ACPU_HALF 11 #define DBF_FPD 12 #define DBF_LPD 13 #define DBG_TRACE 14 #define DBG_TSTMP 15 #define DP_VIDEO_REF 16 #define DP_AUDIO_REF 17 #define DP_STC_REF 18 #define GDMA_REF 19 #define DPDMA_REF 20 #define DDR_REF 21 #define SATA_REF 22 #define PCIE_REF 23 #define GPU_REF 24 #define GPU_PP0_REF 25 #define GPU_PP1_REF 26 #define TOPSW_MAIN 27 #define TOPSW_LSBUS 28 #define GTGREF0_REF 29 #define LPD_SWITCH 30 #define LPD_LSBUS 31 #define USB0_BUS_REF 32 #define USB1_BUS_REF 33 #define USB3_DUAL_REF 34 #define USB0 35 #define USB1 36 #define CPU_R5 37 #define CPU_R5_CORE 38 #define CSU_SPB 39 #define CSU_PLL 40 #define PCAP 41 #define IOU_SWITCH 42 #define GEM_TSU_REF 43 #define GEM_TSU 44 #define GEM0_TX 45 #define GEM1_TX 46 #define GEM2_TX 47 #define GEM3_TX 48 #define GEM0_RX 49 #define GEM1_RX 50 #define GEM2_RX 51 #define GEM3_RX 52 #define QSPI_REF 53 #define SDIO0_REF 54 #define SDIO1_REF 55 #define UART0_REF 56 #define UART1_REF 57 #define SPI0_REF 58 #define SPI1_REF 59 #define NAND_REF 60 #define I2C0_REF 61 #define I2C1_REF 62 #define CAN0_REF 63 #define CAN1_REF 64 #define CAN0 65 #define CAN1 66 #define DLL_REF 67 #define ADMA_REF 68 #define TIMESTAMP_REF 69 #define AMS_REF 70 #define PL0_REF 71 #define PL1_REF 72 #define PL2_REF 73 #define PL3_REF 74 #define WDT 75 #define IOPLL_INT 76 #define IOPLL_PRE_SRC 77 #define IOPLL_HALF 78 #define IOPLL_INT_MUX 79 #define IOPLL_POST_SRC 80 #define RPLL_INT 81 #define RPLL_PRE_SRC 82 #define RPLL_HALF 83 #define RPLL_INT_MUX 84 #define RPLL_POST_SRC 85 #define APLL_INT 86 #define APLL_PRE_SRC 87 #define APLL_HALF 88 #define APLL_INT_MUX 89 #define APLL_POST_SRC 90 #define DPLL_INT 91 #define DPLL_PRE_SRC 92 #define DPLL_HALF 93 #define DPLL_INT_MUX 94 #define DPLL_POST_SRC 95 #define VPLL_INT 96 #define VPLL_PRE_SRC 97 #define VPLL_HALF 98 #define VPLL_INT_MUX 99 #define VPLL_POST_SRC 100 #define CAN0_MIO 101 #define CAN1_MIO 102 #define ACPU_FULL 103 #define GEM0_REF 104 #define GEM1_REF 105 #define GEM2_REF 106 #define GEM3_REF 107 #define GEM0_REF_UNG 108 #define GEM1_REF_UNG 109 #define GEM2_REF_UNG 110 #define GEM3_REF_UNG 111 #define LPD_WDT 112 #endif
// SPDX-License-Identifier: GPL-2.0-only /******************************************************************************* Copyright(c) 2004-2005 Intel Corporation. All rights reserved. Portions of this file are based on the WEP enablement code provided by the Host AP project hostap-drivers v0.1.3 Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen <[email protected]> Copyright (c) 2002-2003, Jouni Malinen <[email protected]> Contact Information: Intel Linux Wireless <[email protected]> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/wireless.h> #include <linux/etherdevice.h> #include <linux/uaccess.h> #include <net/net_namespace.h> #include <net/arp.h> #include "libipw.h" #define DRV_DESCRIPTION "802.11 data/management/control stack" #define DRV_NAME "libipw" #define DRV_PROCNAME "ieee80211" #define DRV_VERSION LIBIPW_VERSION #define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <[email protected]>" MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL"); static struct cfg80211_ops libipw_config_ops = { }; static void *libipw_wiphy_privid = &libipw_wiphy_privid; static int libipw_networks_allocate(struct libipw_device *ieee) { int i, j; for (i = 0; i < MAX_NETWORK_COUNT; i++) { ieee->networks[i] = kzalloc(sizeof(struct libipw_network), GFP_KERNEL); if (!ieee->networks[i]) { LIBIPW_ERROR("Out of memory allocating beacons\n"); for (j = 0; j < i; j++) kfree(ieee->networks[j]); return -ENOMEM; } } return 0; } static inline void libipw_networks_free(struct libipw_device *ieee) { int i; for (i = 0; i < MAX_NETWORK_COUNT; i++) kfree(ieee->networks[i]); } void libipw_networks_age(struct libipw_device *ieee, unsigned long age_secs) { struct libipw_network *network = NULL; unsigned long flags; unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC); spin_lock_irqsave(&ieee->lock, flags); list_for_each_entry(network, &ieee->network_list, list) { network->last_scanned -= age_jiffies; } spin_unlock_irqrestore(&ieee->lock, flags); } EXPORT_SYMBOL(libipw_networks_age); static void libipw_networks_initialize(struct libipw_device *ieee) { int i; INIT_LIST_HEAD(&ieee->network_free_list); INIT_LIST_HEAD(&ieee->network_list); for (i = 0; i < MAX_NETWORK_COUNT; i++) list_add_tail(&ieee->networks[i]->list, &ieee->network_free_list); } struct net_device *alloc_libipw(int sizeof_priv, int monitor) { struct libipw_device *ieee; struct net_device *dev; int err; LIBIPW_DEBUG_INFO("Initializing...\n"); dev = alloc_etherdev(sizeof(struct libipw_device) + sizeof_priv); if (!dev) goto failed; ieee = netdev_priv(dev); ieee->dev = dev; if (!monitor) { ieee->wdev.wiphy = wiphy_new(&libipw_config_ops, 0); if (!ieee->wdev.wiphy) { LIBIPW_ERROR("Unable to allocate wiphy.\n"); goto failed_free_netdev; } ieee->dev->ieee80211_ptr = &ieee->wdev; ieee->wdev.iftype = NL80211_IFTYPE_STATION; /* Fill-out wiphy structure bits we know... Not enough info here to call set_wiphy_dev or set MAC address or channel info -- have to do that in ->ndo_init... 
*/ ieee->wdev.wiphy->privid = libipw_wiphy_privid; ieee->wdev.wiphy->max_scan_ssids = 1; ieee->wdev.wiphy->max_scan_ie_len = 0; ieee->wdev.wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); } err = libipw_networks_allocate(ieee); if (err) { LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err); goto failed_free_wiphy; } libipw_networks_initialize(ieee); /* Default fragmentation threshold is maximum payload size */ ieee->fts = DEFAULT_FTS; ieee->rts = DEFAULT_FTS; ieee->scan_age = DEFAULT_MAX_SCAN_AGE; ieee->open_wep = 1; /* Default to enabling full open WEP with host based encrypt/decrypt */ ieee->host_encrypt = 1; ieee->host_decrypt = 1; ieee->host_mc_decrypt = 1; /* Host fragmentation in Open mode. Default is enabled. * Note: host fragmentation is always enabled if host encryption * is enabled. For cards can do hardware encryption, they must do * hardware fragmentation as well. So we don't need a variable * like host_enc_frag. */ ieee->host_open_frag = 1; ieee->ieee802_1x = 1; /* Default to supporting 802.1x */ spin_lock_init(&ieee->lock); libipw_crypt_info_init(&ieee->crypt_info, dev->name, &ieee->lock); ieee->wpa_enabled = 0; ieee->drop_unencrypted = 0; ieee->privacy_invoked = 0; return dev; failed_free_wiphy: if (!monitor) wiphy_free(ieee->wdev.wiphy); failed_free_netdev: free_netdev(dev); failed: return NULL; } EXPORT_SYMBOL(alloc_libipw); void free_libipw(struct net_device *dev, int monitor) { struct libipw_device *ieee = netdev_priv(dev); libipw_crypt_info_free(&ieee->crypt_info); libipw_networks_free(ieee); /* free cfg80211 resources */ if (!monitor) wiphy_free(ieee->wdev.wiphy); free_netdev(dev); } EXPORT_SYMBOL(free_libipw); #ifdef CONFIG_LIBIPW_DEBUG static int debug = 0; u32 libipw_debug_level = 0; EXPORT_SYMBOL_GPL(libipw_debug_level); static struct proc_dir_entry *libipw_proc = NULL; static int debug_level_proc_show(struct seq_file *m, void *v) { seq_printf(m, "0x%08X\n", libipw_debug_level); return 0; } static int debug_level_proc_open(struct inode *inode, struct file *file) { return single_open(file, debug_level_proc_show, NULL); } static ssize_t debug_level_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { char buf[] = "0x00000000\n"; size_t len = min(sizeof(buf) - 1, count); unsigned long val; if (copy_from_user(buf, buffer, len)) return count; buf[len] = 0; if (sscanf(buf, "%li", &val) != 1) printk(KERN_INFO DRV_NAME ": %s is not in hex or decimal form.\n", buf); else libipw_debug_level = val; return strnlen(buf, len); } static const struct proc_ops debug_level_proc_ops = { .proc_open = debug_level_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = single_release, .proc_write = debug_level_proc_write, }; #endif /* CONFIG_LIBIPW_DEBUG */ static int __init libipw_init(void) { int err; #ifdef CONFIG_LIBIPW_DEBUG struct proc_dir_entry *e; libipw_debug_level = debug; libipw_proc = proc_mkdir(DRV_PROCNAME, init_net.proc_net); if (libipw_proc == NULL) { LIBIPW_ERROR("Unable to create " DRV_PROCNAME " proc directory\n"); return -EIO; } e = proc_create("debug_level", 0644, libipw_proc, &debug_level_proc_ops); if (!e) { remove_proc_entry(DRV_PROCNAME, init_net.proc_net); libipw_proc = NULL; return -EIO; } #endif /* CONFIG_LIBIPW_DEBUG */ printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); err = libipw_crypto_init(); if (err) goto remove_debugfs; err = libipw_crypto_ccmp_init(); if (err) goto uninit_crypto; err = 
libipw_crypto_tkip_init(); if (err) goto uninit_crypto_ccmp; err = libipw_crypto_wep_init(); if (err) goto uninit_crypto_tkip; return 0; uninit_crypto_tkip: libipw_crypto_tkip_exit(); uninit_crypto_ccmp: libipw_crypto_ccmp_exit(); uninit_crypto: libipw_crypto_exit(); remove_debugfs: #ifdef CONFIG_LIBIPW_DEBUG remove_proc_entry("debug_level", libipw_proc); remove_proc_entry(DRV_PROCNAME, init_net.proc_net); libipw_proc = NULL; #endif return err; } static void __exit libipw_exit(void) { #ifdef CONFIG_LIBIPW_DEBUG if (libipw_proc) { remove_proc_entry("debug_level", libipw_proc); remove_proc_entry(DRV_PROCNAME, init_net.proc_net); libipw_proc = NULL; } #endif /* CONFIG_LIBIPW_DEBUG */ libipw_crypto_ccmp_exit(); libipw_crypto_tkip_exit(); libipw_crypto_wep_exit(); libipw_crypto_exit(); } #ifdef CONFIG_LIBIPW_DEBUG #include <linux/moduleparam.h> module_param(debug, int, 0444); MODULE_PARM_DESC(debug, "debug output mask"); #endif /* CONFIG_LIBIPW_DEBUG */ module_exit(libipw_exit); module_init(libipw_init);
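/*
 * Editor's note: illustrative sketch only, not part of the module above.
 * alloc_libipw()/free_libipw() are the allocation entry points exported
 * above; the example_priv structure below is hypothetical and stands in
 * for a real driver's private data, which is placed after struct
 * libipw_device in the netdev private area (see the alloc_etherdev()
 * call in alloc_libipw()).
 */
#if 0	/* sketch, never compiled */
struct example_priv {
	int some_driver_state;	/* hypothetical */
};

static struct net_device *example_alloc_sketch(void)
{
	struct net_device *dev;
	struct libipw_device *ieee;

	dev = alloc_libipw(sizeof(struct example_priv), 0);	/* 0: not monitor */
	if (!dev)
		return NULL;

	ieee = netdev_priv(dev);
	/* driver private area of sizeof(struct example_priv) follows *ieee */
	return dev;
}

static void example_free_sketch(struct net_device *dev)
{
	free_libipw(dev, 0);	/* also frees the wiphy allocated above */
}
#endif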
// SPDX-License-Identifier: GPL-2.0-or-later /* IRC extension for TCP NAT alteration. * * (C) 2000-2001 by Harald Welte <[email protected]> * (C) 2004 Rusty Russell <[email protected]> IBM Corporation * based on a copy of RR's ip_nat_ftp.c */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/tcp.h> #include <linux/kernel.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_helper.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_expect.h> #include <linux/netfilter/nf_conntrack_irc.h> #define NAT_HELPER_NAME "irc" MODULE_AUTHOR("Harald Welte <[email protected]>"); MODULE_DESCRIPTION("IRC (DCC) NAT helper"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NF_NAT_HELPER(NAT_HELPER_NAME); static struct nf_conntrack_nat_helper nat_helper_irc = NF_CT_NAT_HELPER_INIT(NAT_HELPER_NAME); static unsigned int help(struct sk_buff *skb, enum ip_conntrack_info ctinfo, unsigned int protoff, unsigned int matchoff, unsigned int matchlen, struct nf_conntrack_expect *exp) { char buffer[sizeof("4294967296 65635")]; struct nf_conn *ct = exp->master; union nf_inet_addr newaddr; u_int16_t port; /* Reply comes from server. */ newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3; exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; exp->dir = IP_CT_DIR_REPLY; exp->expectfn = nf_nat_follow_master; port = nf_nat_exp_find_port(exp, ntohs(exp->saved_proto.tcp.port)); if (port == 0) { nf_ct_helper_log(skb, ct, "all ports in use"); return NF_DROP; } /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27 * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28 * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26 * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26 * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27 * * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits, * 255.255.255.255==4294967296, 10 digits) * P: bound port (min 1 d, max 5d (65635)) * F: filename (min 1 d ) * S: size (min 1 d ) * 0x01, \n: terminators */ /* AAA = "us", ie. where server normally talks to. */ snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port); pr_debug("inserting '%s' == %pI4, port %u\n", buffer, &newaddr.ip, port); if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, matchlen, buffer, strlen(buffer))) { nf_ct_helper_log(skb, ct, "cannot mangle packet"); nf_ct_unexpect_related(exp); return NF_DROP; } return NF_ACCEPT; } static void __exit nf_nat_irc_fini(void) { nf_nat_helper_unregister(&nat_helper_irc); RCU_INIT_POINTER(nf_nat_irc_hook, NULL); synchronize_rcu(); } static int __init nf_nat_irc_init(void) { BUG_ON(nf_nat_irc_hook != NULL); nf_nat_helper_register(&nat_helper_irc); RCU_INIT_POINTER(nf_nat_irc_hook, help); return 0; } /* Prior to 2.6.11, we had a ports param. No longer, but don't break users. */ static int warn_set(const char *val, const struct kernel_param *kp) { pr_info("kernel >= 2.6.10 only uses 'ports' for conntrack modules\n"); return 0; } module_param_call(ports, warn_set, NULL, NULL, 0); module_init(nf_nat_irc_init); module_exit(nf_nat_irc_fini);
/* SPDX-License-Identifier: GPL-2.0-only */ /* * spu hypervisor abstraction for direct hardware access. * * Copyright (C) 2006 Sony Computer Entertainment Inc. * Copyright 2006 Sony Corp. */ #ifndef SPU_PRIV1_MMIO_H #define SPU_PRIV1_MMIO_H struct device_node *spu_devnode(struct spu *spu); #endif /* SPU_PRIV1_MMIO_H */
/* * dvb_net.h * * Copyright (C) 2001 Ralph Metzler for convergence integrated media GmbH * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #ifndef _DVB_NET_H_ #define _DVB_NET_H_ #include <linux/module.h> #include <media/dvbdev.h> struct net_device; #define DVB_NET_DEVICES_MAX 10 #ifdef CONFIG_DVB_NET /** * struct dvb_net - describes a DVB network interface * * @dvbdev: pointer to &struct dvb_device. * @device: array of pointers to &struct net_device. * @state: array of integers, one per net device. A value * other than zero means that the interface is * in use. * @exit: flag to indicate when the device is being removed. * @demux: pointer to &struct dmx_demux. * @ioctl_mutex: protect access to this struct. * @remove_mutex: mutex that avoids a race condition between a callback * called when the hardware is disconnected and the * file_operations of dvb_net. * * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network * devices. */ struct dvb_net { struct dvb_device *dvbdev; struct net_device *device[DVB_NET_DEVICES_MAX]; int state[DVB_NET_DEVICES_MAX]; unsigned int exit:1; struct dmx_demux *demux; struct mutex ioctl_mutex; struct mutex remove_mutex; }; /** * dvb_net_init - initializes a digital TV network device and registers it. * * @adap: pointer to &struct dvb_adapter. * @dvbnet: pointer to &struct dvb_net. * @dmxdemux: pointer to &struct dmx_demux. */ int dvb_net_init(struct dvb_adapter *adap, struct dvb_net *dvbnet, struct dmx_demux *dmxdemux); /** * dvb_net_release - releases a digital TV network device and unregisters it. * * @dvbnet: pointer to &struct dvb_net. */ void dvb_net_release(struct dvb_net *dvbnet); #else struct dvb_net { struct dvb_device *dvbdev; }; static inline void dvb_net_release(struct dvb_net *dvbnet) { } static inline int dvb_net_init(struct dvb_adapter *adap, struct dvb_net *dvbnet, struct dmx_demux *dmx) { return 0; } #endif /* ifdef CONFIG_DVB_NET */ #endif
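/*
 * Editor's note: illustrative sketch only.  dvb_net_init() is typically
 * called from a bridge/adapter driver once its demux is registered, and
 * dvb_net_release() on teardown; the function names below are placeholders.
 */
#if 0	/* sketch, never compiled */
static int example_attach_net_sketch(struct dvb_adapter *adap,
				     struct dvb_net *dvbnet,
				     struct dmx_demux *demux)
{
	/* registers up to DVB_NET_DEVICES_MAX network interfaces */
	return dvb_net_init(adap, dvbnet, demux);
}

static void example_detach_net_sketch(struct dvb_net *dvbnet)
{
	dvb_net_release(dvbnet);
}
#endif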
/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/dt-bindings/media/omap3-isp.h * * Copyright (C) 2015 Sakari Ailus */ #ifndef __DT_BINDINGS_OMAP3_ISP_H__ #define __DT_BINDINGS_OMAP3_ISP_H__ #define OMAP3ISP_PHY_TYPE_COMPLEX_IO 0 #define OMAP3ISP_PHY_TYPE_CSIPHY 1 #endif /* __DT_BINDINGS_OMAP3_ISP_H__ */
/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __SOUND_MPU401_H #define __SOUND_MPU401_H /* * Header file for MPU-401 and compatible cards * Copyright (c) by Jaroslav Kysela <[email protected]> */ #include <sound/rawmidi.h> #include <linux/interrupt.h> #define MPU401_HW_MPU401 1 /* native MPU401 */ #define MPU401_HW_SB 2 /* SoundBlaster MPU-401 UART */ #define MPU401_HW_ES1688 3 /* AudioDrive ES1688 MPU-401 UART */ #define MPU401_HW_OPL3SA2 4 /* Yamaha OPL3-SA2 */ #define MPU401_HW_SONICVIBES 5 /* S3 SonicVibes */ #define MPU401_HW_CS4232 6 /* CS4232 */ #define MPU401_HW_ES18XX 7 /* AudioDrive ES18XX MPU-401 UART */ #define MPU401_HW_FM801 8 /* ForteMedia FM801 */ #define MPU401_HW_TRID4DWAVE 9 /* Trident 4DWave */ #define MPU401_HW_AZT2320 10 /* Aztech AZT2320 */ #define MPU401_HW_ALS100 11 /* Avance Logic ALS100 */ #define MPU401_HW_ICE1712 12 /* Envy24 */ #define MPU401_HW_VIA686A 13 /* VIA 82C686A */ #define MPU401_HW_YMFPCI 14 /* YMF DS-XG PCI */ #define MPU401_HW_CMIPCI 15 /* CMIPCI MPU-401 UART */ #define MPU401_HW_ALS4000 16 /* Avance Logic ALS4000 */ #define MPU401_HW_INTEL8X0 17 /* Intel8x0 driver */ #define MPU401_HW_PC98II 18 /* Roland PC98II */ #define MPU401_HW_AUREAL 19 /* Aureal Vortex */ #define MPU401_INFO_INPUT (1 << 0) /* input stream */ #define MPU401_INFO_OUTPUT (1 << 1) /* output stream */ #define MPU401_INFO_INTEGRATED (1 << 2) /* integrated h/w port */ #define MPU401_INFO_MMIO (1 << 3) /* MMIO access */ #define MPU401_INFO_TX_IRQ (1 << 4) /* independent TX irq */ #define MPU401_INFO_IRQ_HOOK (1 << 5) /* mpu401 irq handler is called from driver irq handler */ #define MPU401_INFO_NO_ACK (1 << 6) /* No ACK cmd needed */ #define MPU401_INFO_USE_TIMER (1 << 15) /* internal */ #define MPU401_MODE_BIT_INPUT 0 #define MPU401_MODE_BIT_OUTPUT 1 #define MPU401_MODE_BIT_INPUT_TRIGGER 2 #define MPU401_MODE_BIT_OUTPUT_TRIGGER 3 #define MPU401_MODE_INPUT (1<<MPU401_MODE_BIT_INPUT) #define MPU401_MODE_OUTPUT (1<<MPU401_MODE_BIT_OUTPUT) #define MPU401_MODE_INPUT_TRIGGER (1<<MPU401_MODE_BIT_INPUT_TRIGGER) #define MPU401_MODE_OUTPUT_TRIGGER (1<<MPU401_MODE_BIT_OUTPUT_TRIGGER) #define MPU401_MODE_INPUT_TIMER (1<<0) #define MPU401_MODE_OUTPUT_TIMER (1<<1) struct snd_mpu401 { struct snd_rawmidi *rmidi; unsigned short hardware; /* MPU401_HW_XXXX */ unsigned int info_flags; /* MPU401_INFO_XXX */ unsigned long port; /* base port of MPU-401 chip */ unsigned long cport; /* port + 1 (usually) */ struct resource *res; /* port resource */ int irq; /* IRQ number of MPU-401 chip */ unsigned long mode; /* MPU401_MODE_XXXX */ int timer_invoked; int (*open_input) (struct snd_mpu401 * mpu); void (*close_input) (struct snd_mpu401 * mpu); int (*open_output) (struct snd_mpu401 * mpu); void (*close_output) (struct snd_mpu401 * mpu); void *private_data; struct snd_rawmidi_substream *substream_input; struct snd_rawmidi_substream *substream_output; spinlock_t input_lock; spinlock_t output_lock; spinlock_t timer_lock; struct timer_list timer; void (*write) (struct snd_mpu401 * mpu, unsigned char data, unsigned long addr); unsigned char (*read) (struct snd_mpu401 *mpu, unsigned long addr); }; /* I/O ports */ #define MPU401C(mpu) (mpu)->cport #define MPU401D(mpu) (mpu)->port /* * control register bits */ /* read MPU401C() */ #define MPU401_RX_EMPTY 0x80 #define MPU401_TX_FULL 0x40 /* write MPU401C() */ #define MPU401_RESET 0xff #define MPU401_ENTER_UART 0x3f /* read MPU401D() */ #define MPU401_ACK 0xfe /* */ irqreturn_t snd_mpu401_uart_interrupt(int irq, void *dev_id); irqreturn_t 
snd_mpu401_uart_interrupt_tx(int irq, void *dev_id); int snd_mpu401_uart_new(struct snd_card *card, int device, unsigned short hardware, unsigned long port, unsigned int info_flags, int irq, struct snd_rawmidi ** rrawmidi); #endif /* __SOUND_MPU401_H */
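/*
 * Editor's note: illustrative sketch only.  snd_mpu401_uart_new() is the
 * constructor declared above; the card, I/O port and IRQ below are
 * placeholders that a card driver would have obtained in its own probe
 * routine.
 */
#if 0	/* sketch, never compiled */
static int example_create_mpu_sketch(struct snd_card *card,
				     unsigned long port, int irq)
{
	struct snd_rawmidi *rmidi;

	/* Creates an MPU-401 UART rawmidi device on the given I/O port. */
	return snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, port,
				   0 /* no special info flags */, irq, &rmidi);
}
#endif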
// SPDX-License-Identifier: GPL-2.0-only /* * w1_ds28e17.c - w1 family 19 (DS28E17) driver * * Copyright (c) 2016 Jan Kandziora <[email protected]> */ #include <linux/crc16.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/uaccess.h> #define CRC16_INIT 0 #include <linux/w1.h> #define W1_FAMILY_DS28E17 0x19 /* Module setup. */ MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jan Kandziora <[email protected]>"); MODULE_DESCRIPTION("w1 family 19 driver for DS28E17, 1-wire to I2C master bridge"); MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS28E17)); /* Default I2C speed to be set when a DS28E17 is detected. */ static int i2c_speed = 100; module_param_named(speed, i2c_speed, int, 0600); MODULE_PARM_DESC(speed, "Default I2C speed to be set when a DS28E17 is detected"); /* Default I2C stretch value to be set when a DS28E17 is detected. */ static char i2c_stretch = 1; module_param_named(stretch, i2c_stretch, byte, 0600); MODULE_PARM_DESC(stretch, "Default I2C stretch value to be set when a DS28E17 is detected"); /* DS28E17 device command codes. */ #define W1_F19_WRITE_DATA_WITH_STOP 0x4B #define W1_F19_WRITE_DATA_NO_STOP 0x5A #define W1_F19_WRITE_DATA_ONLY 0x69 #define W1_F19_WRITE_DATA_ONLY_WITH_STOP 0x78 #define W1_F19_READ_DATA_WITH_STOP 0x87 #define W1_F19_WRITE_READ_DATA_WITH_STOP 0x2D #define W1_F19_WRITE_CONFIGURATION 0xD2 #define W1_F19_READ_CONFIGURATION 0xE1 #define W1_F19_ENABLE_SLEEP_MODE 0x1E #define W1_F19_READ_DEVICE_REVISION 0xC4 /* DS28E17 status bits */ #define W1_F19_STATUS_CRC 0x01 #define W1_F19_STATUS_ADDRESS 0x02 #define W1_F19_STATUS_START 0x08 /* * Maximum number of I2C bytes to transfer within one CRC16 protected onewire * command. */ #define W1_F19_WRITE_DATA_LIMIT 255 /* Maximum number of I2C bytes to read with one onewire command. */ #define W1_F19_READ_DATA_LIMIT 255 /* Constants for calculating the busy sleep. */ #define W1_F19_BUSY_TIMEBASES { 90, 23, 10 } #define W1_F19_BUSY_GRATUITY 1000 /* Number of checks for the busy flag before timeout. */ #define W1_F19_BUSY_CHECKS 1000 /* Slave specific data. */ struct w1_f19_data { u8 speed; u8 stretch; struct i2c_adapter adapter; }; /* Wait a while until the busy flag clears. */ static int w1_f19_i2c_busy_wait(struct w1_slave *sl, size_t count) { const unsigned long timebases[3] = W1_F19_BUSY_TIMEBASES; struct w1_f19_data *data = sl->family_data; unsigned int checks; /* Check the busy flag first in any case.*/ if (w1_touch_bit(sl->master, 1) == 0) return 0; /* * Do a generously long sleep in the beginning, * as we have to wait at least this time for all * the I2C bytes at the given speed to be transferred. */ usleep_range(timebases[data->speed] * (data->stretch) * count, timebases[data->speed] * (data->stretch) * count + W1_F19_BUSY_GRATUITY); /* Now continusly check the busy flag sent by the DS28E17. */ checks = W1_F19_BUSY_CHECKS; while ((checks--) > 0) { /* Return success if the busy flag is cleared. */ if (w1_touch_bit(sl->master, 1) == 0) return 0; /* Wait one non-streched byte timeslot. */ udelay(timebases[data->speed]); } /* Timeout. */ dev_warn(&sl->dev, "busy timeout\n"); return -ETIMEDOUT; } /* Utility function: result. */ static size_t w1_f19_error(struct w1_slave *sl, u8 w1_buf[]) { /* Warnings. 
*/ if (w1_buf[0] & W1_F19_STATUS_CRC) dev_warn(&sl->dev, "crc16 mismatch\n"); if (w1_buf[0] & W1_F19_STATUS_ADDRESS) dev_warn(&sl->dev, "i2c device not responding\n"); if ((w1_buf[0] & (W1_F19_STATUS_CRC | W1_F19_STATUS_ADDRESS)) == 0 && w1_buf[1] != 0) { dev_warn(&sl->dev, "i2c short write, %d bytes not acknowledged\n", w1_buf[1]); } /* Check error conditions. */ if (w1_buf[0] & W1_F19_STATUS_ADDRESS) return -ENXIO; if (w1_buf[0] & W1_F19_STATUS_START) return -EAGAIN; if (w1_buf[0] != 0 || w1_buf[1] != 0) return -EIO; /* All ok. */ return 0; } /* Utility function: write data to I2C slave, single chunk. */ static int __w1_f19_i2c_write(struct w1_slave *sl, const u8 *command, size_t command_count, const u8 *buffer, size_t count) { u16 crc; int error; u8 w1_buf[2]; /* Send command and I2C data to DS28E17. */ crc = crc16(CRC16_INIT, command, command_count); w1_write_block(sl->master, command, command_count); w1_buf[0] = count; crc = crc16(crc, w1_buf, 1); w1_write_8(sl->master, w1_buf[0]); crc = crc16(crc, buffer, count); w1_write_block(sl->master, buffer, count); w1_buf[0] = ~(crc & 0xFF); w1_buf[1] = ~((crc >> 8) & 0xFF); w1_write_block(sl->master, w1_buf, 2); /* Wait until busy flag clears (or timeout). */ if (w1_f19_i2c_busy_wait(sl, count + 1) < 0) return -ETIMEDOUT; /* Read status from DS28E17. */ w1_read_block(sl->master, w1_buf, 2); /* Check error conditions. */ error = w1_f19_error(sl, w1_buf); if (error < 0) return error; /* Return number of bytes written. */ return count; } /* Write data to I2C slave. */ static int w1_f19_i2c_write(struct w1_slave *sl, u16 i2c_address, const u8 *buffer, size_t count, bool stop) { int result; int remaining = count; const u8 *p; u8 command[2]; /* Check input. */ if (count == 0) return -EOPNOTSUPP; /* Check whether we need multiple commands. */ if (count <= W1_F19_WRITE_DATA_LIMIT) { /* * Small data amount. Data can be sent with * a single onewire command. */ /* Send all data to DS28E17. */ command[0] = (stop ? W1_F19_WRITE_DATA_WITH_STOP : W1_F19_WRITE_DATA_NO_STOP); command[1] = i2c_address << 1; result = __w1_f19_i2c_write(sl, command, 2, buffer, count); } else { /* Large data amount. Data has to be sent in multiple chunks. */ /* Send first chunk to DS28E17. */ p = buffer; command[0] = W1_F19_WRITE_DATA_NO_STOP; command[1] = i2c_address << 1; result = __w1_f19_i2c_write(sl, command, 2, p, W1_F19_WRITE_DATA_LIMIT); if (result < 0) return result; /* Resume to same DS28E17. */ if (w1_reset_resume_command(sl->master)) return -EIO; /* Next data chunk. */ p += W1_F19_WRITE_DATA_LIMIT; remaining -= W1_F19_WRITE_DATA_LIMIT; while (remaining > W1_F19_WRITE_DATA_LIMIT) { /* Send intermediate chunk to DS28E17. */ command[0] = W1_F19_WRITE_DATA_ONLY; result = __w1_f19_i2c_write(sl, command, 1, p, W1_F19_WRITE_DATA_LIMIT); if (result < 0) return result; /* Resume to same DS28E17. */ if (w1_reset_resume_command(sl->master)) return -EIO; /* Next data chunk. */ p += W1_F19_WRITE_DATA_LIMIT; remaining -= W1_F19_WRITE_DATA_LIMIT; } /* Send final chunk to DS28E17. */ command[0] = (stop ? W1_F19_WRITE_DATA_ONLY_WITH_STOP : W1_F19_WRITE_DATA_ONLY); result = __w1_f19_i2c_write(sl, command, 1, p, remaining); } return result; } /* Read data from I2C slave. */ static int w1_f19_i2c_read(struct w1_slave *sl, u16 i2c_address, u8 *buffer, size_t count) { u16 crc; int error; u8 w1_buf[5]; /* Check input. */ if (count == 0) return -EOPNOTSUPP; /* Send command to DS28E17. 
*/ w1_buf[0] = W1_F19_READ_DATA_WITH_STOP; w1_buf[1] = i2c_address << 1 | 0x01; w1_buf[2] = count; crc = crc16(CRC16_INIT, w1_buf, 3); w1_buf[3] = ~(crc & 0xFF); w1_buf[4] = ~((crc >> 8) & 0xFF); w1_write_block(sl->master, w1_buf, 5); /* Wait until busy flag clears (or timeout). */ if (w1_f19_i2c_busy_wait(sl, count + 1) < 0) return -ETIMEDOUT; /* Read status from DS28E17. */ w1_buf[0] = w1_read_8(sl->master); w1_buf[1] = 0; /* Check error conditions. */ error = w1_f19_error(sl, w1_buf); if (error < 0) return error; /* Read received I2C data from DS28E17. */ return w1_read_block(sl->master, buffer, count); } /* Write to, then read data from I2C slave. */ static int w1_f19_i2c_write_read(struct w1_slave *sl, u16 i2c_address, const u8 *wbuffer, size_t wcount, u8 *rbuffer, size_t rcount) { u16 crc; int error; u8 w1_buf[3]; /* Check input. */ if (wcount == 0 || rcount == 0) return -EOPNOTSUPP; /* Send command and I2C data to DS28E17. */ w1_buf[0] = W1_F19_WRITE_READ_DATA_WITH_STOP; w1_buf[1] = i2c_address << 1; w1_buf[2] = wcount; crc = crc16(CRC16_INIT, w1_buf, 3); w1_write_block(sl->master, w1_buf, 3); crc = crc16(crc, wbuffer, wcount); w1_write_block(sl->master, wbuffer, wcount); w1_buf[0] = rcount; crc = crc16(crc, w1_buf, 1); w1_buf[1] = ~(crc & 0xFF); w1_buf[2] = ~((crc >> 8) & 0xFF); w1_write_block(sl->master, w1_buf, 3); /* Wait until busy flag clears (or timeout). */ if (w1_f19_i2c_busy_wait(sl, wcount + rcount + 2) < 0) return -ETIMEDOUT; /* Read status from DS28E17. */ w1_read_block(sl->master, w1_buf, 2); /* Check error conditions. */ error = w1_f19_error(sl, w1_buf); if (error < 0) return error; /* Read received I2C data from DS28E17. */ return w1_read_block(sl->master, rbuffer, rcount); } /* Do an I2C master transfer. */ static int w1_f19_i2c_master_transfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct w1_slave *sl = (struct w1_slave *) adapter->algo_data; int i = 0; int result = 0; /* Start onewire transaction. */ mutex_lock(&sl->master->bus_mutex); /* Select DS28E17. */ if (w1_reset_select_slave(sl)) { i = -EIO; goto error; } /* Loop while there are still messages to transfer. */ while (i < num) { /* * Check for special case: Small write followed * by read to same I2C device. */ if (i < (num-1) && msgs[i].addr == msgs[i+1].addr && !(msgs[i].flags & I2C_M_RD) && (msgs[i+1].flags & I2C_M_RD) && (msgs[i].len <= W1_F19_WRITE_DATA_LIMIT)) { /* * The DS28E17 has a combined transfer * for small write+read. */ result = w1_f19_i2c_write_read(sl, msgs[i].addr, msgs[i].buf, msgs[i].len, msgs[i+1].buf, msgs[i+1].len); if (result < 0) { i = result; goto error; } /* * Check if we should interpret the read data * as a length byte. The DS28E17 unfortunately * has no read without stop, so we can just do * another simple read in that case. */ if (msgs[i+1].flags & I2C_M_RECV_LEN) { result = w1_f19_i2c_read(sl, msgs[i+1].addr, &(msgs[i+1].buf[1]), msgs[i+1].buf[0]); if (result < 0) { i = result; goto error; } } /* Eat up read message, too. */ i++; } else if (msgs[i].flags & I2C_M_RD) { /* Read transfer. */ result = w1_f19_i2c_read(sl, msgs[i].addr, msgs[i].buf, msgs[i].len); if (result < 0) { i = result; goto error; } /* * Check if we should interpret the read data * as a length byte. The DS28E17 unfortunately * has no read without stop, so we can just do * another simple read in that case. 
*/ if (msgs[i].flags & I2C_M_RECV_LEN) { result = w1_f19_i2c_read(sl, msgs[i].addr, &(msgs[i].buf[1]), msgs[i].buf[0]); if (result < 0) { i = result; goto error; } } } else { /* * Write transfer. * Stop condition only for last * transfer. */ result = w1_f19_i2c_write(sl, msgs[i].addr, msgs[i].buf, msgs[i].len, i == (num-1)); if (result < 0) { i = result; goto error; } } /* Next message. */ i++; /* Are there still messages to send/receive? */ if (i < num) { /* Yes. Resume to same DS28E17. */ if (w1_reset_resume_command(sl->master)) { i = -EIO; goto error; } } } error: /* End onewire transaction. */ mutex_unlock(&sl->master->bus_mutex); /* Return number of messages processed or error. */ return i; } /* Get I2C adapter functionality. */ static u32 w1_f19_i2c_functionality(struct i2c_adapter *adapter) { /* * Plain I2C functions only. * SMBus is emulated by the kernel's I2C layer. * No "I2C_FUNC_SMBUS_QUICK" * No "I2C_FUNC_SMBUS_READ_BLOCK_DATA" * No "I2C_FUNC_SMBUS_BLOCK_PROC_CALL" */ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_PEC; } /* I2C adapter quirks. */ static const struct i2c_adapter_quirks w1_f19_i2c_adapter_quirks = { .max_read_len = W1_F19_READ_DATA_LIMIT, }; /* I2C algorithm. */ static const struct i2c_algorithm w1_f19_i2c_algorithm = { .master_xfer = w1_f19_i2c_master_transfer, .functionality = w1_f19_i2c_functionality, }; /* Read I2C speed from DS28E17. */ static int w1_f19_get_i2c_speed(struct w1_slave *sl) { struct w1_f19_data *data = sl->family_data; int result = -EIO; /* Start onewire transaction. */ mutex_lock(&sl->master->bus_mutex); /* Select slave. */ if (w1_reset_select_slave(sl)) goto error; /* Read slave configuration byte. */ w1_write_8(sl->master, W1_F19_READ_CONFIGURATION); result = w1_read_8(sl->master); if (result < 0 || result > 2) { result = -EIO; goto error; } /* Update speed in slave specific data. */ data->speed = result; error: /* End onewire transaction. */ mutex_unlock(&sl->master->bus_mutex); return result; } /* Set I2C speed on DS28E17. */ static int __w1_f19_set_i2c_speed(struct w1_slave *sl, u8 speed) { struct w1_f19_data *data = sl->family_data; const int i2c_speeds[3] = { 100, 400, 900 }; u8 w1_buf[2]; /* Select slave. */ if (w1_reset_select_slave(sl)) return -EIO; w1_buf[0] = W1_F19_WRITE_CONFIGURATION; w1_buf[1] = speed; w1_write_block(sl->master, w1_buf, 2); /* Update speed in slave specific data. */ data->speed = speed; dev_info(&sl->dev, "i2c speed set to %d kBaud\n", i2c_speeds[speed]); return 0; } static int w1_f19_set_i2c_speed(struct w1_slave *sl, u8 speed) { int result; /* Start onewire transaction. */ mutex_lock(&sl->master->bus_mutex); /* Set I2C speed on DS28E17. */ result = __w1_f19_set_i2c_speed(sl, speed); /* End onewire transaction. */ mutex_unlock(&sl->master->bus_mutex); return result; } /* Sysfs attributes. */ /* I2C speed attribute for a single chip. */ static ssize_t speed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_slave *sl = dev_to_w1_slave(dev); int result; /* Read current speed from slave. Updates data->speed. */ result = w1_f19_get_i2c_speed(sl); if (result < 0) return result; /* Return current speed value. 
*/ return sprintf(buf, "%d\n", result); } static ssize_t speed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w1_slave *sl = dev_to_w1_slave(dev); int error; /* Valid values are: "100", "400", "900" */ if (count < 3 || count > 4 || !buf) return -EINVAL; if (count == 4 && buf[3] != '\n') return -EINVAL; if (buf[1] != '0' || buf[2] != '0') return -EINVAL; /* Set speed on slave. */ switch (buf[0]) { case '1': error = w1_f19_set_i2c_speed(sl, 0); break; case '4': error = w1_f19_set_i2c_speed(sl, 1); break; case '9': error = w1_f19_set_i2c_speed(sl, 2); break; default: return -EINVAL; } if (error < 0) return error; /* Return bytes written. */ return count; } static DEVICE_ATTR_RW(speed); /* Busy stretch attribute for a single chip. */ static ssize_t stretch_show(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_slave *sl = dev_to_w1_slave(dev); struct w1_f19_data *data = sl->family_data; /* Return current stretch value. */ return sprintf(buf, "%d\n", data->stretch); } static ssize_t stretch_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w1_slave *sl = dev_to_w1_slave(dev); struct w1_f19_data *data = sl->family_data; /* Valid values are '1' to '9' */ if (count < 1 || count > 2 || !buf) return -EINVAL; if (count == 2 && buf[1] != '\n') return -EINVAL; if (buf[0] < '1' || buf[0] > '9') return -EINVAL; /* Set busy stretch value. */ data->stretch = buf[0] & 0x0F; /* Return bytes written. */ return count; } static DEVICE_ATTR_RW(stretch); /* All attributes. */ static struct attribute *w1_f19_attrs[] = { &dev_attr_speed.attr, &dev_attr_stretch.attr, NULL, }; static const struct attribute_group w1_f19_group = { .attrs = w1_f19_attrs, }; static const struct attribute_group *w1_f19_groups[] = { &w1_f19_group, NULL, }; /* Slave add and remove functions. */ static int w1_f19_add_slave(struct w1_slave *sl) { struct w1_f19_data *data = NULL; /* Allocate memory for slave specific data. */ data = devm_kzalloc(&sl->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; sl->family_data = data; /* Setup default I2C speed on slave. */ switch (i2c_speed) { case 100: __w1_f19_set_i2c_speed(sl, 0); break; case 400: __w1_f19_set_i2c_speed(sl, 1); break; case 900: __w1_f19_set_i2c_speed(sl, 2); break; default: /* * A i2c_speed module parameter of anything else * than 100, 400, 900 means not to touch the * speed of the DS28E17. * We assume 400kBaud, the power-on value. */ data->speed = 1; } /* * Setup default busy stretch * configuration for the DS28E17. */ data->stretch = i2c_stretch; /* Setup I2C adapter. */ data->adapter.owner = THIS_MODULE; data->adapter.algo = &w1_f19_i2c_algorithm; data->adapter.algo_data = sl; strcpy(data->adapter.name, "w1-"); strcat(data->adapter.name, sl->name); data->adapter.dev.parent = &sl->dev; data->adapter.quirks = &w1_f19_i2c_adapter_quirks; return i2c_add_adapter(&data->adapter); } static void w1_f19_remove_slave(struct w1_slave *sl) { struct w1_f19_data *family_data = sl->family_data; /* Delete I2C adapter. */ i2c_del_adapter(&family_data->adapter); /* Free slave specific data. */ devm_kfree(&sl->dev, family_data); sl->family_data = NULL; } /* Declarations within the w1 subsystem. */ static const struct w1_family_ops w1_f19_fops = { .add_slave = w1_f19_add_slave, .remove_slave = w1_f19_remove_slave, .groups = w1_f19_groups, }; static struct w1_family w1_family_19 = { .fid = W1_FAMILY_DS28E17, .fops = &w1_f19_fops, }; module_w1_family(w1_family_19);
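/*
 * Editor's note: illustrative sketch only, not part of the driver above.
 * The DS28E17 is exposed as a regular I2C adapter ("w1-" plus the slave
 * name), so clients use the normal i2c_transfer() API.  The adapter
 * pointer, slave address 0x48 and register 0x01 below are assumptions; a
 * short write followed by a read to the same address is folded by
 * w1_f19_i2c_master_transfer() into one combined DS28E17 write/read command.
 */
#if 0	/* sketch, never compiled */
static int example_ds28e17_read_reg_sketch(struct i2c_adapter *adap)
{
	u8 reg = 0x01;			/* hypothetical register address */
	u8 val;
	struct i2c_msg msgs[] = {
		{ .addr = 0x48, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x48, .flags = I2C_M_RD, .len = 1, .buf = &val },
	};
	int ret;

	ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
	if (ret != ARRAY_SIZE(msgs))
		return ret < 0 ? ret : -EIO;
	return val;
}
#endif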
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2005-2014, 2021 Intel Corporation * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #include <linux/sched.h> #include <linux/export.h> #include "iwl-drv.h" #include "notif-wait.h" void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait) { spin_lock_init(&notif_wait->notif_wait_lock); INIT_LIST_HEAD(&notif_wait->notif_waits); init_waitqueue_head(&notif_wait->notif_waitq); } IWL_EXPORT_SYMBOL(iwl_notification_wait_init); bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt) { bool triggered = false; if (!list_empty(&notif_wait->notif_waits)) { struct iwl_notification_wait *w; spin_lock_bh(&notif_wait->notif_wait_lock); list_for_each_entry(w, &notif_wait->notif_waits, list) { int i; bool found = false; /* * If it already finished (triggered) or has been * aborted then don't evaluate it again to avoid races, * Otherwise the function could be called again even * though it returned true before */ if (w->triggered || w->aborted) continue; for (i = 0; i < w->n_cmds; i++) { u16 rec_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); if (w->cmds[i] == rec_id || (!iwl_cmd_groupid(w->cmds[i]) && DEF_ID(w->cmds[i]) == rec_id)) { found = true; break; } } if (!found) continue; if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) { w->triggered = true; triggered = true; } } spin_unlock_bh(&notif_wait->notif_wait_lock); } return triggered; } IWL_EXPORT_SYMBOL(iwl_notification_wait); void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) { struct iwl_notification_wait *wait_entry; spin_lock_bh(&notif_wait->notif_wait_lock); list_for_each_entry(wait_entry, &notif_wait->notif_waits, list) wait_entry->aborted = true; spin_unlock_bh(&notif_wait->notif_wait_lock); wake_up_all(&notif_wait->notif_waitq); } IWL_EXPORT_SYMBOL(iwl_abort_notification_waits); void iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, struct iwl_notification_wait *wait_entry, const u16 *cmds, int n_cmds, bool (*fn)(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data), void *fn_data) { if (WARN_ON(n_cmds > MAX_NOTIF_CMDS)) n_cmds = MAX_NOTIF_CMDS; wait_entry->fn = fn; wait_entry->fn_data = fn_data; wait_entry->n_cmds = n_cmds; memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16)); wait_entry->triggered = false; wait_entry->aborted = false; spin_lock_bh(&notif_wait->notif_wait_lock); list_add(&wait_entry->list, &notif_wait->notif_waits); spin_unlock_bh(&notif_wait->notif_wait_lock); } IWL_EXPORT_SYMBOL(iwl_init_notification_wait); void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait, struct iwl_notification_wait *wait_entry) { spin_lock_bh(&notif_wait->notif_wait_lock); list_del(&wait_entry->list); spin_unlock_bh(&notif_wait->notif_wait_lock); } IWL_EXPORT_SYMBOL(iwl_remove_notification); int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, struct iwl_notification_wait *wait_entry, unsigned long timeout) { int ret; ret = wait_event_timeout(notif_wait->notif_waitq, wait_entry->triggered || wait_entry->aborted, timeout); iwl_remove_notification(notif_wait, wait_entry); if (wait_entry->aborted) return -EIO; /* return value is always >= 0 */ if (ret <= 0) return -ETIMEDOUT; return 0; } IWL_EXPORT_SYMBOL(iwl_wait_notification);
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "atom.h" #include "include/bios_parser_types.h" #include "bios_parser_helper.h" #include "command_table_helper.h" #include "command_table.h" #include "bios_parser_types_internal.h" uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset, uint32_t size) { if (bp->bios && offset + size < bp->bios_size) return bp->bios + offset; else return NULL; } #include "reg_helper.h" #define CTX \ bios->ctx #define REG(reg)\ (bios->regs->reg) #undef FN #define FN(reg_name, field_name) \ ATOM_ ## field_name ## _SHIFT, ATOM_ ## field_name bool bios_is_accelerated_mode( struct dc_bios *bios) { uint32_t acc_mode; REG_GET(BIOS_SCRATCH_6, S6_ACC_MODE, &acc_mode); return (acc_mode == 1); } void bios_set_scratch_acc_mode_change( struct dc_bios *bios, uint32_t state) { REG_UPDATE(BIOS_SCRATCH_6, S6_ACC_MODE, state); } void bios_set_scratch_critical_state( struct dc_bios *bios, bool state) { uint32_t critical_state = state ? 1 : 0; REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critical_state); }
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM intel_ifs #if !defined(_TRACE_IFS_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_IFS_H #include <linux/ktime.h> #include <linux/tracepoint.h> TRACE_EVENT(ifs_status, TP_PROTO(int batch, int start, int stop, u64 status), TP_ARGS(batch, start, stop, status), TP_STRUCT__entry( __field( int, batch ) __field( u64, status ) __field( u16, start ) __field( u16, stop ) ), TP_fast_assign( __entry->batch = batch; __entry->start = start; __entry->stop = stop; __entry->status = status; ), TP_printk("batch: 0x%.2x, start: 0x%.4x, stop: 0x%.4x, status: 0x%.16llx", __entry->batch, __entry->start, __entry->stop, __entry->status) ); TRACE_EVENT(ifs_sbaf, TP_PROTO(int batch, union ifs_sbaf activate, union ifs_sbaf_status status), TP_ARGS(batch, activate, status), TP_STRUCT__entry( __field( u64, status ) __field( int, batch ) __field( u16, bundle ) __field( u16, pgm ) ), TP_fast_assign( __entry->status = status.data; __entry->batch = batch; __entry->bundle = activate.bundle_idx; __entry->pgm = activate.pgm_idx; ), TP_printk("batch: 0x%.2x, bundle_idx: 0x%.4x, pgm_idx: 0x%.4x, status: 0x%.16llx", __entry->batch, __entry->bundle, __entry->pgm, __entry->status) ); #endif /* _TRACE_IFS_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
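Both tracepoints above appear in tracefs under events/intel_ifs/ once the driver is loaded. A minimal sketch for enabling ifs_status from user space; the /sys/kernel/tracing mount point is an assumption (older setups expose the same tree under /sys/kernel/debug/tracing).

/*
 * Illustrative sketch only: enable the intel_ifs/ifs_status tracepoint
 * through tracefs. Adjust the mount point if tracefs lives elsewhere.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/tracing/events/intel_ifs/ifs_status/enable";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}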
// SPDX-License-Identifier: GPL-2.0-only /* * fsgsbase.c, an fsgsbase test * Copyright (c) 2014-2016 Andy Lutomirski */ #define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <sys/syscall.h> #include <unistd.h> #include <err.h> #include <sys/user.h> #include <asm/prctl.h> #include <sys/prctl.h> #include <signal.h> #include <limits.h> #include <sys/ucontext.h> #include <sched.h> #include <linux/futex.h> #include <pthread.h> #include <asm/ldt.h> #include <sys/mman.h> #include <stddef.h> #include <sys/ptrace.h> #include <sys/wait.h> #include <setjmp.h> #ifndef __x86_64__ # error This test is 64-bit only #endif static volatile sig_atomic_t want_segv; static volatile unsigned long segv_addr; static unsigned short *shared_scratch; static int nerrs; static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), int flags) { struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_sigaction = handler; sa.sa_flags = SA_SIGINFO | flags; sigemptyset(&sa.sa_mask); if (sigaction(sig, &sa, 0)) err(1, "sigaction"); } static void clearhandler(int sig) { struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_DFL; sigemptyset(&sa.sa_mask); if (sigaction(sig, &sa, 0)) err(1, "sigaction"); } static void sigsegv(int sig, siginfo_t *si, void *ctx_void) { ucontext_t *ctx = (ucontext_t*)ctx_void; if (!want_segv) { clearhandler(SIGSEGV); return; /* Crash cleanly. */ } want_segv = false; segv_addr = (unsigned long)si->si_addr; ctx->uc_mcontext.gregs[REG_RIP] += 4; /* Skip the faulting mov */ } static jmp_buf jmpbuf; static void sigill(int sig, siginfo_t *si, void *ctx_void) { siglongjmp(jmpbuf, 1); } static bool have_fsgsbase; static inline unsigned long rdgsbase(void) { unsigned long gsbase; asm volatile("rdgsbase %0" : "=r" (gsbase) :: "memory"); return gsbase; } static inline unsigned long rdfsbase(void) { unsigned long fsbase; asm volatile("rdfsbase %0" : "=r" (fsbase) :: "memory"); return fsbase; } static inline void wrgsbase(unsigned long gsbase) { asm volatile("wrgsbase %0" :: "r" (gsbase) : "memory"); } enum which_base { FS, GS }; static unsigned long read_base(enum which_base which) { unsigned long offset; /* * Unless we have FSGSBASE, there's no direct way to do this from * user mode. We can get at it indirectly using signals, though. */ want_segv = true; offset = 0; if (which == FS) { /* Use a constant-length instruction here. */ asm volatile ("mov %%fs:(%%rcx), %%rax" : : "c" (offset) : "rax"); } else { asm volatile ("mov %%gs:(%%rcx), %%rax" : : "c" (offset) : "rax"); } if (!want_segv) return segv_addr + offset; /* * If that didn't segfault, try the other end of the address space. * Unless we get really unlucky and run into the vsyscall page, this * is guaranteed to segfault. 
*/ offset = (ULONG_MAX >> 1) + 1; if (which == FS) { asm volatile ("mov %%fs:(%%rcx), %%rax" : : "c" (offset) : "rax"); } else { asm volatile ("mov %%gs:(%%rcx), %%rax" : : "c" (offset) : "rax"); } if (!want_segv) return segv_addr + offset; abort(); } static void check_gs_value(unsigned long value) { unsigned long base; unsigned short sel; printf("[RUN]\tARCH_SET_GS to 0x%lx\n", value); if (syscall(SYS_arch_prctl, ARCH_SET_GS, value) != 0) err(1, "ARCH_SET_GS"); asm volatile ("mov %%gs, %0" : "=rm" (sel)); base = read_base(GS); if (base == value) { printf("[OK]\tGSBASE was set as expected (selector 0x%hx)\n", sel); } else { nerrs++; printf("[FAIL]\tGSBASE was not as expected: got 0x%lx (selector 0x%hx)\n", base, sel); } if (syscall(SYS_arch_prctl, ARCH_GET_GS, &base) != 0) err(1, "ARCH_GET_GS"); if (base == value) { printf("[OK]\tARCH_GET_GS worked as expected (selector 0x%hx)\n", sel); } else { nerrs++; printf("[FAIL]\tARCH_GET_GS was not as expected: got 0x%lx (selector 0x%hx)\n", base, sel); } } static void mov_0_gs(unsigned long initial_base, bool schedule) { unsigned long base, arch_base; printf("[RUN]\tARCH_SET_GS to 0x%lx then mov 0 to %%gs%s\n", initial_base, schedule ? " and schedule " : ""); if (syscall(SYS_arch_prctl, ARCH_SET_GS, initial_base) != 0) err(1, "ARCH_SET_GS"); if (schedule) usleep(10); asm volatile ("mov %0, %%gs" : : "rm" (0)); base = read_base(GS); if (syscall(SYS_arch_prctl, ARCH_GET_GS, &arch_base) != 0) err(1, "ARCH_GET_GS"); if (base == arch_base) { printf("[OK]\tGSBASE is 0x%lx\n", base); } else { nerrs++; printf("[FAIL]\tGSBASE changed to 0x%lx but kernel reports 0x%lx\n", base, arch_base); } } static volatile unsigned long remote_base; static volatile unsigned int ftx; /* * ARCH_SET_FS/GS(0) may or may not program a selector of zero. HARD_ZERO * means to force the selector to zero to improve test coverage. */ #define HARD_ZERO 0xa1fa5f343cb85fa4 static void do_remote_base() { unsigned long to_set = remote_base; bool hard_zero = false; if (to_set == HARD_ZERO) { to_set = 0; hard_zero = true; } if (syscall(SYS_arch_prctl, ARCH_SET_GS, to_set) != 0) err(1, "ARCH_SET_GS"); if (hard_zero) asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0)); unsigned short sel; asm volatile ("mov %%gs, %0" : "=rm" (sel)); printf("\tother thread: ARCH_SET_GS(0x%lx)%s -- sel is 0x%hx\n", to_set, hard_zero ? " and clear gs" : "", sel); } static __thread int set_thread_area_entry_number = -1; static unsigned short load_gs(void) { /* * Sets GS != 0 and GSBASE != 0 but arranges for the kernel to think * that GSBASE == 0 (i.e. thread.gsbase == 0). */ /* Step 1: tell the kernel that we have GSBASE == 0. */ if (syscall(SYS_arch_prctl, ARCH_SET_GS, 0) != 0) err(1, "ARCH_SET_GS"); /* Step 2: change GSBASE without telling the kernel. 
*/ struct user_desc desc = { .entry_number = 0, .base_addr = 0xBAADF00D, .limit = 0xfffff, .seg_32bit = 1, .contents = 0, /* Data, grow-up */ .read_exec_only = 0, .limit_in_pages = 1, .seg_not_present = 0, .useable = 0 }; if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) == 0) { printf("\tusing LDT slot 0\n"); asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0x7)); return 0x7; } else { /* No modify_ldt for us (configured out, perhaps) */ struct user_desc *low_desc = mmap( NULL, sizeof(desc), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0); memcpy(low_desc, &desc, sizeof(desc)); low_desc->entry_number = set_thread_area_entry_number; /* 32-bit set_thread_area */ long ret; asm volatile ("int $0x80" : "=a" (ret), "+m" (*low_desc) : "a" (243), "b" (low_desc) : "r8", "r9", "r10", "r11"); memcpy(&desc, low_desc, sizeof(desc)); munmap(low_desc, sizeof(desc)); if (ret != 0) { printf("[NOTE]\tcould not create a segment -- test won't do anything\n"); return 0; } printf("\tusing GDT slot %d\n", desc.entry_number); set_thread_area_entry_number = desc.entry_number; unsigned short gs = (unsigned short)((desc.entry_number << 3) | 0x3); asm volatile ("mov %0, %%gs" : : "rm" (gs)); return gs; } } void test_wrbase(unsigned short index, unsigned long base) { unsigned short newindex; unsigned long newbase; printf("[RUN]\tGS = 0x%hx, GSBASE = 0x%lx\n", index, base); asm volatile ("mov %0, %%gs" : : "rm" (index)); wrgsbase(base); remote_base = 0; ftx = 1; syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0); while (ftx != 0) syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0); asm volatile ("mov %%gs, %0" : "=rm" (newindex)); newbase = rdgsbase(); if (newindex == index && newbase == base) { printf("[OK]\tIndex and base were preserved\n"); } else { printf("[FAIL]\tAfter switch, GS = 0x%hx and GSBASE = 0x%lx\n", newindex, newbase); nerrs++; } } static void *threadproc(void *ctx) { while (1) { while (ftx == 0) syscall(SYS_futex, &ftx, FUTEX_WAIT, 0, NULL, NULL, 0); if (ftx == 3) return NULL; if (ftx == 1) { do_remote_base(); } else if (ftx == 2) { /* * On AMD chips, this causes GSBASE != 0, GS == 0, and * thread.gsbase == 0. */ load_gs(); asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0)); } else { errx(1, "helper thread got bad command"); } ftx = 0; syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0); } } static void set_gs_and_switch_to(unsigned long local, unsigned short force_sel, unsigned long remote) { unsigned long base; unsigned short sel_pre_sched, sel_post_sched; bool hard_zero = false; if (local == HARD_ZERO) { hard_zero = true; local = 0; } printf("[RUN]\tARCH_SET_GS(0x%lx)%s, then schedule to 0x%lx\n", local, hard_zero ? " and clear gs" : "", remote); if (force_sel) printf("\tBefore schedule, set selector to 0x%hx\n", force_sel); if (syscall(SYS_arch_prctl, ARCH_SET_GS, local) != 0) err(1, "ARCH_SET_GS"); if (hard_zero) asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0)); if (read_base(GS) != local) { nerrs++; printf("[FAIL]\tGSBASE wasn't set as expected\n"); } if (force_sel) { asm volatile ("mov %0, %%gs" : : "rm" (force_sel)); sel_pre_sched = force_sel; local = read_base(GS); /* * Signal delivery is quite likely to change a selector * of 1, 2, or 3 back to 0 due to IRET being defective. 
*/ asm volatile ("mov %0, %%gs" : : "rm" (force_sel)); } else { asm volatile ("mov %%gs, %0" : "=rm" (sel_pre_sched)); } remote_base = remote; ftx = 1; syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0); while (ftx != 0) syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0); asm volatile ("mov %%gs, %0" : "=rm" (sel_post_sched)); base = read_base(GS); if (base == local && sel_pre_sched == sel_post_sched) { printf("[OK]\tGS/BASE remained 0x%hx/0x%lx\n", sel_pre_sched, local); } else if (base == local && sel_pre_sched >= 1 && sel_pre_sched <= 3 && sel_post_sched == 0) { /* * IRET is misdesigned and will squash selectors 1, 2, or 3 * to zero. Don't fail the test just because this happened. */ printf("[OK]\tGS/BASE changed from 0x%hx/0x%lx to 0x%hx/0x%lx because IRET is defective\n", sel_pre_sched, local, sel_post_sched, base); } else { nerrs++; printf("[FAIL]\tGS/BASE changed from 0x%hx/0x%lx to 0x%hx/0x%lx\n", sel_pre_sched, local, sel_post_sched, base); } } static void test_unexpected_base(void) { unsigned long base; printf("[RUN]\tARCH_SET_GS(0), clear gs, then manipulate GSBASE in a different thread\n"); if (syscall(SYS_arch_prctl, ARCH_SET_GS, 0) != 0) err(1, "ARCH_SET_GS"); asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0)); ftx = 2; syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0); while (ftx != 0) syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0); base = read_base(GS); if (base == 0) { printf("[OK]\tGSBASE remained 0\n"); } else { nerrs++; printf("[FAIL]\tGSBASE changed to 0x%lx\n", base); } } #define USER_REGS_OFFSET(r) offsetof(struct user_regs_struct, r) static void test_ptrace_write_gs_read_base(void) { int status; pid_t child = fork(); if (child < 0) err(1, "fork"); if (child == 0) { printf("[RUN]\tPTRACE_POKE GS, read GSBASE back\n"); printf("[RUN]\tARCH_SET_GS to 1\n"); if (syscall(SYS_arch_prctl, ARCH_SET_GS, 1) != 0) err(1, "ARCH_SET_GS"); if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) err(1, "PTRACE_TRACEME"); raise(SIGTRAP); _exit(0); } wait(&status); if (WSTOPSIG(status) == SIGTRAP) { unsigned long base; unsigned long gs_offset = USER_REGS_OFFSET(gs); unsigned long base_offset = USER_REGS_OFFSET(gs_base); /* Read the initial base. It should be 1. */ base = ptrace(PTRACE_PEEKUSER, child, base_offset, NULL); if (base == 1) { printf("[OK]\tGSBASE started at 1\n"); } else { nerrs++; printf("[FAIL]\tGSBASE started at 0x%lx\n", base); } printf("[RUN]\tSet GS = 0x7, read GSBASE\n"); /* Poke an LDT selector into GS. */ if (ptrace(PTRACE_POKEUSER, child, gs_offset, 0x7) != 0) err(1, "PTRACE_POKEUSER"); /* And read the base. 
*/ base = ptrace(PTRACE_PEEKUSER, child, base_offset, NULL); if (base == 0 || base == 1) { printf("[OK]\tGSBASE reads as 0x%lx with invalid GS\n", base); } else { nerrs++; printf("[FAIL]\tGSBASE=0x%lx (should be 0 or 1)\n", base); } } ptrace(PTRACE_CONT, child, NULL, NULL); wait(&status); if (!WIFEXITED(status)) printf("[WARN]\tChild didn't exit cleanly.\n"); } static void test_ptrace_write_gsbase(void) { int status; pid_t child = fork(); if (child < 0) err(1, "fork"); if (child == 0) { printf("[RUN]\tPTRACE_POKE(), write GSBASE from ptracer\n"); *shared_scratch = load_gs(); if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) err(1, "PTRACE_TRACEME"); raise(SIGTRAP); _exit(0); } wait(&status); if (WSTOPSIG(status) == SIGTRAP) { unsigned long gs, base; unsigned long gs_offset = USER_REGS_OFFSET(gs); unsigned long base_offset = USER_REGS_OFFSET(gs_base); gs = ptrace(PTRACE_PEEKUSER, child, gs_offset, NULL); if (gs != *shared_scratch) { nerrs++; printf("[FAIL]\tGS is not prepared with nonzero\n"); goto END; } if (ptrace(PTRACE_POKEUSER, child, base_offset, 0xFF) != 0) err(1, "PTRACE_POKEUSER"); gs = ptrace(PTRACE_PEEKUSER, child, gs_offset, NULL); base = ptrace(PTRACE_PEEKUSER, child, base_offset, NULL); /* * In a non-FSGSBASE system, the nonzero selector will load * GSBASE (again). But what is tested here is whether the * selector value is changed or not by the GSBASE write in * a ptracer. */ if (gs != *shared_scratch) { nerrs++; printf("[FAIL]\tGS changed to %lx\n", gs); /* * On older kernels, poking a nonzero value into the * base would zero the selector. On newer kernels, * this behavior has changed -- poking the base * changes only the base and, if FSGSBASE is not * available, this may have no effect once the tracee * is resumed. */ if (gs == 0) printf("\tNote: this is expected behavior on older kernels.\n"); } else if (have_fsgsbase && (base != 0xFF)) { nerrs++; printf("[FAIL]\tGSBASE changed to %lx\n", base); } else { printf("[OK]\tGS remained 0x%hx", *shared_scratch); if (have_fsgsbase) printf(" and GSBASE changed to 0xFF"); printf("\n"); } } END: ptrace(PTRACE_CONT, child, NULL, NULL); wait(&status); if (!WIFEXITED(status)) printf("[WARN]\tChild didn't exit cleanly.\n"); } int main() { pthread_t thread; shared_scratch = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED, -1, 0); /* Do these tests before we have an LDT. */ test_ptrace_write_gs_read_base(); /* Probe FSGSBASE */ sethandler(SIGILL, sigill, 0); if (sigsetjmp(jmpbuf, 1) == 0) { rdfsbase(); have_fsgsbase = true; printf("\tFSGSBASE instructions are enabled\n"); } else { printf("\tFSGSBASE instructions are disabled\n"); } clearhandler(SIGILL); sethandler(SIGSEGV, sigsegv, 0); check_gs_value(0); check_gs_value(1); check_gs_value(0x200000000); check_gs_value(0); check_gs_value(0x200000000); check_gs_value(1); for (int sched = 0; sched < 2; sched++) { mov_0_gs(0, !!sched); mov_0_gs(1, !!sched); mov_0_gs(0x200000000, !!sched); } /* Set up for multithreading. 
*/ cpu_set_t cpuset; CPU_ZERO(&cpuset); CPU_SET(0, &cpuset); if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) err(1, "sched_setaffinity to CPU 0"); /* should never fail */ if (pthread_create(&thread, 0, threadproc, 0) != 0) err(1, "pthread_create"); static unsigned long bases_with_hard_zero[] = { 0, HARD_ZERO, 1, 0x200000000, }; for (int local = 0; local < 4; local++) { for (int remote = 0; remote < 4; remote++) { for (unsigned short s = 0; s < 5; s++) { unsigned short sel = s; if (s == 4) asm ("mov %%ss, %0" : "=rm" (sel)); set_gs_and_switch_to( bases_with_hard_zero[local], sel, bases_with_hard_zero[remote]); } } } test_unexpected_base(); if (have_fsgsbase) { unsigned short ss; asm volatile ("mov %%ss, %0" : "=rm" (ss)); test_wrbase(0, 0); test_wrbase(0, 1); test_wrbase(0, 0x200000000); test_wrbase(0, 0xffffffffffffffff); test_wrbase(ss, 0); test_wrbase(ss, 1); test_wrbase(ss, 0x200000000); test_wrbase(ss, 0xffffffffffffffff); } ftx = 3; /* Kill the thread. */ syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0); if (pthread_join(thread, NULL) != 0) err(1, "pthread_join"); test_ptrace_write_gsbase(); return nerrs == 0 ? 0 : 1; }
/* SPDX-License-Identifier: GPL-2.0 */ /****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * ******************************************************************************/ #ifndef _RTW_HT_H_ #define _RTW_HT_H_ struct ht_priv { u8 ht_option; u8 ampdu_enable;/* for enable Tx A-MPDU */ u8 tx_amsdu_enable;/* for enable Tx A-MSDU */ u8 bss_coexist;/* for 20/40 Bss coexist */ /* u8 baddbareq_issued[16]; */ u32 tx_amsdu_maxlen; /* 1: 8k, 0:4k ; default:8k, for tx */ u32 rx_ampdu_maxlen; /* for rx reordering ctrl win_sz, updated when join_callback. */ u8 rx_ampdu_min_spacing; u8 ch_offset;/* PRIME_CHNL_OFFSET */ u8 sgi_20m; u8 sgi_40m; /* for processing Tx A-MPDU */ u8 agg_enable_bitmap; /* u8 ADDBA_retry_count; */ u8 candidate_tid_bitmap; u8 ldpc_cap; u8 stbc_cap; u8 beamform_cap; struct ieee80211_ht_cap ht_cap; }; enum { HT_AGG_SIZE_8K = 0, HT_AGG_SIZE_16K = 1, HT_AGG_SIZE_32K = 2, HT_AGG_SIZE_64K = 3, }; enum { RT_HT_CAP_USE_TURBO_AGGR = 0x01, RT_HT_CAP_USE_LONG_PREAMBLE = 0x02, RT_HT_CAP_USE_AMPDU = 0x04, RT_HT_CAP_USE_WOW = 0x8, RT_HT_CAP_USE_SOFTAP = 0x10, RT_HT_CAP_USE_92SE = 0x20, RT_HT_CAP_USE_88C_92C = 0x40, RT_HT_CAP_USE_AP_CLIENT_MODE = 0x80, /* AP team request to reserve this bit, by Emily */ }; enum { RT_HT_CAP_USE_VIDEO_CLIENT = 0x01, RT_HT_CAP_USE_JAGUAR_BCUT = 0x02, RT_HT_CAP_USE_JAGUAR_CCUT = 0x04, }; #define LDPC_HT_ENABLE_RX BIT0 #define LDPC_HT_ENABLE_TX BIT1 #define LDPC_HT_CAP_TX BIT3 #define STBC_HT_ENABLE_RX BIT0 #define STBC_HT_ENABLE_TX BIT1 #define STBC_HT_CAP_TX BIT3 #define BEAMFORMING_HT_BEAMFORMER_ENABLE BIT0 /* Declare our NIC supports beamformer */ #define BEAMFORMING_HT_BEAMFORMEE_ENABLE BIT1 /* Declare our NIC supports beamformee */ /* 20/40 BSS Coexist */ #define SET_EXT_CAPABILITY_ELE_BSS_COEXIST(_pEleStart, _val) SET_BITS_TO_LE_1BYTE((_pEleStart), 0, 1, _val) #define GET_HT_CAPABILITY_ELE_LDPC_CAP(_pEleStart) LE_BITS_TO_1BYTE(_pEleStart, 0, 1) #define GET_HT_CAPABILITY_ELE_TX_STBC(_pEleStart) LE_BITS_TO_1BYTE(_pEleStart, 7, 1) #define GET_HT_CAPABILITY_ELE_RX_STBC(_pEleStart) LE_BITS_TO_1BYTE((_pEleStart)+1, 0, 2) #endif /* _RTL871X_HT_H_ */
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __PERF_BLOCK_RANGE_H #define __PERF_BLOCK_RANGE_H #include <stdbool.h> #include <linux/rbtree.h> #include <linux/types.h> struct symbol; /* * struct block_range - non-overlapping parts of basic blocks * @node: treenode * @start: inclusive start of range * @end: inclusive end of range * @is_target: @start is a jump target * @is_branch: @end is a branch instruction * @coverage: number of blocks that cover this range * @taken: number of times the branch is taken (requires @is_branch) * @pred: number of times the taken branch was predicted */ struct block_range { struct rb_node node; struct symbol *sym; u64 start; u64 end; int is_target, is_branch; u64 coverage; u64 entry; u64 taken; u64 pred; }; static inline struct block_range *block_range__next(struct block_range *br) { struct rb_node *n = rb_next(&br->node); if (!n) return NULL; return rb_entry(n, struct block_range, node); } struct block_range_iter { struct block_range *start; struct block_range *end; }; static inline struct block_range *block_range_iter(struct block_range_iter *iter) { return iter->start; } static inline bool block_range_iter__next(struct block_range_iter *iter) { if (iter->start == iter->end) return false; iter->start = block_range__next(iter->start); return true; } static inline bool block_range_iter__valid(struct block_range_iter *iter) { if (!iter->start || !iter->end) return false; return true; } extern struct block_range *block_range__find(u64 addr); extern struct block_range_iter block_range__create(u64 start, u64 end); extern double block_range__coverage(struct block_range *br); #endif /* __PERF_BLOCK_RANGE_H */
// SPDX-License-Identifier: GPL-2.0-or-later /* * MPC8360E-RDK board file. * * Copyright (c) 2006 Freescale Semiconductor, Inc. * Copyright (c) 2007-2008 MontaVista Software, Inc. * * Author: Anton Vorontsov <[email protected]> */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/of_platform.h> #include <linux/io.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <soc/fsl/qe/qe.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" machine_device_initcall(mpc836x_rdk, mpc83xx_declare_of_platform_devices); static void __init mpc836x_rdk_setup_arch(void) { mpc83xx_setup_arch(); } define_machine(mpc836x_rdk) { .name = "MPC836x RDK", .compatible = "fsl,mpc8360rdk", .setup_arch = mpc836x_rdk_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .progress = udbg_progress, };
// SPDX-License-Identifier: GPL-2.0-only /* * leds-bd2802.c - RGB LED Driver * * Copyright (C) 2009 Samsung Electronics * Kim Kyuwon <[email protected]> * * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/gpio/consumer.h> #include <linux/delay.h> #include <linux/leds.h> #include <linux/leds-bd2802.h> #include <linux/slab.h> #include <linux/pm.h> #define LED_CTL(rgb2en, rgb1en) ((rgb2en) << 4 | ((rgb1en) << 0)) #define BD2802_LED_OFFSET 0xa #define BD2802_COLOR_OFFSET 0x3 #define BD2802_REG_CLKSETUP 0x00 #define BD2802_REG_CONTROL 0x01 #define BD2802_REG_HOURSETUP 0x02 #define BD2802_REG_CURRENT1SETUP 0x03 #define BD2802_REG_CURRENT2SETUP 0x04 #define BD2802_REG_WAVEPATTERN 0x05 #define BD2802_CURRENT_032 0x10 /* 3.2mA */ #define BD2802_CURRENT_000 0x00 /* 0.0mA */ #define BD2802_PATTERN_FULL 0x07 #define BD2802_PATTERN_HALF 0x03 enum led_ids { LED1, LED2, LED_NUM, }; enum led_colors { RED, GREEN, BLUE, }; enum led_bits { BD2802_OFF, BD2802_BLINK, BD2802_ON, }; /* * State '0' : 'off' * State '1' : 'blink' * State '2' : 'on'. */ struct led_state { unsigned r:2; unsigned g:2; unsigned b:2; }; struct bd2802_led { struct bd2802_led_platform_data *pdata; struct i2c_client *client; struct gpio_desc *reset; struct rw_semaphore rwsem; struct led_state led[2]; /* * Making led_classdev as array is not recommended, because array * members prevent using 'container_of' macro. So repetitive works * are needed. */ struct led_classdev cdev_led1r; struct led_classdev cdev_led1g; struct led_classdev cdev_led1b; struct led_classdev cdev_led2r; struct led_classdev cdev_led2g; struct led_classdev cdev_led2b; /* * Advanced Configuration Function(ADF) mode: * In ADF mode, user can set registers of BD2802GU directly, * therefore BD2802GU doesn't enter reset state. 
*/ int adf_on; enum led_ids led_id; enum led_colors color; enum led_bits state; /* General attributes of RGB LEDs */ int wave_pattern; int rgb_current; }; /*--------------------------------------------------------------*/ /* BD2802GU helper functions */ /*--------------------------------------------------------------*/ static inline int bd2802_is_rgb_off(struct bd2802_led *led, enum led_ids id, enum led_colors color) { switch (color) { case RED: return !led->led[id].r; case GREEN: return !led->led[id].g; case BLUE: return !led->led[id].b; default: dev_err(&led->client->dev, "%s: Invalid color\n", __func__); return -EINVAL; } } static inline int bd2802_is_led_off(struct bd2802_led *led, enum led_ids id) { if (led->led[id].r || led->led[id].g || led->led[id].b) return 0; return 1; } static inline int bd2802_is_all_off(struct bd2802_led *led) { int i; for (i = 0; i < LED_NUM; i++) if (!bd2802_is_led_off(led, i)) return 0; return 1; } static inline u8 bd2802_get_base_offset(enum led_ids id, enum led_colors color) { return id * BD2802_LED_OFFSET + color * BD2802_COLOR_OFFSET; } static inline u8 bd2802_get_reg_addr(enum led_ids id, enum led_colors color, u8 reg_offset) { return reg_offset + bd2802_get_base_offset(id, color); } /*--------------------------------------------------------------*/ /* BD2802GU core functions */ /*--------------------------------------------------------------*/ static int bd2802_write_byte(struct i2c_client *client, u8 reg, u8 val) { int ret = i2c_smbus_write_byte_data(client, reg, val); if (ret >= 0) return 0; dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", __func__, reg, val, ret); return ret; } static void bd2802_update_state(struct bd2802_led *led, enum led_ids id, enum led_colors color, enum led_bits led_bit) { int i; u8 value; for (i = 0; i < LED_NUM; i++) { if (i == id) { switch (color) { case RED: led->led[i].r = led_bit; break; case GREEN: led->led[i].g = led_bit; break; case BLUE: led->led[i].b = led_bit; break; default: dev_err(&led->client->dev, "%s: Invalid color\n", __func__); return; } } } if (led_bit == BD2802_BLINK || led_bit == BD2802_ON) return; if (!bd2802_is_led_off(led, id)) return; if (bd2802_is_all_off(led) && !led->adf_on) { gpiod_set_value(led->reset, 1); return; } /* * In this case, other led is turned on, and current led is turned * off. So set RGB LED Control register to stop the current RGB LED */ value = (id == LED1) ? LED_CTL(1, 0) : LED_CTL(0, 1); bd2802_write_byte(led->client, BD2802_REG_CONTROL, value); } static void bd2802_configure(struct bd2802_led *led) { struct bd2802_led_platform_data *pdata = led->pdata; u8 reg; reg = bd2802_get_reg_addr(LED1, RED, BD2802_REG_HOURSETUP); bd2802_write_byte(led->client, reg, pdata->rgb_time); reg = bd2802_get_reg_addr(LED2, RED, BD2802_REG_HOURSETUP); bd2802_write_byte(led->client, reg, pdata->rgb_time); } static void bd2802_reset_cancel(struct bd2802_led *led) { gpiod_set_value(led->reset, 0); udelay(100); bd2802_configure(led); } static void bd2802_enable(struct bd2802_led *led, enum led_ids id) { enum led_ids other_led = (id == LED1) ? 
LED2 : LED1; u8 value, other_led_on; other_led_on = !bd2802_is_led_off(led, other_led); if (id == LED1) value = LED_CTL(other_led_on, 1); else value = LED_CTL(1 , other_led_on); bd2802_write_byte(led->client, BD2802_REG_CONTROL, value); } static void bd2802_set_on(struct bd2802_led *led, enum led_ids id, enum led_colors color) { u8 reg; if (bd2802_is_all_off(led) && !led->adf_on) bd2802_reset_cancel(led); reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP); bd2802_write_byte(led->client, reg, led->rgb_current); reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP); bd2802_write_byte(led->client, reg, BD2802_CURRENT_000); reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN); bd2802_write_byte(led->client, reg, BD2802_PATTERN_FULL); bd2802_enable(led, id); bd2802_update_state(led, id, color, BD2802_ON); } static void bd2802_set_blink(struct bd2802_led *led, enum led_ids id, enum led_colors color) { u8 reg; if (bd2802_is_all_off(led) && !led->adf_on) bd2802_reset_cancel(led); reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP); bd2802_write_byte(led->client, reg, BD2802_CURRENT_000); reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP); bd2802_write_byte(led->client, reg, led->rgb_current); reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN); bd2802_write_byte(led->client, reg, led->wave_pattern); bd2802_enable(led, id); bd2802_update_state(led, id, color, BD2802_BLINK); } static void bd2802_turn_on(struct bd2802_led *led, enum led_ids id, enum led_colors color, enum led_bits led_bit) { if (led_bit == BD2802_OFF) { dev_err(&led->client->dev, "Only 'blink' and 'on' are allowed\n"); return; } if (led_bit == BD2802_BLINK) bd2802_set_blink(led, id, color); else bd2802_set_on(led, id, color); } static void bd2802_turn_off(struct bd2802_led *led, enum led_ids id, enum led_colors color) { u8 reg; if (bd2802_is_rgb_off(led, id, color)) return; reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP); bd2802_write_byte(led->client, reg, BD2802_CURRENT_000); reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP); bd2802_write_byte(led->client, reg, BD2802_CURRENT_000); bd2802_update_state(led, id, color, BD2802_OFF); } #define BD2802_SET_REGISTER(reg_addr, reg_name) \ static ssize_t bd2802_store_reg##reg_addr(struct device *dev, \ struct device_attribute *attr, const char *buf, size_t count) \ { \ struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\ unsigned long val; \ int ret; \ if (!count) \ return -EINVAL; \ ret = kstrtoul(buf, 16, &val); \ if (ret) \ return ret; \ down_write(&led->rwsem); \ bd2802_write_byte(led->client, reg_addr, (u8) val); \ up_write(&led->rwsem); \ return count; \ } \ static struct device_attribute bd2802_reg##reg_addr##_attr = { \ .attr = {.name = reg_name, .mode = 0644}, \ .store = bd2802_store_reg##reg_addr, \ }; BD2802_SET_REGISTER(0x00, "0x00"); BD2802_SET_REGISTER(0x01, "0x01"); BD2802_SET_REGISTER(0x02, "0x02"); BD2802_SET_REGISTER(0x03, "0x03"); BD2802_SET_REGISTER(0x04, "0x04"); BD2802_SET_REGISTER(0x05, "0x05"); BD2802_SET_REGISTER(0x06, "0x06"); BD2802_SET_REGISTER(0x07, "0x07"); BD2802_SET_REGISTER(0x08, "0x08"); BD2802_SET_REGISTER(0x09, "0x09"); BD2802_SET_REGISTER(0x0a, "0x0a"); BD2802_SET_REGISTER(0x0b, "0x0b"); BD2802_SET_REGISTER(0x0c, "0x0c"); BD2802_SET_REGISTER(0x0d, "0x0d"); BD2802_SET_REGISTER(0x0e, "0x0e"); BD2802_SET_REGISTER(0x0f, "0x0f"); BD2802_SET_REGISTER(0x10, "0x10"); BD2802_SET_REGISTER(0x11, "0x11"); BD2802_SET_REGISTER(0x12, "0x12"); BD2802_SET_REGISTER(0x13, 
"0x13"); BD2802_SET_REGISTER(0x14, "0x14"); BD2802_SET_REGISTER(0x15, "0x15"); static struct device_attribute *bd2802_addr_attributes[] = { &bd2802_reg0x00_attr, &bd2802_reg0x01_attr, &bd2802_reg0x02_attr, &bd2802_reg0x03_attr, &bd2802_reg0x04_attr, &bd2802_reg0x05_attr, &bd2802_reg0x06_attr, &bd2802_reg0x07_attr, &bd2802_reg0x08_attr, &bd2802_reg0x09_attr, &bd2802_reg0x0a_attr, &bd2802_reg0x0b_attr, &bd2802_reg0x0c_attr, &bd2802_reg0x0d_attr, &bd2802_reg0x0e_attr, &bd2802_reg0x0f_attr, &bd2802_reg0x10_attr, &bd2802_reg0x11_attr, &bd2802_reg0x12_attr, &bd2802_reg0x13_attr, &bd2802_reg0x14_attr, &bd2802_reg0x15_attr, }; static void bd2802_enable_adv_conf(struct bd2802_led *led) { int i, ret; for (i = 0; i < ARRAY_SIZE(bd2802_addr_attributes); i++) { ret = device_create_file(&led->client->dev, bd2802_addr_attributes[i]); if (ret) { dev_err(&led->client->dev, "failed: sysfs file %s\n", bd2802_addr_attributes[i]->attr.name); goto failed_remove_files; } } if (bd2802_is_all_off(led)) bd2802_reset_cancel(led); led->adf_on = 1; return; failed_remove_files: for (i--; i >= 0; i--) device_remove_file(&led->client->dev, bd2802_addr_attributes[i]); } static void bd2802_disable_adv_conf(struct bd2802_led *led) { int i; for (i = 0; i < ARRAY_SIZE(bd2802_addr_attributes); i++) device_remove_file(&led->client->dev, bd2802_addr_attributes[i]); if (bd2802_is_all_off(led)) gpiod_set_value(led->reset, 1); led->adf_on = 0; } static ssize_t bd2802_show_adv_conf(struct device *dev, struct device_attribute *attr, char *buf) { struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev)); ssize_t ret; down_read(&led->rwsem); if (led->adf_on) ret = sprintf(buf, "on\n"); else ret = sprintf(buf, "off\n"); up_read(&led->rwsem); return ret; } static ssize_t bd2802_store_adv_conf(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev)); if (!count) return -EINVAL; down_write(&led->rwsem); if (!led->adf_on && !strncmp(buf, "on", 2)) bd2802_enable_adv_conf(led); else if (led->adf_on && !strncmp(buf, "off", 3)) bd2802_disable_adv_conf(led); up_write(&led->rwsem); return count; } static struct device_attribute bd2802_adv_conf_attr = { .attr = { .name = "advanced_configuration", .mode = 0644, }, .show = bd2802_show_adv_conf, .store = bd2802_store_adv_conf, }; #define BD2802_CONTROL_ATTR(attr_name, name_str) \ static ssize_t bd2802_show_##attr_name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\ ssize_t ret; \ down_read(&led->rwsem); \ ret = sprintf(buf, "0x%02x\n", led->attr_name); \ up_read(&led->rwsem); \ return ret; \ } \ static ssize_t bd2802_store_##attr_name(struct device *dev, \ struct device_attribute *attr, const char *buf, size_t count) \ { \ struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\ unsigned long val; \ int ret; \ if (!count) \ return -EINVAL; \ ret = kstrtoul(buf, 16, &val); \ if (ret) \ return ret; \ down_write(&led->rwsem); \ led->attr_name = val; \ up_write(&led->rwsem); \ return count; \ } \ static struct device_attribute bd2802_##attr_name##_attr = { \ .attr = { \ .name = name_str, \ .mode = 0644, \ }, \ .show = bd2802_show_##attr_name, \ .store = bd2802_store_##attr_name, \ }; BD2802_CONTROL_ATTR(wave_pattern, "wave_pattern"); BD2802_CONTROL_ATTR(rgb_current, "rgb_current"); static struct device_attribute *bd2802_attributes[] = { &bd2802_adv_conf_attr, &bd2802_wave_pattern_attr, &bd2802_rgb_current_attr, }; 
#define BD2802_CONTROL_RGBS(name, id, clr) \ static int bd2802_set_##name##_brightness(struct led_classdev *led_cdev,\ enum led_brightness value) \ { \ struct bd2802_led *led = \ container_of(led_cdev, struct bd2802_led, cdev_##name); \ led->led_id = id; \ led->color = clr; \ if (value == LED_OFF) { \ led->state = BD2802_OFF; \ bd2802_turn_off(led, led->led_id, led->color); \ } else { \ led->state = BD2802_ON; \ bd2802_turn_on(led, led->led_id, led->color, BD2802_ON);\ } \ return 0; \ } \ static int bd2802_set_##name##_blink(struct led_classdev *led_cdev, \ unsigned long *delay_on, unsigned long *delay_off) \ { \ struct bd2802_led *led = \ container_of(led_cdev, struct bd2802_led, cdev_##name); \ if (*delay_on == 0 || *delay_off == 0) \ return -EINVAL; \ led->led_id = id; \ led->color = clr; \ led->state = BD2802_BLINK; \ bd2802_turn_on(led, led->led_id, led->color, BD2802_BLINK); \ return 0; \ } BD2802_CONTROL_RGBS(led1r, LED1, RED); BD2802_CONTROL_RGBS(led1g, LED1, GREEN); BD2802_CONTROL_RGBS(led1b, LED1, BLUE); BD2802_CONTROL_RGBS(led2r, LED2, RED); BD2802_CONTROL_RGBS(led2g, LED2, GREEN); BD2802_CONTROL_RGBS(led2b, LED2, BLUE); static int bd2802_register_led_classdev(struct bd2802_led *led) { int ret; led->cdev_led1r.name = "led1_R"; led->cdev_led1r.brightness = LED_OFF; led->cdev_led1r.brightness_set_blocking = bd2802_set_led1r_brightness; led->cdev_led1r.blink_set = bd2802_set_led1r_blink; ret = led_classdev_register(&led->client->dev, &led->cdev_led1r); if (ret < 0) { dev_err(&led->client->dev, "couldn't register LED %s\n", led->cdev_led1r.name); goto failed_unregister_led1_R; } led->cdev_led1g.name = "led1_G"; led->cdev_led1g.brightness = LED_OFF; led->cdev_led1g.brightness_set_blocking = bd2802_set_led1g_brightness; led->cdev_led1g.blink_set = bd2802_set_led1g_blink; ret = led_classdev_register(&led->client->dev, &led->cdev_led1g); if (ret < 0) { dev_err(&led->client->dev, "couldn't register LED %s\n", led->cdev_led1g.name); goto failed_unregister_led1_G; } led->cdev_led1b.name = "led1_B"; led->cdev_led1b.brightness = LED_OFF; led->cdev_led1b.brightness_set_blocking = bd2802_set_led1b_brightness; led->cdev_led1b.blink_set = bd2802_set_led1b_blink; ret = led_classdev_register(&led->client->dev, &led->cdev_led1b); if (ret < 0) { dev_err(&led->client->dev, "couldn't register LED %s\n", led->cdev_led1b.name); goto failed_unregister_led1_B; } led->cdev_led2r.name = "led2_R"; led->cdev_led2r.brightness = LED_OFF; led->cdev_led2r.brightness_set_blocking = bd2802_set_led2r_brightness; led->cdev_led2r.blink_set = bd2802_set_led2r_blink; ret = led_classdev_register(&led->client->dev, &led->cdev_led2r); if (ret < 0) { dev_err(&led->client->dev, "couldn't register LED %s\n", led->cdev_led2r.name); goto failed_unregister_led2_R; } led->cdev_led2g.name = "led2_G"; led->cdev_led2g.brightness = LED_OFF; led->cdev_led2g.brightness_set_blocking = bd2802_set_led2g_brightness; led->cdev_led2g.blink_set = bd2802_set_led2g_blink; ret = led_classdev_register(&led->client->dev, &led->cdev_led2g); if (ret < 0) { dev_err(&led->client->dev, "couldn't register LED %s\n", led->cdev_led2g.name); goto failed_unregister_led2_G; } led->cdev_led2b.name = "led2_B"; led->cdev_led2b.brightness = LED_OFF; led->cdev_led2b.brightness_set_blocking = bd2802_set_led2b_brightness; led->cdev_led2b.blink_set = bd2802_set_led2b_blink; led->cdev_led2b.flags |= LED_CORE_SUSPENDRESUME; ret = led_classdev_register(&led->client->dev, &led->cdev_led2b); if (ret < 0) { dev_err(&led->client->dev, "couldn't register LED %s\n", 
led->cdev_led2b.name); goto failed_unregister_led2_B; } return 0; failed_unregister_led2_B: led_classdev_unregister(&led->cdev_led2g); failed_unregister_led2_G: led_classdev_unregister(&led->cdev_led2r); failed_unregister_led2_R: led_classdev_unregister(&led->cdev_led1b); failed_unregister_led1_B: led_classdev_unregister(&led->cdev_led1g); failed_unregister_led1_G: led_classdev_unregister(&led->cdev_led1r); failed_unregister_led1_R: return ret; } static void bd2802_unregister_led_classdev(struct bd2802_led *led) { led_classdev_unregister(&led->cdev_led2b); led_classdev_unregister(&led->cdev_led2g); led_classdev_unregister(&led->cdev_led2r); led_classdev_unregister(&led->cdev_led1b); led_classdev_unregister(&led->cdev_led1g); led_classdev_unregister(&led->cdev_led1r); } static int bd2802_probe(struct i2c_client *client) { struct bd2802_led *led; int ret, i; led = devm_kzalloc(&client->dev, sizeof(struct bd2802_led), GFP_KERNEL); if (!led) return -ENOMEM; led->client = client; i2c_set_clientdata(client, led); /* * Configure RESET GPIO (L: RESET, H: RESET cancel) * * We request the reset GPIO as OUT_LOW which means de-asserted, * board files specifying this GPIO line in a machine descriptor * table should take care to specify GPIO_ACTIVE_LOW for this line. */ led->reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(led->reset)) return PTR_ERR(led->reset); /* Tacss = min 0.1ms */ udelay(100); /* Detect BD2802GU */ ret = bd2802_write_byte(client, BD2802_REG_CLKSETUP, 0x00); if (ret < 0) { dev_err(&client->dev, "failed to detect device\n"); return ret; } else dev_info(&client->dev, "return 0x%02x\n", ret); /* To save the power, reset BD2802 after detecting */ gpiod_set_value(led->reset, 1); /* Default attributes */ led->wave_pattern = BD2802_PATTERN_HALF; led->rgb_current = BD2802_CURRENT_032; init_rwsem(&led->rwsem); for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) { ret = device_create_file(&led->client->dev, bd2802_attributes[i]); if (ret) { dev_err(&led->client->dev, "failed: sysfs file %s\n", bd2802_attributes[i]->attr.name); goto failed_unregister_dev_file; } } ret = bd2802_register_led_classdev(led); if (ret < 0) goto failed_unregister_dev_file; return 0; failed_unregister_dev_file: for (i--; i >= 0; i--) device_remove_file(&led->client->dev, bd2802_attributes[i]); return ret; } static void bd2802_remove(struct i2c_client *client) { struct bd2802_led *led = i2c_get_clientdata(client); int i; gpiod_set_value(led->reset, 1); bd2802_unregister_led_classdev(led); if (led->adf_on) bd2802_disable_adv_conf(led); for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) device_remove_file(&led->client->dev, bd2802_attributes[i]); } #ifdef CONFIG_PM_SLEEP static void bd2802_restore_state(struct bd2802_led *led) { int i; for (i = 0; i < LED_NUM; i++) { if (led->led[i].r) bd2802_turn_on(led, i, RED, led->led[i].r); if (led->led[i].g) bd2802_turn_on(led, i, GREEN, led->led[i].g); if (led->led[i].b) bd2802_turn_on(led, i, BLUE, led->led[i].b); } } static int bd2802_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct bd2802_led *led = i2c_get_clientdata(client); gpiod_set_value(led->reset, 1); return 0; } static int bd2802_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct bd2802_led *led = i2c_get_clientdata(client); if (!bd2802_is_all_off(led) || led->adf_on) { bd2802_reset_cancel(led); bd2802_restore_state(led); } return 0; } #endif static SIMPLE_DEV_PM_OPS(bd2802_pm, bd2802_suspend, bd2802_resume); static const 
struct i2c_device_id bd2802_id[] = { { "BD2802" }, { } }; MODULE_DEVICE_TABLE(i2c, bd2802_id); static struct i2c_driver bd2802_i2c_driver = { .driver = { .name = "BD2802", .pm = &bd2802_pm, }, .probe = bd2802_probe, .remove = bd2802_remove, .id_table = bd2802_id, }; module_i2c_driver(bd2802_i2c_driver); MODULE_AUTHOR("Kim Kyuwon <[email protected]>"); MODULE_DESCRIPTION("BD2802 LED driver"); MODULE_LICENSE("GPL v2");
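The six class devices this driver registers (led1_R through led2_B) are driven through the standard LED class sysfs interface: a non-zero brightness write reaches bd2802_set_*_brightness() as BD2802_ON, zero as BD2802_OFF. A minimal sketch, assuming sysfs is mounted at /sys and using the classdev name led1_R from the code above.

/*
 * Illustrative sketch only: switch LED1's red channel on, then off, via
 * the LED class interface exposed by the driver above.
 */
#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* Any non-zero value maps to BD2802_ON in the driver. */
	write_attr("/sys/class/leds/led1_R/brightness", "255");
	/* Zero maps to BD2802_OFF. */
	write_attr("/sys/class/leds/led1_R/brightness", "0");
	return 0;
}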
// SPDX-License-Identifier: GPL-2.0+ /* * Touch Screen driver for Renesas MIGO-R Platform * * Copyright (c) 2008 Magnus Damm * Copyright (c) 2007 Ujjwal Pande <[email protected]>, * Kenati Technologies Pvt Ltd. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/pm.h> #include <linux/slab.h> #include <asm/io.h> #include <linux/i2c.h> #include <linux/timer.h> #define EVENT_PENDOWN 1 #define EVENT_REPEAT 2 #define EVENT_PENUP 3 struct migor_ts_priv { struct i2c_client *client; struct input_dev *input; int irq; }; static const u_int8_t migor_ts_ena_seq[17] = { 0x33, 0x22, 0x11, 0x01, 0x06, 0x07, }; static const u_int8_t migor_ts_dis_seq[17] = { }; static irqreturn_t migor_ts_isr(int irq, void *dev_id) { struct migor_ts_priv *priv = dev_id; unsigned short xpos, ypos; unsigned char event; u_int8_t buf[16]; /* * The touch screen controller chip is hooked up to the CPU * using I2C and a single interrupt line. The interrupt line * is pulled low whenever someone taps the screen. To deassert * the interrupt line we need to acknowledge the interrupt by * communicating with the controller over the slow i2c bus. * * Since I2C bus controller may sleep we are using threaded * IRQ here. */ memset(buf, 0, sizeof(buf)); /* Set Index 0 */ buf[0] = 0; if (i2c_master_send(priv->client, buf, 1) != 1) { dev_err(&priv->client->dev, "Unable to write i2c index\n"); goto out; } /* Now do Page Read */ if (i2c_master_recv(priv->client, buf, sizeof(buf)) != sizeof(buf)) { dev_err(&priv->client->dev, "Unable to read i2c page\n"); goto out; } ypos = ((buf[9] & 0x03) << 8 | buf[8]); xpos = ((buf[11] & 0x03) << 8 | buf[10]); event = buf[12]; switch (event) { case EVENT_PENDOWN: case EVENT_REPEAT: input_report_key(priv->input, BTN_TOUCH, 1); input_report_abs(priv->input, ABS_X, ypos); /*X-Y swap*/ input_report_abs(priv->input, ABS_Y, xpos); input_sync(priv->input); break; case EVENT_PENUP: input_report_key(priv->input, BTN_TOUCH, 0); input_sync(priv->input); break; } out: return IRQ_HANDLED; } static int migor_ts_open(struct input_dev *dev) { struct migor_ts_priv *priv = input_get_drvdata(dev); struct i2c_client *client = priv->client; int count; /* enable controller */ count = i2c_master_send(client, migor_ts_ena_seq, sizeof(migor_ts_ena_seq)); if (count != sizeof(migor_ts_ena_seq)) { dev_err(&client->dev, "Unable to enable touchscreen.\n"); return -ENXIO; } return 0; } static void migor_ts_close(struct input_dev *dev) { struct migor_ts_priv *priv = input_get_drvdata(dev); struct i2c_client *client = priv->client; disable_irq(priv->irq); /* disable controller */ i2c_master_send(client, migor_ts_dis_seq, sizeof(migor_ts_dis_seq)); enable_irq(priv->irq); } static int migor_ts_probe(struct i2c_client *client) { struct migor_ts_priv *priv; struct input_dev *input; int error; priv = kzalloc(sizeof(*priv), GFP_KERNEL); input = input_allocate_device(); if (!priv || !input) { dev_err(&client->dev, "failed to allocate memory\n"); error = -ENOMEM; goto err_free_mem; } priv->client = client; priv->input = input; priv->irq = client->irq; input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, input->keybit); input_set_abs_params(input, ABS_X, 95, 955, 0, 0); input_set_abs_params(input, ABS_Y, 85, 935, 0, 0); input->name = client->name; input->id.bustype = BUS_I2C; input->dev.parent = &client->dev; input->open = migor_ts_open; input->close = migor_ts_close; input_set_drvdata(input, priv); error = request_threaded_irq(priv->irq, NULL, 
migor_ts_isr, IRQF_TRIGGER_LOW | IRQF_ONESHOT, client->name, priv); if (error) { dev_err(&client->dev, "Unable to request touchscreen IRQ.\n"); goto err_free_mem; } error = input_register_device(input); if (error) goto err_free_irq; i2c_set_clientdata(client, priv); device_init_wakeup(&client->dev, 1); return 0; err_free_irq: free_irq(priv->irq, priv); err_free_mem: input_free_device(input); kfree(priv); return error; } static void migor_ts_remove(struct i2c_client *client) { struct migor_ts_priv *priv = i2c_get_clientdata(client); free_irq(priv->irq, priv); input_unregister_device(priv->input); kfree(priv); dev_set_drvdata(&client->dev, NULL); } static int migor_ts_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct migor_ts_priv *priv = i2c_get_clientdata(client); if (device_may_wakeup(&client->dev)) enable_irq_wake(priv->irq); return 0; } static int migor_ts_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct migor_ts_priv *priv = i2c_get_clientdata(client); if (device_may_wakeup(&client->dev)) disable_irq_wake(priv->irq); return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(migor_ts_pm, migor_ts_suspend, migor_ts_resume); static const struct i2c_device_id migor_ts_id[] = { { "migor_ts" }, { } }; MODULE_DEVICE_TABLE(i2c, migor_ts_id); static struct i2c_driver migor_ts_driver = { .driver = { .name = "migor_ts", .pm = pm_sleep_ptr(&migor_ts_pm), }, .probe = migor_ts_probe, .remove = migor_ts_remove, .id_table = migor_ts_id, }; module_i2c_driver(migor_ts_driver); MODULE_DESCRIPTION("MigoR Touchscreen driver"); MODULE_AUTHOR("Magnus Damm <[email protected]>"); MODULE_LICENSE("GPL");
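The MIGO-R touchscreen reports through the generic input layer (BTN_TOUCH plus ABS_X/ABS_Y after the X-Y swap in the ISR), so its events can be consumed from the matching evdev node. A minimal reader sketch; /dev/input/event0 is only a placeholder for whichever node the input core assigns to this device.

/*
 * Illustrative sketch only: read touch events from an evdev node fed by
 * an input driver such as migor_ts. The device path is a placeholder.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_KEY && ev.code == BTN_TOUCH)
			printf("touch %s\n", ev.value ? "down" : "up");
		else if (ev.type == EV_ABS && ev.code == ABS_X)
			printf("x = %d\n", ev.value);
		else if (ev.type == EV_ABS && ev.code == ABS_Y)
			printf("y = %d\n", ev.value);
	}
	close(fd);
	return 0;
}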
// SPDX-License-Identifier: GPL-2.0-or-later /* * BSC913xRDB Board Setup * * Author: Priyanka Jain <[email protected]> * * Copyright 2011-2012 Freescale Semiconductor Inc. */ #include <linux/of.h> #include <linux/pci.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> #include <asm/udbg.h> #include "mpc85xx.h" static void __init bsc913x_rdb_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); if (!mpic) pr_err("bsc913x: Failed to allocate MPIC structure\n"); else mpic_init(mpic); } /* * Setup the architecture */ static void __init bsc913x_rdb_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("bsc913x_rdb_setup_arch()", 0); pr_info("bsc913x board from Freescale Semiconductor\n"); } machine_device_initcall(bsc9131_rdb, mpc85xx_common_publish_devices); define_machine(bsc9131_rdb) { .name = "BSC9131 RDB", .compatible = "fsl,bsc9131rdb", .setup_arch = bsc913x_rdb_setup_arch, .init_IRQ = bsc913x_rdb_pic_init, .get_irq = mpic_get_irq, .progress = udbg_progress, };
/* * Cypress APA trackpad with I2C interface * * Author: Dudley Du <[email protected]> * Further cleanup and restructuring by: * Daniel Kurtz <[email protected]> * Benson Leung <[email protected]> * * Copyright (C) 2011-2015 Cypress Semiconductor, Inc. * Copyright (C) 2011-2012 Google, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/input/mt.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/unaligned.h> #include "cyapa.h" #define GEN3_MAX_FINGERS 5 #define GEN3_FINGER_NUM(x) (((x) >> 4) & 0x07) #define BLK_HEAD_BYTES 32 /* Macro for register map group offset. */ #define PRODUCT_ID_SIZE 16 #define QUERY_DATA_SIZE 27 #define REG_PROTOCOL_GEN_QUERY_OFFSET 20 #define REG_OFFSET_DATA_BASE 0x0000 #define REG_OFFSET_COMMAND_BASE 0x0028 #define REG_OFFSET_QUERY_BASE 0x002a #define CYAPA_OFFSET_SOFT_RESET REG_OFFSET_COMMAND_BASE #define OP_RECALIBRATION_MASK 0x80 #define OP_REPORT_BASELINE_MASK 0x40 #define REG_OFFSET_MAX_BASELINE 0x0026 #define REG_OFFSET_MIN_BASELINE 0x0027 #define REG_OFFSET_POWER_MODE (REG_OFFSET_COMMAND_BASE + 1) #define SET_POWER_MODE_DELAY 10000 /* Unit: us */ #define SET_POWER_MODE_TRIES 5 #define GEN3_BL_CMD_CHECKSUM_SEED 0xff #define GEN3_BL_CMD_INITIATE_BL 0x38 #define GEN3_BL_CMD_WRITE_BLOCK 0x39 #define GEN3_BL_CMD_VERIFY_BLOCK 0x3a #define GEN3_BL_CMD_TERMINATE_BL 0x3b #define GEN3_BL_CMD_LAUNCH_APP 0xa5 /* * CYAPA trackpad device states. * Used in register 0x00, bit1-0, DeviceStatus field. * Other values indicate device is in an abnormal state and must be reset. */ #define CYAPA_DEV_NORMAL 0x03 #define CYAPA_DEV_BUSY 0x01 #define CYAPA_FW_BLOCK_SIZE 64 #define CYAPA_FW_READ_SIZE 16 #define CYAPA_FW_HDR_START 0x0780 #define CYAPA_FW_HDR_BLOCK_COUNT 2 #define CYAPA_FW_HDR_BLOCK_START (CYAPA_FW_HDR_START / CYAPA_FW_BLOCK_SIZE) #define CYAPA_FW_HDR_SIZE (CYAPA_FW_HDR_BLOCK_COUNT * \ CYAPA_FW_BLOCK_SIZE) #define CYAPA_FW_DATA_START 0x0800 #define CYAPA_FW_DATA_BLOCK_COUNT 480 #define CYAPA_FW_DATA_BLOCK_START (CYAPA_FW_DATA_START / CYAPA_FW_BLOCK_SIZE) #define CYAPA_FW_DATA_SIZE (CYAPA_FW_DATA_BLOCK_COUNT * \ CYAPA_FW_BLOCK_SIZE) #define CYAPA_FW_SIZE (CYAPA_FW_HDR_SIZE + CYAPA_FW_DATA_SIZE) #define CYAPA_CMD_LEN 16 #define GEN3_BL_IDLE_FW_MAJ_VER_OFFSET 0x0b #define GEN3_BL_IDLE_FW_MIN_VER_OFFSET (GEN3_BL_IDLE_FW_MAJ_VER_OFFSET + 1) struct cyapa_touch { /* * high bits or x/y position value * bit 7 - 4: high 4 bits of x position value * bit 3 - 0: high 4 bits of y position value */ u8 xy_hi; u8 x_lo; /* low 8 bits of x position value. */ u8 y_lo; /* low 8 bits of y position value. */ u8 pressure; /* id range is 1 - 15. It is incremented with every new touch. */ u8 id; } __packed; struct cyapa_reg_data { /* * bit 0 - 1: device status * bit 3 - 2: power mode * bit 6 - 4: reserved * bit 7: interrupt valid bit */ u8 device_status; /* * bit 7 - 4: number of fingers currently touching pad * bit 3: valid data check bit * bit 2: middle mechanism button state if exists * bit 1: right mechanism button state if exists * bit 0: left mechanism button state if exists */ u8 finger_btn; /* CYAPA reports up to 5 touches per packet. 
*/ struct cyapa_touch touches[5]; } __packed; struct gen3_write_block_cmd { u8 checksum_seed; /* Always be 0xff */ u8 cmd_code; /* command code: 0x39 */ u8 key[8]; /* 8-byte security key */ __be16 block_num; u8 block_data[CYAPA_FW_BLOCK_SIZE]; u8 block_checksum; /* Calculated using bytes 12 - 75 */ u8 cmd_checksum; /* Calculated using bytes 0-76 */ } __packed; static const u8 security_key[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 }; static const u8 bl_activate[] = { 0x00, 0xff, 0x38, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 }; static const u8 bl_deactivate[] = { 0x00, 0xff, 0x3b, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 }; static const u8 bl_exit[] = { 0x00, 0xff, 0xa5, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 }; /* for byte read/write command */ #define CMD_RESET 0 #define CMD_POWER_MODE 1 #define CMD_DEV_STATUS 2 #define CMD_REPORT_MAX_BASELINE 3 #define CMD_REPORT_MIN_BASELINE 4 #define SMBUS_BYTE_CMD(cmd) (((cmd) & 0x3f) << 1) #define CYAPA_SMBUS_RESET SMBUS_BYTE_CMD(CMD_RESET) #define CYAPA_SMBUS_POWER_MODE SMBUS_BYTE_CMD(CMD_POWER_MODE) #define CYAPA_SMBUS_DEV_STATUS SMBUS_BYTE_CMD(CMD_DEV_STATUS) #define CYAPA_SMBUS_MAX_BASELINE SMBUS_BYTE_CMD(CMD_REPORT_MAX_BASELINE) #define CYAPA_SMBUS_MIN_BASELINE SMBUS_BYTE_CMD(CMD_REPORT_MIN_BASELINE) /* for group registers read/write command */ #define REG_GROUP_DATA 0 #define REG_GROUP_CMD 2 #define REG_GROUP_QUERY 3 #define SMBUS_GROUP_CMD(grp) (0x80 | (((grp) & 0x07) << 3)) #define CYAPA_SMBUS_GROUP_DATA SMBUS_GROUP_CMD(REG_GROUP_DATA) #define CYAPA_SMBUS_GROUP_CMD SMBUS_GROUP_CMD(REG_GROUP_CMD) #define CYAPA_SMBUS_GROUP_QUERY SMBUS_GROUP_CMD(REG_GROUP_QUERY) /* for register block read/write command */ #define CMD_BL_STATUS 0 #define CMD_BL_HEAD 1 #define CMD_BL_CMD 2 #define CMD_BL_DATA 3 #define CMD_BL_ALL 4 #define CMD_BLK_PRODUCT_ID 5 #define CMD_BLK_HEAD 6 #define SMBUS_BLOCK_CMD(cmd) (0xc0 | (((cmd) & 0x1f) << 1)) /* register block read/write command in bootloader mode */ #define CYAPA_SMBUS_BL_STATUS SMBUS_BLOCK_CMD(CMD_BL_STATUS) #define CYAPA_SMBUS_BL_HEAD SMBUS_BLOCK_CMD(CMD_BL_HEAD) #define CYAPA_SMBUS_BL_CMD SMBUS_BLOCK_CMD(CMD_BL_CMD) #define CYAPA_SMBUS_BL_DATA SMBUS_BLOCK_CMD(CMD_BL_DATA) #define CYAPA_SMBUS_BL_ALL SMBUS_BLOCK_CMD(CMD_BL_ALL) /* register block read/write command in operational mode */ #define CYAPA_SMBUS_BLK_PRODUCT_ID SMBUS_BLOCK_CMD(CMD_BLK_PRODUCT_ID) #define CYAPA_SMBUS_BLK_HEAD SMBUS_BLOCK_CMD(CMD_BLK_HEAD) struct cyapa_cmd_len { u8 cmd; u8 len; }; /* maps generic CYAPA_CMD_* code to the I2C equivalent */ static const struct cyapa_cmd_len cyapa_i2c_cmds[] = { { CYAPA_OFFSET_SOFT_RESET, 1 }, /* CYAPA_CMD_SOFT_RESET */ { REG_OFFSET_COMMAND_BASE + 1, 1 }, /* CYAPA_CMD_POWER_MODE */ { REG_OFFSET_DATA_BASE, 1 }, /* CYAPA_CMD_DEV_STATUS */ { REG_OFFSET_DATA_BASE, sizeof(struct cyapa_reg_data) }, /* CYAPA_CMD_GROUP_DATA */ { REG_OFFSET_COMMAND_BASE, 0 }, /* CYAPA_CMD_GROUP_CMD */ { REG_OFFSET_QUERY_BASE, QUERY_DATA_SIZE }, /* CYAPA_CMD_GROUP_QUERY */ { BL_HEAD_OFFSET, 3 }, /* CYAPA_CMD_BL_STATUS */ { BL_HEAD_OFFSET, 16 }, /* CYAPA_CMD_BL_HEAD */ { BL_HEAD_OFFSET, 16 }, /* CYAPA_CMD_BL_CMD */ { BL_DATA_OFFSET, 16 }, /* CYAPA_CMD_BL_DATA */ { BL_HEAD_OFFSET, 32 }, /* CYAPA_CMD_BL_ALL */ { REG_OFFSET_QUERY_BASE, PRODUCT_ID_SIZE }, /* CYAPA_CMD_BLK_PRODUCT_ID */ { REG_OFFSET_DATA_BASE, 32 }, /* CYAPA_CMD_BLK_HEAD */ { REG_OFFSET_MAX_BASELINE, 1 }, /* CYAPA_CMD_MAX_BASELINE */ { REG_OFFSET_MIN_BASELINE, 1 }, /* CYAPA_CMD_MIN_BASELINE */ }; static const struct cyapa_cmd_len 
cyapa_smbus_cmds[] = { { CYAPA_SMBUS_RESET, 1 }, /* CYAPA_CMD_SOFT_RESET */ { CYAPA_SMBUS_POWER_MODE, 1 }, /* CYAPA_CMD_POWER_MODE */ { CYAPA_SMBUS_DEV_STATUS, 1 }, /* CYAPA_CMD_DEV_STATUS */ { CYAPA_SMBUS_GROUP_DATA, sizeof(struct cyapa_reg_data) }, /* CYAPA_CMD_GROUP_DATA */ { CYAPA_SMBUS_GROUP_CMD, 2 }, /* CYAPA_CMD_GROUP_CMD */ { CYAPA_SMBUS_GROUP_QUERY, QUERY_DATA_SIZE }, /* CYAPA_CMD_GROUP_QUERY */ { CYAPA_SMBUS_BL_STATUS, 3 }, /* CYAPA_CMD_BL_STATUS */ { CYAPA_SMBUS_BL_HEAD, 16 }, /* CYAPA_CMD_BL_HEAD */ { CYAPA_SMBUS_BL_CMD, 16 }, /* CYAPA_CMD_BL_CMD */ { CYAPA_SMBUS_BL_DATA, 16 }, /* CYAPA_CMD_BL_DATA */ { CYAPA_SMBUS_BL_ALL, 32 }, /* CYAPA_CMD_BL_ALL */ { CYAPA_SMBUS_BLK_PRODUCT_ID, PRODUCT_ID_SIZE }, /* CYAPA_CMD_BLK_PRODUCT_ID */ { CYAPA_SMBUS_BLK_HEAD, 16 }, /* CYAPA_CMD_BLK_HEAD */ { CYAPA_SMBUS_MAX_BASELINE, 1 }, /* CYAPA_CMD_MAX_BASELINE */ { CYAPA_SMBUS_MIN_BASELINE, 1 }, /* CYAPA_CMD_MIN_BASELINE */ }; static int cyapa_gen3_try_poll_handler(struct cyapa *cyapa); /* * cyapa_smbus_read_block - perform smbus block read command * @cyapa - private data structure of the driver * @cmd - the properly encoded smbus command * @len - expected length of smbus command result * @values - buffer to store smbus command result * * Returns negative errno, else the number of bytes written. * * Note: * In trackpad device, the memory block allocated for I2C register map * is 256 bytes, so the max read block for I2C bus is 256 bytes. */ ssize_t cyapa_smbus_read_block(struct cyapa *cyapa, u8 cmd, size_t len, u8 *values) { ssize_t ret; u8 index; u8 smbus_cmd; u8 *buf; struct i2c_client *client = cyapa->client; if (!(SMBUS_BYTE_BLOCK_CMD_MASK & cmd)) return -EINVAL; if (SMBUS_GROUP_BLOCK_CMD_MASK & cmd) { /* read specific block registers command. */ smbus_cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ); ret = i2c_smbus_read_block_data(client, smbus_cmd, values); goto out; } ret = 0; for (index = 0; index * I2C_SMBUS_BLOCK_MAX < len; index++) { smbus_cmd = SMBUS_ENCODE_IDX(cmd, index); smbus_cmd = SMBUS_ENCODE_RW(smbus_cmd, SMBUS_READ); buf = values + I2C_SMBUS_BLOCK_MAX * index; ret = i2c_smbus_read_block_data(client, smbus_cmd, buf); if (ret < 0) goto out; } out: return ret > 0 ? len : ret; } static s32 cyapa_read_byte(struct cyapa *cyapa, u8 cmd_idx) { u8 cmd; if (cyapa->smbus) { cmd = cyapa_smbus_cmds[cmd_idx].cmd; cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ); } else { cmd = cyapa_i2c_cmds[cmd_idx].cmd; } return i2c_smbus_read_byte_data(cyapa->client, cmd); } static s32 cyapa_write_byte(struct cyapa *cyapa, u8 cmd_idx, u8 value) { u8 cmd; if (cyapa->smbus) { cmd = cyapa_smbus_cmds[cmd_idx].cmd; cmd = SMBUS_ENCODE_RW(cmd, SMBUS_WRITE); } else { cmd = cyapa_i2c_cmds[cmd_idx].cmd; } return i2c_smbus_write_byte_data(cyapa->client, cmd, value); } ssize_t cyapa_i2c_reg_read_block(struct cyapa *cyapa, u8 reg, size_t len, u8 *values) { return i2c_smbus_read_i2c_block_data(cyapa->client, reg, len, values); } static ssize_t cyapa_i2c_reg_write_block(struct cyapa *cyapa, u8 reg, size_t len, const u8 *values) { return i2c_smbus_write_i2c_block_data(cyapa->client, reg, len, values); } ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values) { u8 cmd; size_t len; if (cyapa->smbus) { cmd = cyapa_smbus_cmds[cmd_idx].cmd; len = cyapa_smbus_cmds[cmd_idx].len; return cyapa_smbus_read_block(cyapa, cmd, len, values); } cmd = cyapa_i2c_cmds[cmd_idx].cmd; len = cyapa_i2c_cmds[cmd_idx].len; return cyapa_i2c_reg_read_block(cyapa, cmd, len, values); } /* * Determine the Gen3 trackpad device's current operating state. 
* */ static int cyapa_gen3_state_parse(struct cyapa *cyapa, u8 *reg_data, int len) { cyapa->state = CYAPA_STATE_NO_DEVICE; /* Parse based on Gen3 characteristic registers and bits */ if (reg_data[REG_BL_FILE] == BL_FILE && reg_data[REG_BL_ERROR] == BL_ERROR_NO_ERR_IDLE && (reg_data[REG_BL_STATUS] == (BL_STATUS_RUNNING | BL_STATUS_CSUM_VALID) || reg_data[REG_BL_STATUS] == BL_STATUS_RUNNING)) { /* * Normal state after power on or reset, * REG_BL_STATUS == 0x11, firmware image checksum is valid. * REG_BL_STATUS == 0x10, firmware image checksum is invalid. */ cyapa->gen = CYAPA_GEN3; cyapa->state = CYAPA_STATE_BL_IDLE; } else if (reg_data[REG_BL_FILE] == BL_FILE && (reg_data[REG_BL_STATUS] & BL_STATUS_RUNNING) == BL_STATUS_RUNNING) { cyapa->gen = CYAPA_GEN3; if (reg_data[REG_BL_STATUS] & BL_STATUS_BUSY) { cyapa->state = CYAPA_STATE_BL_BUSY; } else { if ((reg_data[REG_BL_ERROR] & BL_ERROR_BOOTLOADING) == BL_ERROR_BOOTLOADING) cyapa->state = CYAPA_STATE_BL_ACTIVE; else cyapa->state = CYAPA_STATE_BL_IDLE; } } else if ((reg_data[REG_OP_STATUS] & OP_STATUS_SRC) && (reg_data[REG_OP_DATA1] & OP_DATA_VALID)) { /* * Normal state when running in operational mode, * may also not in full power state or * busying in command process. */ if (GEN3_FINGER_NUM(reg_data[REG_OP_DATA1]) <= GEN3_MAX_FINGERS) { /* Finger number data is valid. */ cyapa->gen = CYAPA_GEN3; cyapa->state = CYAPA_STATE_OP; } } else if (reg_data[REG_OP_STATUS] == 0x0C && reg_data[REG_OP_DATA1] == 0x08) { /* Op state when first two registers overwritten with 0x00 */ cyapa->gen = CYAPA_GEN3; cyapa->state = CYAPA_STATE_OP; } else if (reg_data[REG_BL_STATUS] & (BL_STATUS_RUNNING | BL_STATUS_BUSY)) { cyapa->gen = CYAPA_GEN3; cyapa->state = CYAPA_STATE_BL_BUSY; } if (cyapa->gen == CYAPA_GEN3 && (cyapa->state == CYAPA_STATE_OP || cyapa->state == CYAPA_STATE_BL_IDLE || cyapa->state == CYAPA_STATE_BL_ACTIVE || cyapa->state == CYAPA_STATE_BL_BUSY)) return 0; return -EAGAIN; } /* * Enter bootloader by soft resetting the device. * * If device is already in the bootloader, the function just returns. * Otherwise, reset the device; after reset, device enters bootloader idle * state immediately. * * Returns: * 0 on success * -EAGAIN device was reset, but is not now in bootloader idle state * < 0 if the device never responds within the timeout */ static int cyapa_gen3_bl_enter(struct cyapa *cyapa) { int error; int waiting_time; error = cyapa_poll_state(cyapa, 500); if (error) return error; if (cyapa->state == CYAPA_STATE_BL_IDLE) { /* Already in BL_IDLE. Skipping reset. */ return 0; } if (cyapa->state != CYAPA_STATE_OP) return -EAGAIN; cyapa->operational = false; cyapa->state = CYAPA_STATE_NO_DEVICE; error = cyapa_write_byte(cyapa, CYAPA_CMD_SOFT_RESET, 0x01); if (error) return -EIO; usleep_range(25000, 50000); waiting_time = 2000; /* For some shipset, max waiting time is 1~2s. 
*/ do { error = cyapa_poll_state(cyapa, 500); if (error) { if (error == -ETIMEDOUT) { waiting_time -= 500; continue; } return error; } if ((cyapa->state == CYAPA_STATE_BL_IDLE) && !(cyapa->status[REG_BL_STATUS] & BL_STATUS_WATCHDOG)) break; msleep(100); waiting_time -= 100; } while (waiting_time > 0); if ((cyapa->state != CYAPA_STATE_BL_IDLE) || (cyapa->status[REG_BL_STATUS] & BL_STATUS_WATCHDOG)) return -EAGAIN; return 0; } static int cyapa_gen3_bl_activate(struct cyapa *cyapa) { int error; error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_activate), bl_activate); if (error) return error; /* Wait for bootloader to activate; takes between 2 and 12 seconds */ msleep(2000); error = cyapa_poll_state(cyapa, 11000); if (error) return error; if (cyapa->state != CYAPA_STATE_BL_ACTIVE) return -EAGAIN; return 0; } static int cyapa_gen3_bl_deactivate(struct cyapa *cyapa) { int error; error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_deactivate), bl_deactivate); if (error) return error; /* Wait for bootloader to switch to idle state; should take < 100ms */ msleep(100); error = cyapa_poll_state(cyapa, 500); if (error) return error; if (cyapa->state != CYAPA_STATE_BL_IDLE) return -EAGAIN; return 0; } /* * Exit bootloader * * Send bl_exit command, then wait 50 - 100 ms to let device transition to * operational mode. If this is the first time the device's firmware is * running, it can take up to 2 seconds to calibrate its sensors. So, poll * the device's new state for up to 2 seconds. * * Returns: * -EIO failure while reading from device * -EAGAIN device is stuck in bootloader, b/c it has invalid firmware * 0 device is supported and in operational mode */ static int cyapa_gen3_bl_exit(struct cyapa *cyapa) { int error; error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_exit), bl_exit); if (error) return error; /* * Wait for bootloader to exit, and operation mode to start. * Normally, this takes at least 50 ms. */ msleep(50); /* * In addition, when a device boots for the first time after being * updated to new firmware, it must first calibrate its sensors, which * can take up to an additional 2 seconds. If the device power is * running low, this may take even longer. */ error = cyapa_poll_state(cyapa, 4000); if (error < 0) return error; if (cyapa->state != CYAPA_STATE_OP) return -EAGAIN; return 0; } static u16 cyapa_gen3_csum(const u8 *buf, size_t count) { int i; u16 csum = 0; for (i = 0; i < count; i++) csum += buf[i]; return csum; } /* * Verify the integrity of a CYAPA firmware image file. * * The firmware image file is 30848 bytes, composed of 482 64-byte blocks. * * The first 2 blocks are the firmware header. * The next 480 blocks are the firmware image. * * The first two bytes of the header hold the header checksum, computed by * summing the other 126 bytes of the header. * The last two bytes of the header hold the firmware image checksum, computed * by summing the 30720 bytes of the image modulo 0xffff. * * Both checksums are stored little-endian. */ static int cyapa_gen3_check_fw(struct cyapa *cyapa, const struct firmware *fw) { struct device *dev = &cyapa->client->dev; u16 csum; u16 csum_expected; /* Firmware must match exact 30848 bytes = 482 64-byte blocks. 
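	 * That is (CYAPA_FW_HDR_BLOCK_COUNT + CYAPA_FW_DATA_BLOCK_COUNT) blocks
	 * of CYAPA_FW_BLOCK_SIZE bytes each: (2 + 480) * 64 = 30848 bytes,
	 * i.e. CYAPA_FW_SIZE.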
*/ if (fw->size != CYAPA_FW_SIZE) { dev_err(dev, "invalid firmware size = %zu, expected %u.\n", fw->size, CYAPA_FW_SIZE); return -EINVAL; } /* Verify header block */ csum_expected = (fw->data[0] << 8) | fw->data[1]; csum = cyapa_gen3_csum(&fw->data[2], CYAPA_FW_HDR_SIZE - 2); if (csum != csum_expected) { dev_err(dev, "%s %04x, expected: %04x\n", "invalid firmware header checksum = ", csum, csum_expected); return -EINVAL; } /* Verify firmware image */ csum_expected = (fw->data[CYAPA_FW_HDR_SIZE - 2] << 8) | fw->data[CYAPA_FW_HDR_SIZE - 1]; csum = cyapa_gen3_csum(&fw->data[CYAPA_FW_HDR_SIZE], CYAPA_FW_DATA_SIZE); if (csum != csum_expected) { dev_err(dev, "%s %04x, expected: %04x\n", "invalid firmware header checksum = ", csum, csum_expected); return -EINVAL; } return 0; } /* * Write a |len| byte long buffer |buf| to the device, by chopping it up into a * sequence of smaller |CYAPA_CMD_LEN|-length write commands. * * The data bytes for a write command are prepended with the 1-byte offset * of the data relative to the start of |buf|. */ static int cyapa_gen3_write_buffer(struct cyapa *cyapa, const u8 *buf, size_t len) { int error; size_t i; unsigned char cmd[CYAPA_CMD_LEN + 1]; size_t cmd_len; for (i = 0; i < len; i += CYAPA_CMD_LEN) { const u8 *payload = &buf[i]; cmd_len = (len - i >= CYAPA_CMD_LEN) ? CYAPA_CMD_LEN : len - i; cmd[0] = i; memcpy(&cmd[1], payload, cmd_len); error = cyapa_i2c_reg_write_block(cyapa, 0, cmd_len + 1, cmd); if (error) return error; } return 0; } /* * A firmware block write command writes 64 bytes of data to a single flash * page in the device. The 78-byte block write command has the format: * <0xff> <CMD> <Key> <Start> <Data> <Data-Checksum> <CMD Checksum> * * <0xff> - every command starts with 0xff * <CMD> - the write command value is 0x39 * <Key> - write commands include an 8-byte key: { 00 01 02 03 04 05 06 07 } * <Block> - Memory Block number (address / 64) (16-bit, big-endian) * <Data> - 64 bytes of firmware image data * <Data Checksum> - sum of 64 <Data> bytes, modulo 0xff * <CMD Checksum> - sum of 77 bytes, from 0xff to <Data Checksum> * * Each write command is split into 5 i2c write transactions of up to 16 bytes. * Each transaction starts with an i2c register offset: (00, 10, 20, 30, 40). */ static int cyapa_gen3_write_fw_block(struct cyapa *cyapa, u16 block, const u8 *data) { int ret; struct gen3_write_block_cmd write_block_cmd; u8 status[BL_STATUS_SIZE]; int tries; u8 bl_status, bl_error; /* Set write command and security key bytes. */ write_block_cmd.checksum_seed = GEN3_BL_CMD_CHECKSUM_SEED; write_block_cmd.cmd_code = GEN3_BL_CMD_WRITE_BLOCK; memcpy(write_block_cmd.key, security_key, sizeof(security_key)); put_unaligned_be16(block, &write_block_cmd.block_num); memcpy(write_block_cmd.block_data, data, CYAPA_FW_BLOCK_SIZE); write_block_cmd.block_checksum = cyapa_gen3_csum( write_block_cmd.block_data, CYAPA_FW_BLOCK_SIZE); write_block_cmd.cmd_checksum = cyapa_gen3_csum((u8 *)&write_block_cmd, sizeof(write_block_cmd) - 1); ret = cyapa_gen3_write_buffer(cyapa, (u8 *)&write_block_cmd, sizeof(write_block_cmd)); if (ret) return ret; /* Wait for write to finish */ tries = 11; /* Programming for one block can take about 100ms. */ do { usleep_range(10000, 20000); /* Check block write command result status. */ ret = cyapa_i2c_reg_read_block(cyapa, BL_HEAD_OFFSET, BL_STATUS_SIZE, status); if (ret != BL_STATUS_SIZE) return (ret < 0) ? ret : -EIO; } while ((status[REG_BL_STATUS] & BL_STATUS_BUSY) && --tries); /* Ignore WATCHDOG bit and reserved bits. 
*/ bl_status = status[REG_BL_STATUS] & ~BL_STATUS_REV_MASK; bl_error = status[REG_BL_ERROR] & ~BL_ERROR_RESERVED; if (bl_status & BL_STATUS_BUSY) ret = -ETIMEDOUT; else if (bl_status != BL_STATUS_RUNNING || bl_error != BL_ERROR_BOOTLOADING) ret = -EIO; else ret = 0; return ret; } static int cyapa_gen3_write_blocks(struct cyapa *cyapa, size_t start_block, size_t block_count, const u8 *image_data) { int error; int i; for (i = 0; i < block_count; i++) { size_t block = start_block + i; size_t addr = i * CYAPA_FW_BLOCK_SIZE; const u8 *data = &image_data[addr]; error = cyapa_gen3_write_fw_block(cyapa, block, data); if (error) return error; } return 0; } static int cyapa_gen3_do_fw_update(struct cyapa *cyapa, const struct firmware *fw) { struct device *dev = &cyapa->client->dev; int error; /* First write data, starting at byte 128 of fw->data */ error = cyapa_gen3_write_blocks(cyapa, CYAPA_FW_DATA_BLOCK_START, CYAPA_FW_DATA_BLOCK_COUNT, &fw->data[CYAPA_FW_HDR_BLOCK_COUNT * CYAPA_FW_BLOCK_SIZE]); if (error) { dev_err(dev, "FW update aborted, write image: %d\n", error); return error; } /* Then write checksum */ error = cyapa_gen3_write_blocks(cyapa, CYAPA_FW_HDR_BLOCK_START, CYAPA_FW_HDR_BLOCK_COUNT, &fw->data[0]); if (error) { dev_err(dev, "FW update aborted, write checksum: %d\n", error); return error; } return 0; } static ssize_t cyapa_gen3_do_calibrate(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cyapa *cyapa = dev_get_drvdata(dev); unsigned long timeout; int ret; ret = cyapa_read_byte(cyapa, CYAPA_CMD_DEV_STATUS); if (ret < 0) { dev_err(dev, "Error reading dev status: %d\n", ret); goto out; } if ((ret & CYAPA_DEV_NORMAL) != CYAPA_DEV_NORMAL) { dev_warn(dev, "Trackpad device is busy, device state: 0x%02x\n", ret); ret = -EAGAIN; goto out; } ret = cyapa_write_byte(cyapa, CYAPA_CMD_SOFT_RESET, OP_RECALIBRATION_MASK); if (ret < 0) { dev_err(dev, "Failed to send calibrate command: %d\n", ret); goto out; } /* max recalibration timeout 2s. */ timeout = jiffies + 2 * HZ; do { /* * For this recalibration, the max time will not exceed 2s. * The average time is approximately 500 - 700 ms, and we * will check the status every 100 - 200ms. */ msleep(100); ret = cyapa_read_byte(cyapa, CYAPA_CMD_DEV_STATUS); if (ret < 0) { dev_err(dev, "Error reading dev status: %d\n", ret); goto out; } if ((ret & CYAPA_DEV_NORMAL) == CYAPA_DEV_NORMAL) { dev_dbg(dev, "Calibration successful.\n"); goto out; } } while (time_is_after_jiffies(timeout)); dev_err(dev, "Failed to calibrate. Timeout.\n"); ret = -ETIMEDOUT; out: return ret < 0 ? ret : count; } static ssize_t cyapa_gen3_show_baseline(struct device *dev, struct device_attribute *attr, char *buf) { struct cyapa *cyapa = dev_get_drvdata(dev); int max_baseline, min_baseline; int tries; int ret; ret = cyapa_read_byte(cyapa, CYAPA_CMD_DEV_STATUS); if (ret < 0) { dev_err(dev, "Error reading dev status. err = %d\n", ret); goto out; } if ((ret & CYAPA_DEV_NORMAL) != CYAPA_DEV_NORMAL) { dev_warn(dev, "Trackpad device is busy. device state = 0x%x\n", ret); ret = -EAGAIN; goto out; } ret = cyapa_write_byte(cyapa, CYAPA_CMD_SOFT_RESET, OP_REPORT_BASELINE_MASK); if (ret < 0) { dev_err(dev, "Failed to send report baseline command. %d\n", ret); goto out; } tries = 3; /* Try for 30 to 60 ms */ do { usleep_range(10000, 20000); ret = cyapa_read_byte(cyapa, CYAPA_CMD_DEV_STATUS); if (ret < 0) { dev_err(dev, "Error reading dev status. 
err = %d\n", ret); goto out; } if ((ret & CYAPA_DEV_NORMAL) == CYAPA_DEV_NORMAL) break; } while (--tries); if (tries == 0) { dev_err(dev, "Device timed out going to Normal state.\n"); ret = -ETIMEDOUT; goto out; } ret = cyapa_read_byte(cyapa, CYAPA_CMD_MAX_BASELINE); if (ret < 0) { dev_err(dev, "Failed to read max baseline. err = %d\n", ret); goto out; } max_baseline = ret; ret = cyapa_read_byte(cyapa, CYAPA_CMD_MIN_BASELINE); if (ret < 0) { dev_err(dev, "Failed to read min baseline. err = %d\n", ret); goto out; } min_baseline = ret; dev_dbg(dev, "Baseline report successful. Max: %d Min: %d\n", max_baseline, min_baseline); ret = sysfs_emit(buf, "%d %d\n", max_baseline, min_baseline); out: return ret; } /* * cyapa_get_wait_time_for_pwr_cmd * * Compute the amount of time we need to wait after updating the touchpad * power mode. The touchpad needs to consume the incoming power mode set * command at the current clock rate. */ static u16 cyapa_get_wait_time_for_pwr_cmd(u8 pwr_mode) { switch (pwr_mode) { case PWR_MODE_FULL_ACTIVE: return 20; case PWR_MODE_BTN_ONLY: return 20; case PWR_MODE_OFF: return 20; default: return cyapa_pwr_cmd_to_sleep_time(pwr_mode) + 50; } } /* * Set device power mode * * Write to the field to configure power state. Power states include : * Full : Max scans and report rate. * Idle : Report rate set by user specified time. * ButtonOnly : No scans for fingers. When the button is triggered, * a slave interrupt is asserted to notify host to wake up. * Off : Only awake for i2c commands from host. No function for button * or touch sensors. * * The power_mode command should conform to the following : * Full : 0x3f * Idle : Configurable from 20 to 1000ms. See note below for * cyapa_sleep_time_to_pwr_cmd and cyapa_pwr_cmd_to_sleep_time * ButtonOnly : 0x01 * Off : 0x00 * * Device power mode can only be set when device is in operational mode. */ static int cyapa_gen3_set_power_mode(struct cyapa *cyapa, u8 power_mode, u16 always_unused, enum cyapa_pm_stage pm_stage) { struct input_dev *input = cyapa->input; u8 power; int tries; int sleep_time; int interval; int ret; if (cyapa->state != CYAPA_STATE_OP) return 0; tries = SET_POWER_MODE_TRIES; while (tries--) { ret = cyapa_read_byte(cyapa, CYAPA_CMD_POWER_MODE); if (ret >= 0) break; usleep_range(SET_POWER_MODE_DELAY, 2 * SET_POWER_MODE_DELAY); } if (ret < 0) return ret; /* * Return early if the power mode to set is the same as the current * one. */ if ((ret & PWR_MODE_MASK) == power_mode) return 0; sleep_time = (int)cyapa_get_wait_time_for_pwr_cmd(ret & PWR_MODE_MASK); power = ret; power &= ~PWR_MODE_MASK; power |= power_mode & PWR_MODE_MASK; tries = SET_POWER_MODE_TRIES; while (tries--) { ret = cyapa_write_byte(cyapa, CYAPA_CMD_POWER_MODE, power); if (!ret) break; usleep_range(SET_POWER_MODE_DELAY, 2 * SET_POWER_MODE_DELAY); } /* * Wait for the newly set power command to go in at the previous * clock speed (scanrate) used by the touchpad firmware. Not * doing so before issuing the next command may result in errors * depending on the command's content. */ if (cyapa->operational && input && input_device_enabled(input) && (pm_stage == CYAPA_PM_RUNTIME_SUSPEND || pm_stage == CYAPA_PM_RUNTIME_RESUME)) { /* Try to polling in 120Hz, read may fail, just ignore it. 
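	 * The loop below sleeps in slices of 1000/120 ms (about 8 ms) and
	 * calls cyapa_gen3_try_poll_handler() after each slice until the
	 * mode-change wait time has elapsed.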
*/ interval = 1000 / 120; while (sleep_time > 0) { if (sleep_time > interval) msleep(interval); else msleep(sleep_time); sleep_time -= interval; cyapa_gen3_try_poll_handler(cyapa); } } else { msleep(sleep_time); } return ret; } static int cyapa_gen3_set_proximity(struct cyapa *cyapa, bool enable) { return -EOPNOTSUPP; } static int cyapa_gen3_get_query_data(struct cyapa *cyapa) { u8 query_data[QUERY_DATA_SIZE]; int ret; if (cyapa->state != CYAPA_STATE_OP) return -EBUSY; ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_QUERY, query_data); if (ret != QUERY_DATA_SIZE) return (ret < 0) ? ret : -EIO; memcpy(&cyapa->product_id[0], &query_data[0], 5); cyapa->product_id[5] = '-'; memcpy(&cyapa->product_id[6], &query_data[5], 6); cyapa->product_id[12] = '-'; memcpy(&cyapa->product_id[13], &query_data[11], 2); cyapa->product_id[15] = '\0'; cyapa->fw_maj_ver = query_data[15]; cyapa->fw_min_ver = query_data[16]; cyapa->btn_capability = query_data[19] & CAPABILITY_BTN_MASK; cyapa->gen = query_data[20] & 0x0f; cyapa->max_abs_x = ((query_data[21] & 0xf0) << 4) | query_data[22]; cyapa->max_abs_y = ((query_data[21] & 0x0f) << 8) | query_data[23]; cyapa->physical_size_x = ((query_data[24] & 0xf0) << 4) | query_data[25]; cyapa->physical_size_y = ((query_data[24] & 0x0f) << 8) | query_data[26]; cyapa->max_z = 255; return 0; } static int cyapa_gen3_bl_query_data(struct cyapa *cyapa) { u8 bl_data[CYAPA_CMD_LEN]; int ret; ret = cyapa_i2c_reg_read_block(cyapa, 0, CYAPA_CMD_LEN, bl_data); if (ret != CYAPA_CMD_LEN) return (ret < 0) ? ret : -EIO; /* * This value will be updated again when entered application mode. * If TP failed to enter application mode, this fw version values * can be used as a reference. * This firmware version valid when fw image checksum is valid. */ if (bl_data[REG_BL_STATUS] == (BL_STATUS_RUNNING | BL_STATUS_CSUM_VALID)) { cyapa->fw_maj_ver = bl_data[GEN3_BL_IDLE_FW_MAJ_VER_OFFSET]; cyapa->fw_min_ver = bl_data[GEN3_BL_IDLE_FW_MIN_VER_OFFSET]; } return 0; } /* * Check if device is operational. * * An operational device is responding, has exited bootloader, and has * firmware supported by this driver. * * Returns: * -EBUSY no device or in bootloader * -EIO failure while reading from device * -EAGAIN device is still in bootloader * if ->state = CYAPA_STATE_BL_IDLE, device has invalid firmware * -EINVAL device is in operational mode, but not supported by this driver * 0 device is supported */ static int cyapa_gen3_do_operational_check(struct cyapa *cyapa) { struct device *dev = &cyapa->client->dev; int error; switch (cyapa->state) { case CYAPA_STATE_BL_ACTIVE: error = cyapa_gen3_bl_deactivate(cyapa); if (error) { dev_err(dev, "failed to bl_deactivate: %d\n", error); return error; } fallthrough; case CYAPA_STATE_BL_IDLE: /* Try to get firmware version in bootloader mode. */ cyapa_gen3_bl_query_data(cyapa); error = cyapa_gen3_bl_exit(cyapa); if (error) { dev_err(dev, "failed to bl_exit: %d\n", error); return error; } fallthrough; case CYAPA_STATE_OP: /* * Reading query data before going back to the full mode * may cause problems, so we set the power mode first here. 
 */
		error = cyapa_gen3_set_power_mode(cyapa,
				PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_ACTIVE);
		if (error)
			dev_err(dev, "%s: set full power mode failed: %d\n",
				__func__, error);

		error = cyapa_gen3_get_query_data(cyapa);
		if (error < 0)
			return error;

		/* Only support firmware protocol gen3 */
		if (cyapa->gen != CYAPA_GEN3) {
			dev_err(dev, "unsupported protocol version (%d)\n",
				cyapa->gen);
			return -EINVAL;
		}

		/* Only support product ID starting with CYTRA */
		if (memcmp(cyapa->product_id, product_id,
			   strlen(product_id)) != 0) {
			dev_err(dev, "unsupported product ID (%s)\n",
				cyapa->product_id);
			return -EINVAL;
		}

		return 0;

	default:
		return -EIO;
	}

	return 0;
}

/*
 * Return false: do not continue processing.
 * Return true: continue processing.
 */
static bool cyapa_gen3_irq_cmd_handler(struct cyapa *cyapa)
{
	/* Not a gen3 irq command response; continue processing. */
	if (cyapa->gen != CYAPA_GEN3)
		return true;

	if (cyapa->operational)
		return true;

	/*
	 * The driver is still detecting the device or some other interface
	 * function is in progress, so stop cyapa_gen3_irq_handler from
	 * continuing, to avoid unwanted error detection and processing.
	 *
	 * This also keeps the periodically asserted interrupts from being
	 * processed as touch inputs when gen3 failed to launch into
	 * application mode, which would leave gen3 stuck in bootloader mode.
	 */
	return false;
}

static int cyapa_gen3_event_process(struct cyapa *cyapa,
				    struct cyapa_reg_data *data)
{
	struct input_dev *input = cyapa->input;
	int num_fingers;
	int i;

	num_fingers = (data->finger_btn >> 4) & 0x0f;
	for (i = 0; i < num_fingers; i++) {
		const struct cyapa_touch *touch = &data->touches[i];
		/* Note: touch->id range is 1 to 15; slots are 0 to 14. */
		int slot = touch->id - 1;

		input_mt_slot(input, slot);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
		input_report_abs(input, ABS_MT_POSITION_X,
				 ((touch->xy_hi & 0xf0) << 4) | touch->x_lo);
		input_report_abs(input, ABS_MT_POSITION_Y,
				 ((touch->xy_hi & 0x0f) << 8) | touch->y_lo);
		input_report_abs(input, ABS_MT_PRESSURE, touch->pressure);
	}

	input_mt_sync_frame(input);

	if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
		input_report_key(input, BTN_LEFT,
				 !!(data->finger_btn & OP_DATA_LEFT_BTN));
	if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
		input_report_key(input, BTN_MIDDLE,
				 !!(data->finger_btn & OP_DATA_MIDDLE_BTN));
	if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
		input_report_key(input, BTN_RIGHT,
				 !!(data->finger_btn & OP_DATA_RIGHT_BTN));

	input_sync(input);

	return 0;
}

static int cyapa_gen3_irq_handler(struct cyapa *cyapa)
{
	struct device *dev = &cyapa->client->dev;
	struct cyapa_reg_data data;
	int ret;

	ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data);
	if (ret != sizeof(data)) {
		dev_err(dev, "failed to read report data, (%d)\n", ret);
		return -EINVAL;
	}

	if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC ||
	    (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL ||
	    (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID) {
		dev_err(dev, "invalid device state bytes: %02x %02x\n",
			data.device_status, data.finger_btn);
		return -EINVAL;
	}

	return cyapa_gen3_event_process(cyapa, &data);
}

/*
 * This function is called from cyapa_gen3_set_power_mode(); it is known that
 * reads may fail in some situations after the set power mode command was
 * sent. This function therefore avoids emitting the known and unwanted I2C
 * and data parse error messages.
*/ static int cyapa_gen3_try_poll_handler(struct cyapa *cyapa) { struct cyapa_reg_data data; int ret; ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data); if (ret != sizeof(data)) return -EINVAL; if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC || (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL || (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID) return -EINVAL; return cyapa_gen3_event_process(cyapa, &data); } static int cyapa_gen3_initialize(struct cyapa *cyapa) { return 0; } static int cyapa_gen3_bl_initiate(struct cyapa *cyapa, const struct firmware *fw) { return 0; } static int cyapa_gen3_empty_output_data(struct cyapa *cyapa, u8 *buf, int *len, cb_sort func) { return 0; } const struct cyapa_dev_ops cyapa_gen3_ops = { .check_fw = cyapa_gen3_check_fw, .bl_enter = cyapa_gen3_bl_enter, .bl_activate = cyapa_gen3_bl_activate, .update_fw = cyapa_gen3_do_fw_update, .bl_deactivate = cyapa_gen3_bl_deactivate, .bl_initiate = cyapa_gen3_bl_initiate, .show_baseline = cyapa_gen3_show_baseline, .calibrate_store = cyapa_gen3_do_calibrate, .initialize = cyapa_gen3_initialize, .state_parse = cyapa_gen3_state_parse, .operational_check = cyapa_gen3_do_operational_check, .irq_handler = cyapa_gen3_irq_handler, .irq_cmd_handler = cyapa_gen3_irq_cmd_handler, .sort_empty_output_data = cyapa_gen3_empty_output_data, .set_power_mode = cyapa_gen3_set_power_mode, .set_proximity = cyapa_gen3_set_proximity, };
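
/*
 * Illustrative sketch (not part of the driver): how a Gen3 12-bit coordinate
 * is reassembled from the packed format described by struct cyapa_touch --
 * the high nibbles of X and Y are packed into xy_hi and the low bytes into
 * x_lo/y_lo, exactly as cyapa_gen3_event_process() does above. The helper
 * name is hypothetical.
 */
static void __maybe_unused
cyapa_gen3_example_decode_xy(const struct cyapa_touch *touch, int *x, int *y)
{
	*x = ((touch->xy_hi & 0xf0) << 4) | touch->x_lo;
	*y = ((touch->xy_hi & 0x0f) << 8) | touch->y_lo;
}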
/* SPDX-License-Identifier: GPL-2.0-only */ /* drivers/net/ethernet/micrel/ks8851.h * * Copyright 2009 Simtec Electronics * Ben Dooks <[email protected]> * * KS8851 register definitions */ #ifndef __KS8851_H__ #define __KS8851_H__ #include <linux/eeprom_93cx6.h> #define KS_CCR 0x08 #define CCR_LE (1 << 10) /* KSZ8851-16MLL */ #define CCR_EEPROM (1 << 9) #define CCR_SPI (1 << 8) /* KSZ8851SNL */ #define CCR_8BIT (1 << 7) /* KSZ8851-16MLL */ #define CCR_16BIT (1 << 6) /* KSZ8851-16MLL */ #define CCR_32BIT (1 << 5) /* KSZ8851-16MLL */ #define CCR_SHARED (1 << 4) /* KSZ8851-16MLL */ #define CCR_48PIN (1 << 1) /* KSZ8851-16MLL */ #define CCR_32PIN (1 << 0) /* KSZ8851SNL */ /* MAC address registers */ #define KS_MAR(_m) (0x14 - (_m)) #define KS_MARL 0x10 #define KS_MARM 0x12 #define KS_MARH 0x14 #define KS_OBCR 0x20 #define OBCR_ODS_16mA (1 << 6) #define KS_EEPCR 0x22 #define EEPCR_EESRWA (1 << 5) #define EEPCR_EESA (1 << 4) #define EEPCR_EESB (1 << 3) #define EEPCR_EEDO (1 << 2) #define EEPCR_EESCK (1 << 1) #define EEPCR_EECS (1 << 0) #define KS_MBIR 0x24 #define MBIR_TXMBF (1 << 12) #define MBIR_TXMBFA (1 << 11) #define MBIR_RXMBF (1 << 4) #define MBIR_RXMBFA (1 << 3) #define KS_GRR 0x26 #define GRR_QMU (1 << 1) #define GRR_GSR (1 << 0) #define KS_WFCR 0x2A #define WFCR_MPRXE (1 << 7) #define WFCR_WF3E (1 << 3) #define WFCR_WF2E (1 << 2) #define WFCR_WF1E (1 << 1) #define WFCR_WF0E (1 << 0) #define KS_WF0CRC0 0x30 #define KS_WF0CRC1 0x32 #define KS_WF0BM0 0x34 #define KS_WF0BM1 0x36 #define KS_WF0BM2 0x38 #define KS_WF0BM3 0x3A #define KS_WF1CRC0 0x40 #define KS_WF1CRC1 0x42 #define KS_WF1BM0 0x44 #define KS_WF1BM1 0x46 #define KS_WF1BM2 0x48 #define KS_WF1BM3 0x4A #define KS_WF2CRC0 0x50 #define KS_WF2CRC1 0x52 #define KS_WF2BM0 0x54 #define KS_WF2BM1 0x56 #define KS_WF2BM2 0x58 #define KS_WF2BM3 0x5A #define KS_WF3CRC0 0x60 #define KS_WF3CRC1 0x62 #define KS_WF3BM0 0x64 #define KS_WF3BM1 0x66 #define KS_WF3BM2 0x68 #define KS_WF3BM3 0x6A #define KS_TXCR 0x70 #define TXCR_TCGICMP (1 << 8) #define TXCR_TCGUDP (1 << 7) #define TXCR_TCGTCP (1 << 6) #define TXCR_TCGIP (1 << 5) #define TXCR_FTXQ (1 << 4) #define TXCR_TXFCE (1 << 3) #define TXCR_TXPE (1 << 2) #define TXCR_TXCRC (1 << 1) #define TXCR_TXE (1 << 0) #define KS_TXSR 0x72 #define TXSR_TXLC (1 << 13) #define TXSR_TXMC (1 << 12) #define TXSR_TXFID_MASK (0x3f << 0) #define TXSR_TXFID_SHIFT (0) #define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f) #define KS_RXCR1 0x74 #define RXCR1_FRXQ (1 << 15) #define RXCR1_RXUDPFCC (1 << 14) #define RXCR1_RXTCPFCC (1 << 13) #define RXCR1_RXIPFCC (1 << 12) #define RXCR1_RXPAFMA (1 << 11) #define RXCR1_RXFCE (1 << 10) #define RXCR1_RXEFE (1 << 9) #define RXCR1_RXMAFMA (1 << 8) #define RXCR1_RXBE (1 << 7) #define RXCR1_RXME (1 << 6) #define RXCR1_RXUE (1 << 5) #define RXCR1_RXAE (1 << 4) #define RXCR1_RXINVF (1 << 1) #define RXCR1_RXE (1 << 0) #define KS_RXCR2 0x76 #define RXCR2_SRDBL_MASK (0x7 << 5) /* KSZ8851SNL */ #define RXCR2_SRDBL_SHIFT (5) /* KSZ8851SNL */ #define RXCR2_SRDBL_4B (0x0 << 5) /* KSZ8851SNL */ #define RXCR2_SRDBL_8B (0x1 << 5) /* KSZ8851SNL */ #define RXCR2_SRDBL_16B (0x2 << 5) /* KSZ8851SNL */ #define RXCR2_SRDBL_32B (0x3 << 5) /* KSZ8851SNL */ #define RXCR2_SRDBL_FRAME (0x4 << 5) /* KSZ8851SNL */ #define RXCR2_IUFFP (1 << 4) #define RXCR2_RXIUFCEZ (1 << 3) #define RXCR2_UDPLFE (1 << 2) #define RXCR2_RXICMPFCC (1 << 1) #define RXCR2_RXSAF (1 << 0) #define KS_TXMIR 0x78 #define KS_RXFHSR 0x7C #define RXFSHR_RXFV (1 << 15) #define RXFSHR_RXICMPFCS (1 << 13) #define RXFSHR_RXIPFCS (1 << 12) 
#define RXFSHR_RXTCPFCS (1 << 11) #define RXFSHR_RXUDPFCS (1 << 10) #define RXFSHR_RXBF (1 << 7) #define RXFSHR_RXMF (1 << 6) #define RXFSHR_RXUF (1 << 5) #define RXFSHR_RXMR (1 << 4) #define RXFSHR_RXFT (1 << 3) #define RXFSHR_RXFTL (1 << 2) #define RXFSHR_RXRF (1 << 1) #define RXFSHR_RXCE (1 << 0) #define KS_RXFHBCR 0x7E #define RXFHBCR_CNT_MASK (0xfff << 0) #define KS_TXQCR 0x80 #define TXQCR_AETFE (1 << 2) /* KSZ8851SNL */ #define TXQCR_TXQMAM (1 << 1) #define TXQCR_METFE (1 << 0) #define KS_RXQCR 0x82 #define RXQCR_RXDTTS (1 << 12) #define RXQCR_RXDBCTS (1 << 11) #define RXQCR_RXFCTS (1 << 10) #define RXQCR_RXIPHTOE (1 << 9) #define RXQCR_RXDTTE (1 << 7) #define RXQCR_RXDBCTE (1 << 6) #define RXQCR_RXFCTE (1 << 5) #define RXQCR_ADRFE (1 << 4) #define RXQCR_SDA (1 << 3) #define RXQCR_RRXEF (1 << 0) #define KS_TXFDPR 0x84 #define TXFDPR_TXFPAI (1 << 14) #define TXFDPR_TXFP_MASK (0x7ff << 0) #define TXFDPR_TXFP_SHIFT (0) #define KS_RXFDPR 0x86 #define RXFDPR_RXFPAI (1 << 14) #define RXFDPR_WST (1 << 12) /* KSZ8851-16MLL */ #define RXFDPR_EMS (1 << 11) /* KSZ8851-16MLL */ #define RXFDPR_RXFP_MASK (0x7ff << 0) #define RXFDPR_RXFP_SHIFT (0) #define KS_RXDTTR 0x8C #define KS_RXDBCTR 0x8E #define KS_IER 0x90 #define KS_ISR 0x92 #define IRQ_LCI (1 << 15) #define IRQ_TXI (1 << 14) #define IRQ_RXI (1 << 13) #define IRQ_RXOI (1 << 11) #define IRQ_TXPSI (1 << 9) #define IRQ_RXPSI (1 << 8) #define IRQ_TXSAI (1 << 6) #define IRQ_RXWFDI (1 << 5) #define IRQ_RXMPDI (1 << 4) #define IRQ_LDI (1 << 3) #define IRQ_EDI (1 << 2) #define IRQ_SPIBEI (1 << 1) /* KSZ8851SNL */ #define IRQ_DEDI (1 << 0) #define KS_RXFCTR 0x9C #define KS_RXFC 0x9D #define RXFCTR_RXFC_MASK (0xff << 8) #define RXFCTR_RXFC_SHIFT (8) #define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff) #define RXFCTR_RXFCT_MASK (0xff << 0) #define RXFCTR_RXFCT_SHIFT (0) #define KS_TXNTFSR 0x9E #define KS_MAHTR0 0xA0 #define KS_MAHTR1 0xA2 #define KS_MAHTR2 0xA4 #define KS_MAHTR3 0xA6 #define KS_FCLWR 0xB0 #define KS_FCHWR 0xB2 #define KS_FCOWR 0xB4 #define KS_CIDER 0xC0 #define CIDER_ID 0x8870 #define CIDER_REV_MASK (0x7 << 1) #define CIDER_REV_SHIFT (1) #define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7) #define KS_CGCR 0xC6 #define KS_IACR 0xC8 #define IACR_RDEN (1 << 12) #define IACR_TSEL_MASK (0x3 << 10) #define IACR_TSEL_SHIFT (10) #define IACR_TSEL_MIB (0x3 << 10) #define IACR_ADDR_MASK (0x1f << 0) #define IACR_ADDR_SHIFT (0) #define KS_IADLR 0xD0 #define KS_IAHDR 0xD2 #define KS_PMECR 0xD4 #define PMECR_PME_DELAY (1 << 14) #define PMECR_PME_POL (1 << 12) #define PMECR_WOL_WAKEUP (1 << 11) #define PMECR_WOL_MAGICPKT (1 << 10) #define PMECR_WOL_LINKUP (1 << 9) #define PMECR_WOL_ENERGY (1 << 8) #define PMECR_AUTO_WAKE_EN (1 << 7) #define PMECR_WAKEUP_NORMAL (1 << 6) #define PMECR_WKEVT_MASK (0xf << 2) #define PMECR_WKEVT_SHIFT (2) #define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf) #define PMECR_WKEVT_ENERGY (0x1 << 2) #define PMECR_WKEVT_LINK (0x2 << 2) #define PMECR_WKEVT_MAGICPKT (0x4 << 2) #define PMECR_WKEVT_FRAME (0x8 << 2) #define PMECR_PM_MASK (0x3 << 0) #define PMECR_PM_SHIFT (0) #define PMECR_PM_NORMAL (0x0 << 0) #define PMECR_PM_ENERGY (0x1 << 0) #define PMECR_PM_SOFTDOWN (0x2 << 0) #define PMECR_PM_POWERSAVE (0x3 << 0) /* Standard MII PHY data */ #define KS_P1MBCR 0xE4 #define KS_P1MBSR 0xE6 #define KS_PHY1ILR 0xE8 #define KS_PHY1IHR 0xEA #define KS_P1ANAR 0xEC #define KS_P1ANLPR 0xEE #define KS_P1SCLMD 0xF4 #define KS_P1CR 0xF6 #define P1CR_LEDOFF (1 << 15) #define P1CR_TXIDS (1 << 14) #define P1CR_RESTARTAN (1 << 13) #define P1CR_DISAUTOMDIX (1 
<< 10) #define P1CR_FORCEMDIX (1 << 9) #define P1CR_AUTONEGEN (1 << 7) #define P1CR_FORCE100 (1 << 6) #define P1CR_FORCEFDX (1 << 5) #define P1CR_ADV_FLOW (1 << 4) #define P1CR_ADV_100BT_FDX (1 << 3) #define P1CR_ADV_100BT_HDX (1 << 2) #define P1CR_ADV_10BT_FDX (1 << 1) #define P1CR_ADV_10BT_HDX (1 << 0) #define KS_P1SR 0xF8 #define P1SR_HP_MDIX (1 << 15) #define P1SR_REV_POL (1 << 13) #define P1SR_OP_100M (1 << 10) #define P1SR_OP_FDX (1 << 9) #define P1SR_OP_MDI (1 << 7) #define P1SR_AN_DONE (1 << 6) #define P1SR_LINK_GOOD (1 << 5) #define P1SR_PNTR_FLOW (1 << 4) #define P1SR_PNTR_100BT_FDX (1 << 3) #define P1SR_PNTR_100BT_HDX (1 << 2) #define P1SR_PNTR_10BT_FDX (1 << 1) #define P1SR_PNTR_10BT_HDX (1 << 0) /* TX Frame control */ #define TXFR_TXIC (1 << 15) #define TXFR_TXFID_MASK (0x3f << 0) #define TXFR_TXFID_SHIFT (0) /** * struct ks8851_rxctrl - KS8851 driver rx control * @mchash: Multicast hash-table data. * @rxcr1: KS_RXCR1 register setting * @rxcr2: KS_RXCR2 register setting * * Representation of the settings needs to control the receive filtering * such as the multicast hash-filter and the receive register settings. This * is used to make the job of working out if the receive settings change and * then issuing the new settings to the worker that will send the necessary * commands. */ struct ks8851_rxctrl { u16 mchash[4]; u16 rxcr1; u16 rxcr2; }; /** * union ks8851_tx_hdr - tx header data * @txb: The header as bytes * @txw: The header as 16bit, little-endian words * * A dual representation of the tx header data to allow * access to individual bytes, and to allow 16bit accesses * with 16bit alignment. */ union ks8851_tx_hdr { u8 txb[6]; __le16 txw[3]; }; /** * struct ks8851_net - KS8851 driver private data * @netdev: The network device we're bound to * @statelock: Lock on this structure for tx list. * @mii: The MII state information for the mii calls. * @rxctrl: RX settings for @rxctrl_work. * @rxctrl_work: Work queue for updating RX mode and multicast lists * @txq: Queue of packets for transmission. * @txh: Space for generating packet TX header in DMA-able data * @rxd: Space for receiving SPI data, in DMA-able space. * @txd: Space for transmitting SPI data, in DMA-able space. * @msg_enable: The message flags controlling driver output (see ethtool). * @tx_space: Free space in the hardware TX buffer (cached copy of KS_TXMIR). * @queued_len: Space required in hardware TX buffer for queued packets in txq. * @fid: Incrementing frame id tag. * @rc_ier: Cached copy of KS_IER. * @rc_ccr: Cached copy of KS_CCR. * @rc_rxqcr: Cached copy of KS_RXQCR. * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. * @vdd_reg: Optional regulator supplying the chip * @vdd_io: Optional digital power supply for IO * @gpio: Optional reset_n gpio * @mii_bus: Pointer to MII bus structure * @lock: Bus access lock callback * @unlock: Bus access unlock callback * @rdreg16: 16bit register read callback * @wrreg16: 16bit register write callback * @rdfifo: FIFO read callback * @wrfifo: FIFO write callback * @start_xmit: start_xmit() implementation callback * @flush_tx_work: flush_tx_work() implementation callback * * The @statelock is used to protect information in the structure which may * need to be accessed via several sources, such as the network driver layer * or one of the work queues. * * We align the buffers we may use for rx/tx to ensure that if the SPI driver * wants to DMA map them, it will not have any problems with data the driver * modifies. 
*/ struct ks8851_net { struct net_device *netdev; spinlock_t statelock; union ks8851_tx_hdr txh ____cacheline_aligned; u8 rxd[8]; u8 txd[8]; u32 msg_enable ____cacheline_aligned; u16 tx_space; u8 fid; u16 rc_ier; u16 rc_rxqcr; u16 rc_ccr; struct mii_if_info mii; struct ks8851_rxctrl rxctrl; struct work_struct rxctrl_work; struct sk_buff_head txq; unsigned int queued_len; struct eeprom_93cx6 eeprom; struct regulator *vdd_reg; struct regulator *vdd_io; struct gpio_desc *gpio; struct mii_bus *mii_bus; void (*lock)(struct ks8851_net *ks, unsigned long *flags); void (*unlock)(struct ks8851_net *ks, unsigned long *flags); unsigned int (*rdreg16)(struct ks8851_net *ks, unsigned int reg); void (*wrreg16)(struct ks8851_net *ks, unsigned int reg, unsigned int val); void (*rdfifo)(struct ks8851_net *ks, u8 *buff, unsigned int len); void (*wrfifo)(struct ks8851_net *ks, struct sk_buff *txp, bool irq); netdev_tx_t (*start_xmit)(struct sk_buff *skb, struct net_device *dev); void (*flush_tx_work)(struct ks8851_net *ks); }; int ks8851_probe_common(struct net_device *netdev, struct device *dev, int msg_en); void ks8851_remove_common(struct device *dev); int ks8851_suspend(struct device *dev); int ks8851_resume(struct device *dev); static __maybe_unused SIMPLE_DEV_PM_OPS(ks8851_pm_ops, ks8851_suspend, ks8851_resume); /** * ks8851_done_tx - update and then free skbuff after transmitting * @ks: The device state * @txb: The buffer transmitted */ static void __maybe_unused ks8851_done_tx(struct ks8851_net *ks, struct sk_buff *txb) { struct net_device *dev = ks->netdev; dev->stats.tx_bytes += txb->len; dev->stats.tx_packets++; dev_kfree_skb(txb); } #endif /* __KS8851_H__ */
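
/*
 * Usage sketch (illustrative only, not part of this header): the *_GET()
 * helpers above extract a bit-field from a raw register value, e.g. reading
 * the RX frame count from KS_RXFCTR::
 *
 *	n = RXFCTR_RXFC_GET(val);	which expands to ((val) >> 8) & 0xff
 *
 * so a register value of 0x0305 yields a frame count of 3.
 */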
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support * * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. * * Author: Gary R Hook <[email protected]> */ #include <linux/module.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <crypto/internal/aead.h> #include <crypto/algapi.h> #include <crypto/aes.h> #include <crypto/ctr.h> #include <crypto/gcm.h> #include <crypto/scatterwalk.h> #include "ccp-crypto.h" static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret) { return ret; } static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int key_len) { struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm); switch (key_len) { case AES_KEYSIZE_128: ctx->u.aes.type = CCP_AES_TYPE_128; break; case AES_KEYSIZE_192: ctx->u.aes.type = CCP_AES_TYPE_192; break; case AES_KEYSIZE_256: ctx->u.aes.type = CCP_AES_TYPE_256; break; default: return -EINVAL; } ctx->u.aes.mode = CCP_AES_MODE_GCM; ctx->u.aes.key_len = key_len; memcpy(ctx->u.aes.key, key, key_len); sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); return 0; } static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { switch (authsize) { case 16: case 15: case 14: case 13: case 12: case 8: case 4: break; default: return -EINVAL; } return 0; } static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm); struct ccp_aes_req_ctx *rctx = aead_request_ctx_dma(req); struct scatterlist *iv_sg = NULL; unsigned int iv_len = 0; int i; int ret = 0; if (!ctx->u.aes.key_len) return -EINVAL; if (ctx->u.aes.mode != CCP_AES_MODE_GCM) return -EINVAL; if (!req->iv) return -EINVAL; /* * 5 parts: * plaintext/ciphertext input * AAD * key * IV * Destination+tag buffer */ /* Prepare the IV: 12 bytes + an integer (counter) */ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); for (i = 0; i < 3; i++) rctx->iv[i + GCM_AES_IV_SIZE] = 0; rctx->iv[AES_BLOCK_SIZE - 1] = 1; /* Set up a scatterlist for the IV */ iv_sg = &rctx->iv_sg; iv_len = AES_BLOCK_SIZE; sg_init_one(iv_sg, rctx->iv, iv_len); /* The AAD + plaintext are concatenated in the src buffer */ memset(&rctx->cmd, 0, sizeof(rctx->cmd)); INIT_LIST_HEAD(&rctx->cmd.entry); rctx->cmd.engine = CCP_ENGINE_AES; rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm); rctx->cmd.u.aes.type = ctx->u.aes.type; rctx->cmd.u.aes.mode = ctx->u.aes.mode; rctx->cmd.u.aes.action = encrypt; rctx->cmd.u.aes.key = &ctx->u.aes.key_sg; rctx->cmd.u.aes.key_len = ctx->u.aes.key_len; rctx->cmd.u.aes.iv = iv_sg; rctx->cmd.u.aes.iv_len = iv_len; rctx->cmd.u.aes.src = req->src; rctx->cmd.u.aes.src_len = req->cryptlen; rctx->cmd.u.aes.aad_len = req->assoclen; /* The cipher text + the tag are in the dst buffer */ rctx->cmd.u.aes.dst = req->dst; ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); return ret; } static int ccp_aes_gcm_encrypt(struct aead_request *req) { return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_ENCRYPT); } static int ccp_aes_gcm_decrypt(struct aead_request *req) { return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_DECRYPT); } static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm) { struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm); ctx->complete = ccp_aes_gcm_complete; ctx->u.aes.key_len = 0; crypto_aead_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx)); return 0; } static void ccp_aes_gcm_cra_exit(struct crypto_tfm *tfm) { } static struct 
aead_alg ccp_aes_gcm_defaults = { .setkey = ccp_aes_gcm_setkey, .setauthsize = ccp_aes_gcm_setauthsize, .encrypt = ccp_aes_gcm_encrypt, .decrypt = ccp_aes_gcm_decrypt, .init = ccp_aes_gcm_cra_init, .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, .base = { .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING, .cra_priority = CCP_CRA_PRIORITY, .cra_exit = ccp_aes_gcm_cra_exit, .cra_module = THIS_MODULE, }, }; struct ccp_aes_aead_def { enum ccp_aes_mode mode; unsigned int version; const char *name; const char *driver_name; unsigned int blocksize; unsigned int ivsize; struct aead_alg *alg_defaults; }; static struct ccp_aes_aead_def aes_aead_algs[] = { { .mode = CCP_AES_MODE_GHASH, .version = CCP_VERSION(5, 0), .name = "gcm(aes)", .driver_name = "gcm-aes-ccp", .blocksize = 1, .ivsize = AES_BLOCK_SIZE, .alg_defaults = &ccp_aes_gcm_defaults, }, }; static int ccp_register_aes_aead(struct list_head *head, const struct ccp_aes_aead_def *def) { struct ccp_crypto_aead *ccp_aead; struct aead_alg *alg; int ret; ccp_aead = kzalloc(sizeof(*ccp_aead), GFP_KERNEL); if (!ccp_aead) return -ENOMEM; INIT_LIST_HEAD(&ccp_aead->entry); ccp_aead->mode = def->mode; /* Copy the defaults and override as necessary */ alg = &ccp_aead->alg; *alg = *def->alg_defaults; snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->driver_name); alg->base.cra_blocksize = def->blocksize; ret = crypto_register_aead(alg); if (ret) { pr_err("%s aead algorithm registration error (%d)\n", alg->base.cra_name, ret); kfree(ccp_aead); return ret; } list_add(&ccp_aead->entry, head); return 0; } int ccp_register_aes_aeads(struct list_head *head) { int i, ret; unsigned int ccpversion = ccp_version(); for (i = 0; i < ARRAY_SIZE(aes_aead_algs); i++) { if (aes_aead_algs[i].version > ccpversion) continue; ret = ccp_register_aes_aead(head, &aes_aead_algs[i]); if (ret) return ret; } return 0; }
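
/*
 * Illustrative sketch (not part of the driver): the 16-byte IV block that
 * ccp_aes_gcm_crypt() above hands to the CCP is the standard GCM initial
 * counter block for a 96-bit nonce -- the 12 nonce bytes followed by a
 * 32-bit big-endian counter initialised to 1. The helper name is
 * hypothetical.
 */
static void __maybe_unused ccp_aes_gcm_example_iv(u8 iv_block[AES_BLOCK_SIZE],
						  const u8 nonce[GCM_AES_IV_SIZE])
{
	memcpy(iv_block, nonce, GCM_AES_IV_SIZE);	/* bytes 0..11: nonce */
	iv_block[12] = 0;				/* bytes 12..14: zero */
	iv_block[13] = 0;
	iv_block[14] = 0;
	iv_block[AES_BLOCK_SIZE - 1] = 1;		/* byte 15: counter = 1 */
}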
// SPDX-License-Identifier: GPL-2.0-only /* * Fence mechanism for dma-buf and to allow for asynchronous dma access * * Copyright (C) 2012 Canonical Ltd * Copyright (C) 2012 Texas Instruments * * Authors: * Rob Clark <[email protected]> * Maarten Lankhorst <[email protected]> */ #include <linux/slab.h> #include <linux/export.h> #include <linux/atomic.h> #include <linux/dma-fence.h> #include <linux/sched/signal.h> #include <linux/seq_file.h> #define CREATE_TRACE_POINTS #include <trace/events/dma_fence.h> EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit); EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal); EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled); static DEFINE_SPINLOCK(dma_fence_stub_lock); static struct dma_fence dma_fence_stub; /* * fence context counter: each execution context should have its own * fence context, this allows checking if fences belong to the same * context or not. One device can have multiple separate contexts, * and they're used if some engine can run independently of another. */ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1); /** * DOC: DMA fences overview * * DMA fences, represented by &struct dma_fence, are the kernel internal * synchronization primitive for DMA operations like GPU rendering, video * encoding/decoding, or displaying buffers on a screen. * * A fence is initialized using dma_fence_init() and completed using * dma_fence_signal(). Fences are associated with a context, allocated through * dma_fence_context_alloc(), and all fences on the same context are * fully ordered. * * Since the purposes of fences is to facilitate cross-device and * cross-application synchronization, there's multiple ways to use one: * * - Individual fences can be exposed as a &sync_file, accessed as a file * descriptor from userspace, created by calling sync_file_create(). This is * called explicit fencing, since userspace passes around explicit * synchronization points. * * - Some subsystems also have their own explicit fencing primitives, like * &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying * fence to be updated. * * - Then there's also implicit fencing, where the synchronization points are * implicitly passed around as part of shared &dma_buf instances. Such * implicit fences are stored in &struct dma_resv through the * &dma_buf.resv pointer. */ /** * DOC: fence cross-driver contract * * Since &dma_fence provide a cross driver contract, all drivers must follow the * same rules: * * * Fences must complete in a reasonable time. Fences which represent kernels * and shaders submitted by userspace, which could run forever, must be backed * up by timeout and gpu hang recovery code. Minimally that code must prevent * further command submission and force complete all in-flight fences, e.g. * when the driver or hardware do not support gpu reset, or if the gpu reset * failed for some reason. Ideally the driver supports gpu recovery which only * affects the offending userspace context, and no other userspace * submissions. * * * Drivers may have different ideas of what completion within a reasonable * time means. Some hang recovery code uses a fixed timeout, others a mix * between observing forward progress and increasingly strict timeouts. * Drivers should not try to second guess timeout handling of fences from * other drivers. 
* * * To ensure there's no deadlocks of dma_fence_wait() against other locks * drivers should annotate all code required to reach dma_fence_signal(), * which completes the fences, with dma_fence_begin_signalling() and * dma_fence_end_signalling(). * * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock(). * This means any code required for fence completion cannot acquire a * &dma_resv lock. Note that this also pulls in the entire established * locking hierarchy around dma_resv_lock() and dma_resv_unlock(). * * * Drivers are allowed to call dma_fence_wait() from their &shrinker * callbacks. This means any code required for fence completion cannot * allocate memory with GFP_KERNEL. * * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier * respectively &mmu_interval_notifier callbacks. This means any code required * for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO. * Only GFP_ATOMIC is permissible, which might fail. * * Note that only GPU drivers have a reasonable excuse for both requiring * &mmu_interval_notifier and &shrinker callbacks at the same time as having to * track asynchronous compute work using &dma_fence. No driver outside of * drivers/gpu should ever call dma_fence_wait() in such contexts. */ static const char *dma_fence_stub_get_name(struct dma_fence *fence) { return "stub"; } static const struct dma_fence_ops dma_fence_stub_ops = { .get_driver_name = dma_fence_stub_get_name, .get_timeline_name = dma_fence_stub_get_name, }; /** * dma_fence_get_stub - return a signaled fence * * Return a stub fence which is already signaled. The fence's * timestamp corresponds to the first time after boot this * function is called. */ struct dma_fence *dma_fence_get_stub(void) { spin_lock(&dma_fence_stub_lock); if (!dma_fence_stub.ops) { dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops, &dma_fence_stub_lock, 0, 0); set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &dma_fence_stub.flags); dma_fence_signal_locked(&dma_fence_stub); } spin_unlock(&dma_fence_stub_lock); return dma_fence_get(&dma_fence_stub); } EXPORT_SYMBOL(dma_fence_get_stub); /** * dma_fence_allocate_private_stub - return a private, signaled fence * @timestamp: timestamp when the fence was signaled * * Return a newly allocated and signaled stub fence. */ struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp) { struct dma_fence *fence; fence = kzalloc(sizeof(*fence), GFP_KERNEL); if (fence == NULL) return NULL; dma_fence_init(fence, &dma_fence_stub_ops, &dma_fence_stub_lock, 0, 0); set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); dma_fence_signal_timestamp(fence, timestamp); return fence; } EXPORT_SYMBOL(dma_fence_allocate_private_stub); /** * dma_fence_context_alloc - allocate an array of fence contexts * @num: amount of contexts to allocate * * This function will return the first index of the number of fence contexts * allocated. The fence context is used for setting &dma_fence.context to a * unique number by passing the context to dma_fence_init(). 
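 *
 * A minimal usage sketch (illustrative only -- the ops, lock and seqno below
 * are driver-specific placeholders)::
 *
 *	context = dma_fence_context_alloc(1);
 *	dma_fence_init(&fence, &my_fence_ops, &my_fence_lock,
 *		       context, ++my_seqno);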
*/ u64 dma_fence_context_alloc(unsigned num) { WARN_ON(!num); return atomic64_fetch_add(num, &dma_fence_context_counter); } EXPORT_SYMBOL(dma_fence_context_alloc); /** * DOC: fence signalling annotation * * Proving correctness of all the kernel code around &dma_fence through code * review and testing is tricky for a few reasons: * * * It is a cross-driver contract, and therefore all drivers must follow the * same rules for lock nesting order, calling contexts for various functions * and anything else significant for in-kernel interfaces. But it is also * impossible to test all drivers in a single machine, hence brute-force N vs. * N testing of all combinations is impossible. Even just limiting to the * possible combinations is infeasible. * * * There is an enormous amount of driver code involved. For render drivers * there's the tail of command submission, after fences are published, * scheduler code, interrupt and workers to process job completion, * and timeout, gpu reset and gpu hang recovery code. Plus for integration * with core mm with have &mmu_notifier, respectively &mmu_interval_notifier, * and &shrinker. For modesetting drivers there's the commit tail functions * between when fences for an atomic modeset are published, and when the * corresponding vblank completes, including any interrupt processing and * related workers. Auditing all that code, across all drivers, is not * feasible. * * * Due to how many other subsystems are involved and the locking hierarchies * this pulls in there is extremely thin wiggle-room for driver-specific * differences. &dma_fence interacts with almost all of the core memory * handling through page fault handlers via &dma_resv, dma_resv_lock() and * dma_resv_unlock(). On the other side it also interacts through all * allocation sites through &mmu_notifier and &shrinker. * * Furthermore lockdep does not handle cross-release dependencies, which means * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught * at runtime with some quick testing. The simplest example is one thread * waiting on a &dma_fence while holding a lock:: * * lock(A); * dma_fence_wait(B); * unlock(A); * * while the other thread is stuck trying to acquire the same lock, which * prevents it from signalling the fence the previous thread is stuck waiting * on:: * * lock(A); * unlock(A); * dma_fence_signal(B); * * By manually annotating all code relevant to signalling a &dma_fence we can * teach lockdep about these dependencies, which also helps with the validation * headache since now lockdep can check all the rules for us:: * * cookie = dma_fence_begin_signalling(); * lock(A); * unlock(A); * dma_fence_signal(B); * dma_fence_end_signalling(cookie); * * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to * annotate critical sections the following rules need to be observed: * * * All code necessary to complete a &dma_fence must be annotated, from the * point where a fence is accessible to other threads, to the point where * dma_fence_signal() is called. Un-annotated code can contain deadlock issues, * and due to the very strict rules and many corner cases it is infeasible to * catch these just with review or normal stress testing. * * * &struct dma_resv deserves a special note, since the readers are only * protected by rcu. This means the signalling critical section starts as soon * as the new fences are installed, even before dma_resv_unlock() is called. 
* * * The only exception are fast paths and opportunistic signalling code, which * calls dma_fence_signal() purely as an optimization, but is not required to * guarantee completion of a &dma_fence. The usual example is a wait IOCTL * which calls dma_fence_signal(), while the mandatory completion path goes * through a hardware interrupt and possible job completion worker. * * * To aid composability of code, the annotations can be freely nested, as long * as the overall locking hierarchy is consistent. The annotations also work * both in interrupt and process context. Due to implementation details this * requires that callers pass an opaque cookie from * dma_fence_begin_signalling() to dma_fence_end_signalling(). * * * Validation against the cross driver contract is implemented by priming * lockdep with the relevant hierarchy at boot-up. This means even just * testing with a single device is enough to validate a driver, at least as * far as deadlocks with dma_fence_wait() against dma_fence_signal() are * concerned. */ #ifdef CONFIG_LOCKDEP static struct lockdep_map dma_fence_lockdep_map = { .name = "dma_fence_map" }; /** * dma_fence_begin_signalling - begin a critical DMA fence signalling section * * Drivers should use this to annotate the beginning of any code section * required to eventually complete &dma_fence by calling dma_fence_signal(). * * The end of these critical sections are annotated with * dma_fence_end_signalling(). * * Returns: * * Opaque cookie needed by the implementation, which needs to be passed to * dma_fence_end_signalling(). */ bool dma_fence_begin_signalling(void) { /* explicitly nesting ... */ if (lock_is_held_type(&dma_fence_lockdep_map, 1)) return true; /* rely on might_sleep check for soft/hardirq locks */ if (in_atomic()) return true; /* ... and non-recursive successful read_trylock */ lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _RET_IP_); return false; } EXPORT_SYMBOL(dma_fence_begin_signalling); /** * dma_fence_end_signalling - end a critical DMA fence signalling section * @cookie: opaque cookie from dma_fence_begin_signalling() * * Closes a critical section annotation opened by dma_fence_begin_signalling(). */ void dma_fence_end_signalling(bool cookie) { if (cookie) return; lock_release(&dma_fence_lockdep_map, _RET_IP_); } EXPORT_SYMBOL(dma_fence_end_signalling); void __dma_fence_might_wait(void) { bool tmp; tmp = lock_is_held_type(&dma_fence_lockdep_map, 1); if (tmp) lock_release(&dma_fence_lockdep_map, _THIS_IP_); lock_map_acquire(&dma_fence_lockdep_map); lock_map_release(&dma_fence_lockdep_map); if (tmp) lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _THIS_IP_); } #endif /** * dma_fence_signal_timestamp_locked - signal completion of a fence * @fence: the fence to signal * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will * only be effective the first time. Set the timestamp provided as the fence * signal timestamp. * * Unlike dma_fence_signal_timestamp(), this function must be called with * &dma_fence.lock held. * * Returns 0 on success and a negative error value when @fence has been * signalled already. 
*/ int dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp) { struct dma_fence_cb *cur, *tmp; struct list_head cb_list; lockdep_assert_held(fence->lock); if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))) return -EINVAL; /* Stash the cb_list before replacing it with the timestamp */ list_replace(&fence->cb_list, &cb_list); fence->timestamp = timestamp; set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); trace_dma_fence_signaled(fence); list_for_each_entry_safe(cur, tmp, &cb_list, node) { INIT_LIST_HEAD(&cur->node); cur->func(fence, cur); } return 0; } EXPORT_SYMBOL(dma_fence_signal_timestamp_locked); /** * dma_fence_signal_timestamp - signal completion of a fence * @fence: the fence to signal * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will * only be effective the first time. Set the timestamp provided as the fence * signal timestamp. * * Returns 0 on success and a negative error value when @fence has been * signalled already. */ int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp) { unsigned long flags; int ret; if (WARN_ON(!fence)) return -EINVAL; spin_lock_irqsave(fence->lock, flags); ret = dma_fence_signal_timestamp_locked(fence, timestamp); spin_unlock_irqrestore(fence->lock, flags); return ret; } EXPORT_SYMBOL(dma_fence_signal_timestamp); /** * dma_fence_signal_locked - signal completion of a fence * @fence: the fence to signal * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will * only be effective the first time. * * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock * held. * * Returns 0 on success and a negative error value when @fence has been * signalled already. */ int dma_fence_signal_locked(struct dma_fence *fence) { return dma_fence_signal_timestamp_locked(fence, ktime_get()); } EXPORT_SYMBOL(dma_fence_signal_locked); /** * dma_fence_signal - signal completion of a fence * @fence: the fence to signal * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will * only be effective the first time. * * Returns 0 on success and a negative error value when @fence has been * signalled already. 
 */
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;
	int ret;
	bool tmp;

	if (WARN_ON(!fence))
		return -EINVAL;

	tmp = dma_fence_begin_signalling();

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
	spin_unlock_irqrestore(fence->lock, flags);

	dma_fence_end_signalling(tmp);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal);

/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	might_sleep();

	__dma_fence_might_wait();

	dma_fence_enable_sw_signaling(fence);

	trace_dma_fence_wait_start(fence);
	if (fence->ops->wait)
		ret = fence->ops->wait(fence, intr, timeout);
	else
		ret = dma_fence_default_wait(fence, intr, timeout);
	trace_dma_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);

/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't call
 * this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	trace_dma_fence_destroy(fence);

	if (WARN(!list_empty(&fence->cb_list) &&
		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
		 fence->ops->get_driver_name(fence),
		 fence->ops->get_timeline_name(fence),
		 fence->context, fence->seqno)) {
		unsigned long flags;

		/*
		 * Failed to signal before release, likely a refcounting issue.
		 *
		 * This should never happen, but if it does make sure that we
		 * don't leave chains dangling. We set the error flag first
		 * so that the callbacks know this signal is due to an error.
		 */
		spin_lock_irqsave(fence->lock, flags);
		fence->error = -EDEADLK;
		dma_fence_signal_locked(fence);
		spin_unlock_irqrestore(fence->lock, flags);
	}

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

/**
 * dma_fence_free - default release function for &dma_fence.
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
*/ void dma_fence_free(struct dma_fence *fence) { kfree_rcu(fence, rcu); } EXPORT_SYMBOL(dma_fence_free); static bool __dma_fence_enable_signaling(struct dma_fence *fence) { bool was_set; lockdep_assert_held(fence->lock); was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return false; if (!was_set && fence->ops->enable_signaling) { trace_dma_fence_enable_signal(fence); if (!fence->ops->enable_signaling(fence)) { dma_fence_signal_locked(fence); return false; } } return true; } /** * dma_fence_enable_sw_signaling - enable signaling on fence * @fence: the fence to enable * * This will request for sw signaling to be enabled, to make the fence * complete as soon as possible. This calls &dma_fence_ops.enable_signaling * internally. */ void dma_fence_enable_sw_signaling(struct dma_fence *fence) { unsigned long flags; spin_lock_irqsave(fence->lock, flags); __dma_fence_enable_signaling(fence); spin_unlock_irqrestore(fence->lock, flags); } EXPORT_SYMBOL(dma_fence_enable_sw_signaling); /** * dma_fence_add_callback - add a callback to be called when the fence * is signaled * @fence: the fence to wait on * @cb: the callback to register * @func: the function to call * * Add a software callback to the fence. The caller should keep a reference to * the fence. * * @cb will be initialized by dma_fence_add_callback(), no initialization * by the caller is required. Any number of callbacks can be registered * to a fence, but a callback can only be registered to one fence at a time. * * If fence is already signaled, this function will return -ENOENT (and * *not* call the callback). * * Note that the callback can be called from an atomic context or irq context. * * Returns 0 in case of success, -ENOENT if the fence is already signaled * and -EINVAL in case of error. */ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, dma_fence_func_t func) { unsigned long flags; int ret = 0; if (WARN_ON(!fence || !func)) return -EINVAL; if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { INIT_LIST_HEAD(&cb->node); return -ENOENT; } spin_lock_irqsave(fence->lock, flags); if (__dma_fence_enable_signaling(fence)) { cb->func = func; list_add_tail(&cb->node, &fence->cb_list); } else { INIT_LIST_HEAD(&cb->node); ret = -ENOENT; } spin_unlock_irqrestore(fence->lock, flags); return ret; } EXPORT_SYMBOL(dma_fence_add_callback); /** * dma_fence_get_status - returns the status upon completion * @fence: the dma_fence to query * * This wraps dma_fence_get_status_locked() to return the error status * condition on a signaled fence. See dma_fence_get_status_locked() for more * details. * * Returns 0 if the fence has not yet been signaled, 1 if the fence has * been signaled without an error condition, or a negative error code * if the fence has been completed in err. */ int dma_fence_get_status(struct dma_fence *fence) { unsigned long flags; int status; spin_lock_irqsave(fence->lock, flags); status = dma_fence_get_status_locked(fence); spin_unlock_irqrestore(fence->lock, flags); return status; } EXPORT_SYMBOL(dma_fence_get_status); /** * dma_fence_remove_callback - remove a callback from the signaling list * @fence: the fence to wait on * @cb: the callback to remove * * Remove a previously queued callback from the fence. This function returns * true if the callback is successfully removed, or false if the fence has * already been signaled. 
* * *WARNING*: * Cancelling a callback should only be done if you really know what you're * doing, since deadlocks and race conditions could occur all too easily. For * this reason, it should only ever be done on hardware lockup recovery, * with a reference held to the fence. * * Behaviour is undefined if @cb has not been added to @fence using * dma_fence_add_callback() beforehand. */ bool dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb) { unsigned long flags; bool ret; spin_lock_irqsave(fence->lock, flags); ret = !list_empty(&cb->node); if (ret) list_del_init(&cb->node); spin_unlock_irqrestore(fence->lock, flags); return ret; } EXPORT_SYMBOL(dma_fence_remove_callback); struct default_wait_cb { struct dma_fence_cb base; struct task_struct *task; }; static void dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct default_wait_cb *wait = container_of(cb, struct default_wait_cb, base); wake_up_state(wait->task, TASK_NORMAL); } /** * dma_fence_default_wait - default sleep until the fence gets signaled * or until timeout elapses * @fence: the fence to wait on * @intr: if true, do an interruptible wait * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the * remaining timeout in jiffies on success. If timeout is zero the value one is * returned if the fence is already signaled for consistency with other * functions taking a jiffies timeout. */ signed long dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) { struct default_wait_cb cb; unsigned long flags; signed long ret = timeout ? timeout : 1; spin_lock_irqsave(fence->lock, flags); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) goto out; if (intr && signal_pending(current)) { ret = -ERESTARTSYS; goto out; } if (!timeout) { ret = 0; goto out; } cb.base.func = dma_fence_default_wait_cb; cb.task = current; list_add(&cb.base.node, &fence->cb_list); while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) { if (intr) __set_current_state(TASK_INTERRUPTIBLE); else __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irqrestore(fence->lock, flags); ret = schedule_timeout(ret); spin_lock_irqsave(fence->lock, flags); if (ret > 0 && intr && signal_pending(current)) ret = -ERESTARTSYS; } if (!list_empty(&cb.base.node)) list_del(&cb.base.node); __set_current_state(TASK_RUNNING); out: spin_unlock_irqrestore(fence->lock, flags); return ret; } EXPORT_SYMBOL(dma_fence_default_wait); static bool dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count, uint32_t *idx) { int i; for (i = 0; i < count; ++i) { struct dma_fence *fence = fences[i]; if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { if (idx) *idx = i; return true; } } return false; } /** * dma_fence_wait_any_timeout - sleep until any fence gets signaled * or until timeout elapses * @fences: array of fences to wait on * @count: number of fences to wait on * @intr: if true, do an interruptible wait * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * @idx: used to store the first signaled fence index, meaningful only on * positive return * * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies * on success. * * Synchronous waits for the first fence in the array to be signaled. 
The * caller needs to hold a reference to all fences in the array, otherwise a * fence might be freed before return, resulting in undefined behavior. * * See also dma_fence_wait() and dma_fence_wait_timeout(). */ signed long dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, bool intr, signed long timeout, uint32_t *idx) { struct default_wait_cb *cb; signed long ret = timeout; unsigned i; if (WARN_ON(!fences || !count || timeout < 0)) return -EINVAL; if (timeout == 0) { for (i = 0; i < count; ++i) if (dma_fence_is_signaled(fences[i])) { if (idx) *idx = i; return 1; } return 0; } cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL); if (cb == NULL) { ret = -ENOMEM; goto err_free_cb; } for (i = 0; i < count; ++i) { struct dma_fence *fence = fences[i]; cb[i].task = current; if (dma_fence_add_callback(fence, &cb[i].base, dma_fence_default_wait_cb)) { /* This fence is already signaled */ if (idx) *idx = i; goto fence_rm_cb; } } while (ret > 0) { if (intr) set_current_state(TASK_INTERRUPTIBLE); else set_current_state(TASK_UNINTERRUPTIBLE); if (dma_fence_test_signaled_any(fences, count, idx)) break; ret = schedule_timeout(ret); if (ret > 0 && intr && signal_pending(current)) ret = -ERESTARTSYS; } __set_current_state(TASK_RUNNING); fence_rm_cb: while (i-- > 0) dma_fence_remove_callback(fences[i], &cb[i].base); err_free_cb: kfree(cb); return ret; } EXPORT_SYMBOL(dma_fence_wait_any_timeout); /** * DOC: deadline hints * * In an ideal world, it would be possible to pipeline a workload sufficiently * that a utilization based device frequency governor could arrive at a minimum * frequency that meets the requirements of the use-case, in order to minimize * power consumption. But in the real world there are many workloads which * defy this ideal. For example, but not limited to: * * * Workloads that ping-pong between device and CPU, with alternating periods * of CPU waiting for device, and device waiting on CPU. This can result in * devfreq and cpufreq seeing idle time in their respective domains and in * result reduce frequency. * * * Workloads that interact with a periodic time based deadline, such as double * buffered GPU rendering vs vblank sync'd page flipping. In this scenario, * missing a vblank deadline results in an *increase* in idle time on the GPU * (since it has to wait an additional vblank period), sending a signal to * the GPU's devfreq to reduce frequency, when in fact the opposite is what is * needed. * * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline * (or indirectly via userspace facing ioctls like &sync_set_deadline). * The deadline hint provides a way for the waiting driver, or userspace, to * convey an appropriate sense of urgency to the signaling driver. * * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace * facing APIs). The time could either be some point in the future (such as * the vblank based deadline for page-flipping, or the start of a compositor's * composition cycle), or the current time to indicate an immediate deadline * hint (Ie. forward progress cannot be made until this fence is signaled). * * Multiple deadlines may be set on a given fence, even in parallel. See the * documentation for &dma_fence_ops.set_deadline. * * The deadline hint is just that, a hint. The driver that created the fence * may react by increasing frequency, making different scheduling choices, etc. * Or doing nothing at all. 
 */

/**
 * dma_fence_set_deadline - set desired fence-wait deadline hint
 * @fence: the fence that is to be waited on
 * @deadline: the time by which the waiter hopes for the fence to be
 *    signaled
 *
 * Give the fence signaler a hint about an upcoming deadline, such as
 * vblank, by which point the waiter would prefer the fence to be
 * signaled. This is intended to give feedback to the fence signaler
 * to aid in power management decisions, such as boosting GPU frequency
 * if a periodic vblank deadline is approaching but the fence is not
 * yet signaled.
 */
void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
	if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
		fence->ops->set_deadline(fence, deadline);
}
EXPORT_SYMBOL(dma_fence_set_deadline);

/**
 * dma_fence_describe - Dump fence description into seq_file
 * @fence: the fence to describe
 * @seq: the seq_file to put the textual description into
 *
 * Dump a textual description of the fence and its state into the seq_file.
 */
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
	seq_printf(seq, "%s %s seq %llu %ssignalled\n",
		   fence->ops->get_driver_name(fence),
		   fence->ops->get_timeline_name(fence), fence->seqno,
		   dma_fence_is_signaled(fence) ? "" : "un");
}
EXPORT_SYMBOL(dma_fence_describe);

/**
 * dma_fence_init - Initialize a custom fence.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linear increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
	       spinlock_t *lock, u64 context, u64 seqno)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;
	fence->error = 0;

	trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(dma_fence_init);
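
/*
 * Illustrative usage sketch (not part of the upstream file): it ties the
 * "fence signalling annotation" documentation above to the init and signal
 * helpers in this file. The ops, lock and function names below are
 * hypothetical and exist only for this example; error handling is minimal
 * and the block is disabled so it is never built. It assumes <linux/slab.h>
 * and <linux/dma-fence.h>, which this file already includes.
 */
#if 0	/* example only */
static const char *example_get_driver_name(struct dma_fence *fence)
{
	return "example-driver";
}

static const char *example_get_timeline_name(struct dma_fence *fence)
{
	return "example-timeline";
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = example_get_driver_name,
	.get_timeline_name = example_get_timeline_name,
};

static DEFINE_SPINLOCK(example_fence_lock);

static struct dma_fence *example_publish_and_signal(void)
{
	struct dma_fence *fence;
	bool cookie;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	dma_fence_init(fence, &example_fence_ops, &example_fence_lock,
		       dma_fence_context_alloc(1), 1);

	/*
	 * Everything from the point the fence becomes visible to other
	 * threads until dma_fence_signal() is the signalling critical
	 * section; annotating it lets lockdep cross-check it against
	 * dma_fence_wait() callers.
	 */
	cookie = dma_fence_begin_signalling();
	dma_fence_signal(fence);
	dma_fence_end_signalling(cookie);

	return fence;
}
#endif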
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * (C) Copyright 2020 Hewlett Packard Enterprise Development LP * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. */ /* * Cross Partition (XP) uv-based functions. * * Architecture specific implementation of common functions. * */ #include <linux/device.h> #include <asm/uv/uv_hub.h> #if defined CONFIG_X86_64 #include <asm/uv/bios.h> #endif #include "../sgi-gru/grukservices.h" #include "xp.h" /* * Convert a virtual memory address to a physical memory address. */ static unsigned long xp_pa_uv(void *addr) { return uv_gpa(addr); } /* * Convert a global physical to socket physical address. */ static unsigned long xp_socket_pa_uv(unsigned long gpa) { return uv_gpa_to_soc_phys_ram(gpa); } static enum xp_retval xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa, size_t len) { int ret; unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa)); BUG_ON(!uv_gpa_in_mmr_space(src_gpa)); BUG_ON(len != 8); ret = gru_read_gpa(dst_va, src_gpa); if (ret == 0) return xpSuccess; dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx " "len=%ld\n", dst_gpa, src_gpa, len); return xpGruCopyError; } static enum xp_retval xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa, size_t len) { int ret; if (uv_gpa_in_mmr_space(src_gpa)) return xp_remote_mmr_read(dst_gpa, src_gpa, len); ret = gru_copy_gpa(dst_gpa, src_gpa, len); if (ret == 0) return xpSuccess; dev_err(xp, "gru_copy_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx " "len=%ld\n", dst_gpa, src_gpa, len); return xpGruCopyError; } static int xp_cpu_to_nasid_uv(int cpuid) { /* ??? Is this same as sn2 nasid in mach/part bitmaps set up by SAL? */ return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid)); } static enum xp_retval xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size) { int ret; #if defined CONFIG_X86_64 ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_ALLOW_RW); if (ret != BIOS_STATUS_SUCCESS) { dev_err(xp, "uv_bios_change_memprotect(,, " "UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret); return xpBiosError; } #else #error not a supported configuration #endif return xpSuccess; } static enum xp_retval xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size) { int ret; #if defined CONFIG_X86_64 ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_RESTRICT_ACCESS); if (ret != BIOS_STATUS_SUCCESS) { dev_err(xp, "uv_bios_change_memprotect(,, " "UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret); return xpBiosError; } #else #error not a supported configuration #endif return xpSuccess; } enum xp_retval xp_init_uv(void) { WARN_ON(!is_uv_system()); if (!is_uv_system()) return xpUnsupported; xp_max_npartitions = XP_MAX_NPARTITIONS_UV; #ifdef CONFIG_X86 xp_partition_id = sn_partition_id; xp_region_size = sn_region_size; #endif xp_pa = xp_pa_uv; xp_socket_pa = xp_socket_pa_uv; xp_remote_memcpy = xp_remote_memcpy_uv; xp_cpu_to_nasid = xp_cpu_to_nasid_uv; xp_expand_memprotect = xp_expand_memprotect_uv; xp_restrict_memprotect = xp_restrict_memprotect_uv; return xpSuccess; } void xp_exit_uv(void) { WARN_ON(!is_uv_system()); }
// SPDX-License-Identifier: GPL-2.0-only /* * NXP Wireless LAN device driver: 802.11h * * Copyright 2011-2020 NXP */ #include "main.h" #include "fw.h" void mwifiex_init_11h_params(struct mwifiex_private *priv) { priv->state_11h.is_11h_enabled = true; priv->state_11h.is_11h_active = false; } inline int mwifiex_is_11h_active(struct mwifiex_private *priv) { return priv->state_11h.is_11h_active; } /* This function appends 11h info to a buffer while joining an * infrastructure BSS */ static void mwifiex_11h_process_infra_join(struct mwifiex_private *priv, u8 **buffer, struct mwifiex_bssdescriptor *bss_desc) { struct mwifiex_ie_types_header *ie_header; struct mwifiex_ie_types_pwr_capability *cap; struct mwifiex_ie_types_local_pwr_constraint *constraint; struct ieee80211_supported_band *sband; u8 radio_type; int i; if (!buffer || !(*buffer)) return; radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band); sband = priv->wdev.wiphy->bands[radio_type]; cap = (struct mwifiex_ie_types_pwr_capability *)*buffer; cap->header.type = cpu_to_le16(WLAN_EID_PWR_CAPABILITY); cap->header.len = cpu_to_le16(2); cap->min_pwr = 0; cap->max_pwr = 0; *buffer += sizeof(*cap); constraint = (struct mwifiex_ie_types_local_pwr_constraint *)*buffer; constraint->header.type = cpu_to_le16(WLAN_EID_PWR_CONSTRAINT); constraint->header.len = cpu_to_le16(2); constraint->chan = bss_desc->channel; constraint->constraint = bss_desc->local_constraint; *buffer += sizeof(*constraint); ie_header = (struct mwifiex_ie_types_header *)*buffer; ie_header->type = cpu_to_le16(TLV_TYPE_PASSTHROUGH); ie_header->len = cpu_to_le16(2 * sband->n_channels + 2); *buffer += sizeof(*ie_header); *(*buffer)++ = WLAN_EID_SUPPORTED_CHANNELS; *(*buffer)++ = 2 * sband->n_channels; for (i = 0; i < sband->n_channels; i++) { *(*buffer)++ = ieee80211_frequency_to_channel( sband->channels[i].center_freq); *(*buffer)++ = 1; /* one channel in the subband */ } } /* Enable or disable the 11h extensions in the firmware */ int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag) { u32 enable = flag; /* enable master mode radar detection on AP interface */ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) && enable) enable |= MWIFIEX_MASTER_RADAR_DET_MASK; return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB, HostCmd_ACT_GEN_SET, DOT11H_I, &enable, true); } /* This functions processes TLV buffer for a pending BSS Join command. * * Activate 11h functionality in the firmware if the spectrum management * capability bit is found in the network we are joining. Also, necessary * TLVs are set based on requested network's 11h capability. */ void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer, struct mwifiex_bssdescriptor *bss_desc) { if (bss_desc->sensed_11h) { /* Activate 11h functions in firmware, turns on capability * bit */ mwifiex_11h_activate(priv, true); priv->state_11h.is_11h_active = true; bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_SPECTRUM_MGMT; mwifiex_11h_process_infra_join(priv, buffer, bss_desc); } else { /* Deactivate 11h functions in the firmware */ mwifiex_11h_activate(priv, false); priv->state_11h.is_11h_active = false; bss_desc->cap_info_bitmap &= ~WLAN_CAPABILITY_SPECTRUM_MGMT; } } /* This is DFS CAC work queue function. * This delayed work emits CAC finished event for cfg80211 if * CAC was started earlier. 
*/ void mwifiex_dfs_cac_work_queue(struct work_struct *work) { struct cfg80211_chan_def chandef; struct delayed_work *delayed_work = to_delayed_work(work); struct mwifiex_private *priv = container_of(delayed_work, struct mwifiex_private, dfs_cac_work); chandef = priv->dfs_chandef; if (priv->wdev.links[0].cac_started) { mwifiex_dbg(priv->adapter, MSG, "CAC timer finished; No radar detected\n"); cfg80211_cac_event(priv->netdev, &chandef, NL80211_RADAR_CAC_FINISHED, GFP_KERNEL, 0); } } /* This function prepares channel report request command to FW for * starting radar detection. */ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, void *data_buf) { struct host_cmd_ds_chan_rpt_req *cr_req = &cmd->params.chan_rpt_req; struct mwifiex_radar_params *radar_params = (void *)data_buf; cmd->command = cpu_to_le16(HostCmd_CMD_CHAN_REPORT_REQUEST); cmd->size = cpu_to_le16(S_DS_GEN); le16_unaligned_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_chan_rpt_req)); cr_req->chan_desc.start_freq = cpu_to_le16(MWIFIEX_A_BAND_START_FREQ); cr_req->chan_desc.chan_num = radar_params->chandef->chan->hw_value; cr_req->chan_desc.chan_width = radar_params->chandef->width; cr_req->msec_dwell_time = cpu_to_le32(radar_params->cac_time_ms); if (radar_params->cac_time_ms) mwifiex_dbg(priv->adapter, MSG, "11h: issuing DFS Radar check for channel=%d\n", radar_params->chandef->chan->hw_value); else mwifiex_dbg(priv->adapter, MSG, "cancelling CAC\n"); return 0; } int mwifiex_stop_radar_detection(struct mwifiex_private *priv, struct cfg80211_chan_def *chandef) { struct mwifiex_radar_params radar_params; memset(&radar_params, 0, sizeof(struct mwifiex_radar_params)); radar_params.chandef = chandef; radar_params.cac_time_ms = 0; return mwifiex_send_cmd(priv, HostCmd_CMD_CHAN_REPORT_REQUEST, HostCmd_ACT_GEN_SET, 0, &radar_params, true); } /* This function is to abort ongoing CAC upon stopping AP operations * or during unload. */ void mwifiex_abort_cac(struct mwifiex_private *priv) { if (priv->wdev.links[0].cac_started) { if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef)) mwifiex_dbg(priv->adapter, ERROR, "failed to stop CAC in FW\n"); mwifiex_dbg(priv->adapter, MSG, "Aborting delayed work for CAC.\n"); cancel_delayed_work_sync(&priv->dfs_cac_work); cfg80211_cac_event(priv->netdev, &priv->dfs_chandef, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL, 0); } } /* This function handles channel report event from FW during CAC period. * If radar is detected during CAC, driver indicates the same to cfg80211 * and also cancels ongoing delayed work. 
 */
int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
				     struct sk_buff *skb)
{
	struct host_cmd_ds_chan_rpt_event *rpt_event;
	struct mwifiex_ie_types_chan_rpt_data *rpt;
	u16 event_len, tlv_len;

	rpt_event = (void *)(skb->data + sizeof(u32));
	event_len = skb->len - (sizeof(struct host_cmd_ds_chan_rpt_event) +
				sizeof(u32));

	if (le32_to_cpu(rpt_event->result) != HostCmd_RESULT_OK) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "Error in channel report event\n");
		return -1;
	}

	while (event_len >= sizeof(struct mwifiex_ie_types_header)) {
		rpt = (void *)&rpt_event->tlvbuf;
		tlv_len = le16_to_cpu(rpt->header.len);

		switch (le16_to_cpu(rpt->header.type)) {
		case TLV_TYPE_CHANRPT_11H_BASIC:
			if (rpt->map.radar) {
				mwifiex_dbg(priv->adapter, MSG,
					    "RADAR Detected on channel %d!\n",
					    priv->dfs_chandef.chan->hw_value);
				cancel_delayed_work_sync(&priv->dfs_cac_work);
				cfg80211_cac_event(priv->netdev,
						   &priv->dfs_chandef,
						   NL80211_RADAR_DETECTED,
						   GFP_KERNEL, 0);
			}
			break;
		default:
			break;
		}

		event_len -= (tlv_len + sizeof(rpt->header));
	}

	return 0;
}

/* Handler for radar detected event from FW. */
int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
				      struct sk_buff *skb)
{
	struct mwifiex_radar_det_event *rdr_event;

	rdr_event = (void *)(skb->data + sizeof(u32));

	mwifiex_dbg(priv->adapter, MSG,
		    "radar detected; indicating kernel\n");
	if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
		mwifiex_dbg(priv->adapter, ERROR,
			    "Failed to stop CAC in FW\n");
	cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef,
			     GFP_KERNEL);
	mwifiex_dbg(priv->adapter, MSG, "regdomain: %d\n",
		    rdr_event->reg_domain);
	mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n",
		    rdr_event->det_type);

	return 0;
}

/* This is the work queue function for channel switch handling.
 * It takes care of updating the new channel definition in the
 * bss config structure, restarting the AP and indicating channel
 * switch success to cfg80211.
 */
void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
{
	struct mwifiex_uap_bss_param *bss_cfg;
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct mwifiex_private *priv =
			container_of(delayed_work, struct mwifiex_private,
				     dfs_chan_sw_work);

	bss_cfg = &priv->bss_cfg;
	if (!bss_cfg->beacon_period) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "channel switch: AP already stopped\n");
		return;
	}

	mwifiex_uap_set_channel(priv, bss_cfg, priv->dfs_chandef);

	if (mwifiex_config_start_uap(priv, bss_cfg)) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "Failed to start AP after channel switch\n");
		return;
	}

	mwifiex_dbg(priv->adapter, MSG,
		    "indicating channel switch completion to kernel\n");

	wiphy_lock(priv->wdev.wiphy);
	cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0);
	wiphy_unlock(priv->wdev.wiphy);
}
// SPDX-License-Identifier: GPL-2.0+ /* net/sched/act_ctinfo.c netfilter ctinfo connmark actions * * Copyright (c) 2019 Kevin Darbyshire-Bryant <[email protected]> */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/pkt_cls.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/act_api.h> #include <net/pkt_cls.h> #include <uapi/linux/tc_act/tc_ctinfo.h> #include <net/tc_act/tc_ctinfo.h> #include <net/tc_wrapper.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_zones.h> static struct tc_action_ops act_ctinfo_ops; static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca, struct tcf_ctinfo_params *cp, struct sk_buff *skb, int wlen, int proto) { u8 dscp, newdscp; newdscp = (((READ_ONCE(ct->mark) & cp->dscpmask) >> cp->dscpmaskshift) << 2) & ~INET_ECN_MASK; switch (proto) { case NFPROTO_IPV4: dscp = ipv4_get_dsfield(ip_hdr(skb)) & ~INET_ECN_MASK; if (dscp != newdscp) { if (likely(!skb_try_make_writable(skb, wlen))) { ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, newdscp); ca->stats_dscp_set++; } else { ca->stats_dscp_error++; } } break; case NFPROTO_IPV6: dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & ~INET_ECN_MASK; if (dscp != newdscp) { if (likely(!skb_try_make_writable(skb, wlen))) { ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, newdscp); ca->stats_dscp_set++; } else { ca->stats_dscp_error++; } } break; default: break; } } static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca, struct tcf_ctinfo_params *cp, struct sk_buff *skb) { ca->stats_cpmark_set++; skb->mark = READ_ONCE(ct->mark) & cp->cpmarkmask; } TC_INDIRECT_SCOPE int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { const struct nf_conntrack_tuple_hash *thash = NULL; struct tcf_ctinfo *ca = to_ctinfo(a); struct nf_conntrack_tuple tuple; struct nf_conntrack_zone zone; enum ip_conntrack_info ctinfo; struct tcf_ctinfo_params *cp; struct nf_conn *ct; int proto, wlen; int action; cp = rcu_dereference_bh(ca->params); tcf_lastuse_update(&ca->tcf_tm); tcf_action_update_bstats(&ca->common, skb); action = READ_ONCE(ca->tcf_action); wlen = skb_network_offset(skb); switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): wlen += sizeof(struct iphdr); if (!pskb_may_pull(skb, wlen)) goto out; proto = NFPROTO_IPV4; break; case htons(ETH_P_IPV6): wlen += sizeof(struct ipv6hdr); if (!pskb_may_pull(skb, wlen)) goto out; proto = NFPROTO_IPV6; break; default: goto out; } ct = nf_ct_get(skb, &ctinfo); if (!ct) { /* look harder, usually ingress */ if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, cp->net, &tuple)) goto out; zone.id = cp->zone; zone.dir = NF_CT_DEFAULT_ZONE_DIR; thash = nf_conntrack_find_get(cp->net, &zone, &tuple); if (!thash) goto out; ct = nf_ct_tuplehash_to_ctrack(thash); } if (cp->mode & CTINFO_MODE_DSCP) if (!cp->dscpstatemask || (READ_ONCE(ct->mark) & cp->dscpstatemask)) tcf_ctinfo_dscp_set(ct, ca, cp, skb, wlen, proto); if (cp->mode & CTINFO_MODE_CPMARK) tcf_ctinfo_cpmark_set(ct, ca, cp, skb); if (thash) nf_ct_put(ct); out: return action; } static const struct nla_policy ctinfo_policy[TCA_CTINFO_MAX + 1] = { [TCA_CTINFO_ACT] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ctinfo)), [TCA_CTINFO_ZONE] = { .type = NLA_U16 }, [TCA_CTINFO_PARMS_DSCP_MASK] = { .type = NLA_U32 }, 
[TCA_CTINFO_PARMS_DSCP_STATEMASK] = { .type = NLA_U32 }, [TCA_CTINFO_PARMS_CPMARK_MASK] = { .type = NLA_U32 }, }; static int tcf_ctinfo_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, act_ctinfo_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; u32 dscpmask = 0, dscpstatemask, index; struct nlattr *tb[TCA_CTINFO_MAX + 1]; struct tcf_ctinfo_params *cp_new; struct tcf_chain *goto_ch = NULL; struct tc_ctinfo *actparm; struct tcf_ctinfo *ci; u8 dscpmaskshift; int ret = 0, err; if (!nla) { NL_SET_ERR_MSG_MOD(extack, "ctinfo requires attributes to be passed"); return -EINVAL; } err = nla_parse_nested(tb, TCA_CTINFO_MAX, nla, ctinfo_policy, extack); if (err < 0) return err; if (!tb[TCA_CTINFO_ACT]) { NL_SET_ERR_MSG_MOD(extack, "Missing required TCA_CTINFO_ACT attribute"); return -EINVAL; } actparm = nla_data(tb[TCA_CTINFO_ACT]); /* do some basic validation here before dynamically allocating things */ /* that we would otherwise have to clean up. */ if (tb[TCA_CTINFO_PARMS_DSCP_MASK]) { dscpmask = nla_get_u32(tb[TCA_CTINFO_PARMS_DSCP_MASK]); /* need contiguous 6 bit mask */ dscpmaskshift = dscpmask ? __ffs(dscpmask) : 0; if ((~0 & (dscpmask >> dscpmaskshift)) != 0x3f) { NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CTINFO_PARMS_DSCP_MASK], "dscp mask must be 6 contiguous bits"); return -EINVAL; } dscpstatemask = nla_get_u32_default(tb[TCA_CTINFO_PARMS_DSCP_STATEMASK], 0); /* mask & statemask must not overlap */ if (dscpmask & dscpstatemask) { NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CTINFO_PARMS_DSCP_STATEMASK], "dscp statemask must not overlap dscp mask"); return -EINVAL; } } /* done the validation:now to the actual action allocation */ index = actparm->index; err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create_from_flags(tn, index, est, a, &act_ctinfo_ops, bind, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; } ret = ACT_P_CREATED; } else if (err > 0) { if (bind) /* don't override defaults */ return ACT_P_BOUND; if (!(flags & TCA_ACT_FLAGS_REPLACE)) { tcf_idr_release(*a, bind); return -EEXIST; } } else { return err; } err = tcf_action_check_ctrlact(actparm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; ci = to_ctinfo(*a); cp_new = kzalloc(sizeof(*cp_new), GFP_KERNEL); if (unlikely(!cp_new)) { err = -ENOMEM; goto put_chain; } cp_new->net = net; cp_new->zone = nla_get_u16_default(tb[TCA_CTINFO_ZONE], 0); if (dscpmask) { cp_new->dscpmask = dscpmask; cp_new->dscpmaskshift = dscpmaskshift; cp_new->dscpstatemask = dscpstatemask; cp_new->mode |= CTINFO_MODE_DSCP; } if (tb[TCA_CTINFO_PARMS_CPMARK_MASK]) { cp_new->cpmarkmask = nla_get_u32(tb[TCA_CTINFO_PARMS_CPMARK_MASK]); cp_new->mode |= CTINFO_MODE_CPMARK; } spin_lock_bh(&ci->tcf_lock); goto_ch = tcf_action_set_ctrlact(*a, actparm->action, goto_ch); cp_new = rcu_replace_pointer(ci->params, cp_new, lockdep_is_held(&ci->tcf_lock)); spin_unlock_bh(&ci->tcf_lock); if (goto_ch) tcf_chain_put_by_act(goto_ch); if (cp_new) kfree_rcu(cp_new, rcu); return ret; put_chain: if (goto_ch) tcf_chain_put_by_act(goto_ch); release_idr: tcf_idr_release(*a, bind); return err; } static int tcf_ctinfo_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { struct tcf_ctinfo *ci = to_ctinfo(a); struct tc_ctinfo opt = { .index = ci->tcf_index, .refcnt = refcount_read(&ci->tcf_refcnt) - ref, .bindcnt = atomic_read(&ci->tcf_bindcnt) - bind, }; unsigned char *b = 
skb_tail_pointer(skb); struct tcf_ctinfo_params *cp; struct tcf_t t; spin_lock_bh(&ci->tcf_lock); cp = rcu_dereference_protected(ci->params, lockdep_is_held(&ci->tcf_lock)); tcf_tm_dump(&t, &ci->tcf_tm); if (nla_put_64bit(skb, TCA_CTINFO_TM, sizeof(t), &t, TCA_CTINFO_PAD)) goto nla_put_failure; opt.action = ci->tcf_action; if (nla_put(skb, TCA_CTINFO_ACT, sizeof(opt), &opt)) goto nla_put_failure; if (nla_put_u16(skb, TCA_CTINFO_ZONE, cp->zone)) goto nla_put_failure; if (cp->mode & CTINFO_MODE_DSCP) { if (nla_put_u32(skb, TCA_CTINFO_PARMS_DSCP_MASK, cp->dscpmask)) goto nla_put_failure; if (nla_put_u32(skb, TCA_CTINFO_PARMS_DSCP_STATEMASK, cp->dscpstatemask)) goto nla_put_failure; } if (cp->mode & CTINFO_MODE_CPMARK) { if (nla_put_u32(skb, TCA_CTINFO_PARMS_CPMARK_MASK, cp->cpmarkmask)) goto nla_put_failure; } if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_SET, ci->stats_dscp_set, TCA_CTINFO_PAD)) goto nla_put_failure; if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_ERROR, ci->stats_dscp_error, TCA_CTINFO_PAD)) goto nla_put_failure; if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_CPMARK_SET, ci->stats_cpmark_set, TCA_CTINFO_PAD)) goto nla_put_failure; spin_unlock_bh(&ci->tcf_lock); return skb->len; nla_put_failure: spin_unlock_bh(&ci->tcf_lock); nlmsg_trim(skb, b); return -1; } static void tcf_ctinfo_cleanup(struct tc_action *a) { struct tcf_ctinfo *ci = to_ctinfo(a); struct tcf_ctinfo_params *cp; cp = rcu_dereference_protected(ci->params, 1); if (cp) kfree_rcu(cp, rcu); } static struct tc_action_ops act_ctinfo_ops = { .kind = "ctinfo", .id = TCA_ID_CTINFO, .owner = THIS_MODULE, .act = tcf_ctinfo_act, .dump = tcf_ctinfo_dump, .init = tcf_ctinfo_init, .cleanup= tcf_ctinfo_cleanup, .size = sizeof(struct tcf_ctinfo), }; MODULE_ALIAS_NET_ACT("ctinfo"); static __net_init int ctinfo_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, act_ctinfo_ops.net_id); return tc_action_net_init(net, tn, &act_ctinfo_ops); } static void __net_exit ctinfo_exit_net(struct list_head *net_list) { tc_action_net_exit(net_list, act_ctinfo_ops.net_id); } static struct pernet_operations ctinfo_net_ops = { .init = ctinfo_init_net, .exit_batch = ctinfo_exit_net, .id = &act_ctinfo_ops.net_id, .size = sizeof(struct tc_action_net), }; static int __init ctinfo_init_module(void) { return tcf_register_action(&act_ctinfo_ops, &ctinfo_net_ops); } static void __exit ctinfo_cleanup_module(void) { tcf_unregister_action(&act_ctinfo_ops, &ctinfo_net_ops); } module_init(ctinfo_init_module); module_exit(ctinfo_cleanup_module); MODULE_AUTHOR("Kevin Darbyshire-Bryant <[email protected]>"); MODULE_DESCRIPTION("Connection tracking mark actions"); MODULE_LICENSE("GPL");
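
/*
 * Illustrative usage (not part of this module): with an iproute2 build that
 * supports the ctinfo action, a DSCP value previously stashed in the
 * conntrack mark can be restored on ingress roughly like this. The device,
 * masks and priority below are example values only, and the exact tc syntax
 * depends on the installed iproute2 version:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol all prio 10 u32 match u32 0 0 \
 *       action ctinfo dscp 0xfc000000 0x01000000
 *
 * Here 0xfc000000 selects six contiguous DSCP bits stored in the conntrack
 * mark (cp->dscpmask above) and 0x01000000 is the non-overlapping statemask
 * bit that gates whether the rewrite is applied (cp->dscpstatemask).
 */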
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Access to user system call parameters and results * * See asm-generic/syscall.h for function descriptions. * * Copyright (C) 2015 Mickaël Salaün <[email protected]> */ #ifndef __UM_SYSCALL_GENERIC_H #define __UM_SYSCALL_GENERIC_H #include <asm/ptrace.h> #include <linux/err.h> #include <linux/sched.h> #include <sysdep/ptrace.h> static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { return PT_REGS_SYSCALL_NR(regs); } static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { /* do nothing */ } static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { const long error = regs_return_value(regs); return IS_ERR_VALUE(error) ? error : 0; } static inline long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) { return regs_return_value(regs); } static inline void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val) { PT_REGS_SET_SYSCALL_RETURN(regs, (long) error ?: val); } static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, unsigned long *args) { const struct uml_pt_regs *r = &regs->regs; *args++ = UPT_SYSCALL_ARG1(r); *args++ = UPT_SYSCALL_ARG2(r); *args++ = UPT_SYSCALL_ARG3(r); *args++ = UPT_SYSCALL_ARG4(r); *args++ = UPT_SYSCALL_ARG5(r); *args = UPT_SYSCALL_ARG6(r); } /* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */ #endif /* __UM_SYSCALL_GENERIC_H */
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2021 Xillybus Ltd, http://www.xillybus.com * * Header file for the Xillybus class */ #ifndef __XILLYBUS_CLASS_H #define __XILLYBUS_CLASS_H #include <linux/types.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/module.h> int xillybus_init_chrdev(struct device *dev, const struct file_operations *fops, struct module *owner, void *private_data, unsigned char *idt, unsigned int len, int num_nodes, const char *prefix, bool enumerate); void xillybus_cleanup_chrdev(void *private_data, struct device *dev); int xillybus_find_inode(struct inode *inode, void **private_data, int *index); #endif /* __XILLYBUS_CLASS_H */
// SPDX-License-Identifier: GPL-2.0-only /* * ROHM BD99954 charger driver * * Copyright (C) 2020 Rohm Semiconductors * Originally written by: * Mikko Mutanen <[email protected]> * Markus Laine <[email protected]> * Bugs added by: * Matti Vaittinen <[email protected]> */ /* * The battery charging profile of BD99954. * * Curve (1) represents charging current. * Curve (2) represents battery voltage. * * The BD99954 data sheet divides charging to three phases. * a) Trickle-charge with constant current (8). * b) pre-charge with constant current (6) * c) fast-charge, first with constant current (5) phase. After * the battery voltage has reached target level (4) we have constant * voltage phase until charging current has dropped to termination * level (7) * * V ^ ^ I * . . * . . *(4)` `.` ` ` ` ` ` ` ` ` ` ` ` ` ` ----------------------------. * . :/ . * . o----+/:/ ` ` ` ` ` ` ` ` ` ` ` ` `.` ` (5) * . + :: + . * . + /- -- . * . +`/- + . * . o/- -: . * . .s. +` . * . .--+ `/ . * . ..`` + .: . * . -` + -- . * . (2) ...`` + :- . * . ...`` + -: . *(3)` `.`."" ` ` ` `+-------- ` ` ` ` ` ` `.:` ` ` ` ` ` ` ` ` .` ` (6) * . + `:. . * . + -: . * . + -:. . * . + .--. . * . (1) + `.+` ` ` `.` ` (7) * -..............` ` ` ` ` ` ` ` ` ` ` ` ` ` ` ` ` + ` ` ` .` ` (8) * . + - * -------------------------------------------------+++++++++--> * | trickle | pre | fast | * * Details of DT properties for different limits can be found from BD99954 * device tree binding documentation. */ #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/linear_range.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/power_supply.h> #include <linux/property.h> #include <linux/regmap.h> #include <linux/types.h> #include "bd99954-charger.h" /* Initial field values, converted to initial register values */ struct bd9995x_init_data { u16 vsysreg_set; /* VSYS Regulation Setting */ u16 ibus_lim_set; /* VBUS input current limitation */ u16 icc_lim_set; /* VCC/VACP Input Current Limit Setting */ u16 itrich_set; /* Trickle-charge Current Setting */ u16 iprech_set; /* Pre-Charge Current Setting */ u16 ichg_set; /* Fast-Charge constant current */ u16 vfastchg_reg_set1; /* Fast Charging Regulation Voltage */ u16 vprechg_th_set; /* Pre-charge Voltage Threshold Setting */ u16 vrechg_set; /* Re-charge Battery Voltage Setting */ u16 vbatovp_set; /* Battery Over Voltage Threshold Setting */ u16 iterm_set; /* Charging termination current */ }; struct bd9995x_state { u8 online; u16 chgstm_status; u16 vbat_vsys_status; u16 vbus_vcc_status; }; struct bd9995x_device { struct i2c_client *client; struct device *dev; struct power_supply *charger; struct regmap *rmap; struct regmap_field *rmap_fields[F_MAX_FIELDS]; int chip_id; int chip_rev; struct bd9995x_init_data init_data; struct bd9995x_state state; struct mutex lock; /* Protect state data */ }; static const struct regmap_range bd9995x_readonly_reg_ranges[] = { regmap_reg_range(CHGSTM_STATUS, SEL_ILIM_VAL), regmap_reg_range(IOUT_DACIN_VAL, IOUT_DACIN_VAL), regmap_reg_range(VCC_UCD_STATUS, VCC_IDD_STATUS), regmap_reg_range(VBUS_UCD_STATUS, VBUS_IDD_STATUS), regmap_reg_range(CHIP_ID, CHIP_REV), regmap_reg_range(SYSTEM_STATUS, SYSTEM_STATUS), regmap_reg_range(IBATP_VAL, VBAT_AVE_VAL), regmap_reg_range(VTH_VAL, EXTIADP_AVE_VAL), }; static const struct regmap_access_table bd9995x_writeable_regs = { .no_ranges = bd9995x_readonly_reg_ranges, .n_no_ranges = 
ARRAY_SIZE(bd9995x_readonly_reg_ranges), }; static const struct regmap_range bd9995x_volatile_reg_ranges[] = { regmap_reg_range(CHGSTM_STATUS, WDT_STATUS), regmap_reg_range(VCC_UCD_STATUS, VCC_IDD_STATUS), regmap_reg_range(VBUS_UCD_STATUS, VBUS_IDD_STATUS), regmap_reg_range(INT0_STATUS, INT7_STATUS), regmap_reg_range(SYSTEM_STATUS, SYSTEM_CTRL_SET), regmap_reg_range(IBATP_VAL, EXTIADP_AVE_VAL), /* Measurement regs */ }; static const struct regmap_access_table bd9995x_volatile_regs = { .yes_ranges = bd9995x_volatile_reg_ranges, .n_yes_ranges = ARRAY_SIZE(bd9995x_volatile_reg_ranges), }; static const struct regmap_range_cfg regmap_range_cfg[] = { { .selector_reg = MAP_SET, .selector_mask = 0xFFFF, .selector_shift = 0, .window_start = 0, .window_len = 0x100, .range_min = 0 * 0x100, .range_max = 3 * 0x100, }, }; static const struct regmap_config bd9995x_regmap_config = { .reg_bits = 8, .val_bits = 16, .reg_stride = 1, .max_register = 3 * 0x100, .cache_type = REGCACHE_RBTREE, .ranges = regmap_range_cfg, .num_ranges = ARRAY_SIZE(regmap_range_cfg), .val_format_endian = REGMAP_ENDIAN_LITTLE, .wr_table = &bd9995x_writeable_regs, .volatile_table = &bd9995x_volatile_regs, }; enum bd9995x_chrg_fault { CHRG_FAULT_NORMAL, CHRG_FAULT_INPUT, CHRG_FAULT_THERMAL_SHUTDOWN, CHRG_FAULT_TIMER_EXPIRED, }; static int bd9995x_get_prop_batt_health(struct bd9995x_device *bd) { int ret, tmp; ret = regmap_field_read(bd->rmap_fields[F_BATTEMP], &tmp); if (ret) return POWER_SUPPLY_HEALTH_UNKNOWN; /* TODO: Check these against datasheet page 34 */ switch (tmp) { case ROOM: return POWER_SUPPLY_HEALTH_GOOD; case HOT1: case HOT2: case HOT3: return POWER_SUPPLY_HEALTH_OVERHEAT; case COLD1: case COLD2: return POWER_SUPPLY_HEALTH_COLD; case TEMP_DIS: case BATT_OPEN: default: return POWER_SUPPLY_HEALTH_UNKNOWN; } } static int bd9995x_get_prop_charge_type(struct bd9995x_device *bd) { int ret, tmp; ret = regmap_field_read(bd->rmap_fields[F_CHGSTM_STATE], &tmp); if (ret) return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN; switch (tmp) { case CHGSTM_TRICKLE_CHARGE: case CHGSTM_PRE_CHARGE: return POWER_SUPPLY_CHARGE_TYPE_TRICKLE; case CHGSTM_FAST_CHARGE: return POWER_SUPPLY_CHARGE_TYPE_FAST; case CHGSTM_TOP_OFF: case CHGSTM_DONE: case CHGSTM_SUSPEND: return POWER_SUPPLY_CHARGE_TYPE_NONE; default: /* Rest of the states are error related, no charging */ return POWER_SUPPLY_CHARGE_TYPE_NONE; } } static bool bd9995x_get_prop_batt_present(struct bd9995x_device *bd) { int ret, tmp; ret = regmap_field_read(bd->rmap_fields[F_BATTEMP], &tmp); if (ret) return false; return tmp != BATT_OPEN; } static int bd9995x_get_prop_batt_voltage(struct bd9995x_device *bd) { int ret, tmp; ret = regmap_field_read(bd->rmap_fields[F_VBAT_VAL], &tmp); if (ret) return 0; tmp = min(tmp, 19200); return tmp * 1000; } static int bd9995x_get_prop_batt_current(struct bd9995x_device *bd) { int ret, tmp; ret = regmap_field_read(bd->rmap_fields[F_IBATP_VAL], &tmp); if (ret) return 0; return tmp * 1000; } #define DEFAULT_BATTERY_TEMPERATURE 250 static int bd9995x_get_prop_batt_temp(struct bd9995x_device *bd) { int ret, tmp; ret = regmap_field_read(bd->rmap_fields[F_THERM_VAL], &tmp); if (ret) return DEFAULT_BATTERY_TEMPERATURE; return (200 - tmp) * 10; } static int bd9995x_power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { int ret, tmp; struct bd9995x_device *bd = power_supply_get_drvdata(psy); struct bd9995x_state state; mutex_lock(&bd->lock); state = bd->state; mutex_unlock(&bd->lock); switch (psp) { case 
POWER_SUPPLY_PROP_STATUS: switch (state.chgstm_status) { case CHGSTM_TRICKLE_CHARGE: case CHGSTM_PRE_CHARGE: case CHGSTM_FAST_CHARGE: case CHGSTM_TOP_OFF: val->intval = POWER_SUPPLY_STATUS_CHARGING; break; case CHGSTM_DONE: val->intval = POWER_SUPPLY_STATUS_FULL; break; case CHGSTM_SUSPEND: case CHGSTM_TEMPERATURE_ERROR_1: case CHGSTM_TEMPERATURE_ERROR_2: case CHGSTM_TEMPERATURE_ERROR_3: case CHGSTM_TEMPERATURE_ERROR_4: case CHGSTM_TEMPERATURE_ERROR_5: case CHGSTM_TEMPERATURE_ERROR_6: case CHGSTM_TEMPERATURE_ERROR_7: case CHGSTM_THERMAL_SHUT_DOWN_1: case CHGSTM_THERMAL_SHUT_DOWN_2: case CHGSTM_THERMAL_SHUT_DOWN_3: case CHGSTM_THERMAL_SHUT_DOWN_4: case CHGSTM_THERMAL_SHUT_DOWN_5: case CHGSTM_THERMAL_SHUT_DOWN_6: case CHGSTM_THERMAL_SHUT_DOWN_7: case CHGSTM_BATTERY_ERROR: val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; break; default: val->intval = POWER_SUPPLY_STATUS_UNKNOWN; break; } break; case POWER_SUPPLY_PROP_MANUFACTURER: val->strval = BD9995X_MANUFACTURER; break; case POWER_SUPPLY_PROP_ONLINE: val->intval = state.online; break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: ret = regmap_field_read(bd->rmap_fields[F_IBATP_VAL], &tmp); if (ret) return ret; val->intval = tmp * 1000; break; case POWER_SUPPLY_PROP_CHARGE_AVG: ret = regmap_field_read(bd->rmap_fields[F_IBATP_AVE_VAL], &tmp); if (ret) return ret; val->intval = tmp * 1000; break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: /* * Currently the DT uses this property to give the * target current for fast-charging constant current phase. * I think it is correct in a sense. * * Yet, this prop we read and return here is the programmed * safety limit for combined input currents. This feels * also correct in a sense. * * However, this results a mismatch to DT value and value * read from sysfs. */ ret = regmap_field_read(bd->rmap_fields[F_SEL_ILIM_VAL], &tmp); if (ret) return ret; val->intval = tmp * 1000; break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: if (!state.online) { val->intval = 0; break; } ret = regmap_field_read(bd->rmap_fields[F_VFASTCHG_REG_SET1], &tmp); if (ret) return ret; /* * The actual range : 2560 to 19200 mV. 
No matter what the * register says */ val->intval = clamp_val(tmp << 4, 2560, 19200); val->intval *= 1000; break; case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: ret = regmap_field_read(bd->rmap_fields[F_ITERM_SET], &tmp); if (ret) return ret; /* Start step is 64 mA */ val->intval = tmp << 6; /* Maximum is 1024 mA - no matter what register says */ val->intval = min(val->intval, 1024); val->intval *= 1000; break; /* Battery properties which we access through charger */ case POWER_SUPPLY_PROP_PRESENT: val->intval = bd9995x_get_prop_batt_present(bd); break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: val->intval = bd9995x_get_prop_batt_voltage(bd); break; case POWER_SUPPLY_PROP_CURRENT_NOW: val->intval = bd9995x_get_prop_batt_current(bd); break; case POWER_SUPPLY_PROP_CHARGE_TYPE: val->intval = bd9995x_get_prop_charge_type(bd); break; case POWER_SUPPLY_PROP_HEALTH: val->intval = bd9995x_get_prop_batt_health(bd); break; case POWER_SUPPLY_PROP_TEMP: val->intval = bd9995x_get_prop_batt_temp(bd); break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = POWER_SUPPLY_TECHNOLOGY_LION; break; case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = "bd99954"; break; default: return -EINVAL; } return 0; } static int bd9995x_get_chip_state(struct bd9995x_device *bd, struct bd9995x_state *state) { int i, ret, tmp; struct { struct regmap_field *id; u16 *data; } state_fields[] = { { bd->rmap_fields[F_CHGSTM_STATE], &state->chgstm_status, }, { bd->rmap_fields[F_VBAT_VSYS_STATUS], &state->vbat_vsys_status, }, { bd->rmap_fields[F_VBUS_VCC_STATUS], &state->vbus_vcc_status, }, }; for (i = 0; i < ARRAY_SIZE(state_fields); i++) { ret = regmap_field_read(state_fields[i].id, &tmp); if (ret) return ret; *state_fields[i].data = tmp; } if (state->vbus_vcc_status & STATUS_VCC_DET || state->vbus_vcc_status & STATUS_VBUS_DET) state->online = 1; else state->online = 0; return 0; } static irqreturn_t bd9995x_irq_handler_thread(int irq, void *private) { struct bd9995x_device *bd = private; int ret, status, mask, i; unsigned long tmp; struct bd9995x_state state; /* * The bd9995x does not seem to generate big amount of interrupts. * The logic regarding which interrupts can cause relevant * status changes seem to be pretty complex. * * So lets implement really simple and hopefully bullet-proof handler: * It does not really matter which IRQ we handle, we just go and * re-read all interesting statuses + give the framework a nudge. * * Other option would be building a _complex_ and error prone logic * trying to decide what could have been changed (resulting this IRQ * we are now handling). During the normal operation the BD99954 does * not seem to be generating much of interrupts so benefit from such * logic would probably be minimal. 
*/ ret = regmap_read(bd->rmap, INT0_STATUS, &status); if (ret) { dev_err(bd->dev, "Failed to read IRQ status\n"); return IRQ_NONE; } ret = regmap_field_read(bd->rmap_fields[F_INT0_SET], &mask); if (ret) { dev_err(bd->dev, "Failed to read IRQ mask\n"); return IRQ_NONE; } /* Handle only IRQs that are not masked */ status &= mask; tmp = status; /* Lowest bit does not represent any sub-registers */ tmp >>= 1; /* * Mask and ack IRQs we will handle (+ the idiot bit) */ ret = regmap_field_write(bd->rmap_fields[F_INT0_SET], 0); if (ret) { dev_err(bd->dev, "Failed to mask F_INT0\n"); return IRQ_NONE; } ret = regmap_write(bd->rmap, INT0_STATUS, status); if (ret) { dev_err(bd->dev, "Failed to ack F_INT0\n"); goto err_umask; } for_each_set_bit(i, &tmp, 7) { int sub_status, sub_mask; static const int sub_status_reg[] = { INT1_STATUS, INT2_STATUS, INT3_STATUS, INT4_STATUS, INT5_STATUS, INT6_STATUS, INT7_STATUS, }; struct regmap_field *sub_mask_f[] = { bd->rmap_fields[F_INT1_SET], bd->rmap_fields[F_INT2_SET], bd->rmap_fields[F_INT3_SET], bd->rmap_fields[F_INT4_SET], bd->rmap_fields[F_INT5_SET], bd->rmap_fields[F_INT6_SET], bd->rmap_fields[F_INT7_SET], }; /* Clear sub IRQs */ ret = regmap_read(bd->rmap, sub_status_reg[i], &sub_status); if (ret) { dev_err(bd->dev, "Failed to read IRQ sub-status\n"); goto err_umask; } ret = regmap_field_read(sub_mask_f[i], &sub_mask); if (ret) { dev_err(bd->dev, "Failed to read IRQ sub-mask\n"); goto err_umask; } /* Ack active sub-statuses */ sub_status &= sub_mask; ret = regmap_write(bd->rmap, sub_status_reg[i], sub_status); if (ret) { dev_err(bd->dev, "Failed to ack sub-IRQ\n"); goto err_umask; } } ret = regmap_field_write(bd->rmap_fields[F_INT0_SET], mask); if (ret) /* May as well retry once */ goto err_umask; /* Read whole chip state */ ret = bd9995x_get_chip_state(bd, &state); if (ret < 0) { dev_err(bd->dev, "Failed to read chip state\n"); } else { mutex_lock(&bd->lock); bd->state = state; mutex_unlock(&bd->lock); power_supply_changed(bd->charger); } return IRQ_HANDLED; err_umask: ret = regmap_field_write(bd->rmap_fields[F_INT0_SET], mask); if (ret) dev_err(bd->dev, "Failed to un-mask F_INT0 - IRQ permanently disabled\n"); return IRQ_NONE; } static int __bd9995x_chip_reset(struct bd9995x_device *bd) { int ret, state; int rst_check_counter = 10; u16 tmp = ALLRST | OTPLD; ret = regmap_raw_write(bd->rmap, SYSTEM_CTRL_SET, &tmp, 2); if (ret < 0) return ret; do { ret = regmap_field_read(bd->rmap_fields[F_OTPLD_STATE], &state); if (ret) return ret; msleep(10); } while (state == 0 && --rst_check_counter); if (!rst_check_counter) { dev_err(bd->dev, "chip reset not completed\n"); return -ETIMEDOUT; } tmp = 0; ret = regmap_raw_write(bd->rmap, SYSTEM_CTRL_SET, &tmp, 2); return ret; } static int bd9995x_hw_init(struct bd9995x_device *bd) { int ret; int i; struct bd9995x_state state; struct bd9995x_init_data *id = &bd->init_data; const struct { enum bd9995x_fields id; u16 value; } init_data[] = { /* Enable the charging trigger after SDP charger attached */ {F_SDP_CHG_TRIG_EN, 1}, /* Enable charging trigger after SDP charger attached */ {F_SDP_CHG_TRIG, 1}, /* Disable charging trigger by BC1.2 detection */ {F_VBUS_BC_DISEN, 1}, /* Disable charging trigger by BC1.2 detection */ {F_VCC_BC_DISEN, 1}, /* Disable automatic limitation of the input current */ {F_ILIM_AUTO_DISEN, 1}, /* Select current limitation when SDP charger attached*/ {F_SDP_500_SEL, 1}, /* Select current limitation when DCP charger attached */ {F_DCP_2500_SEL, 1}, {F_VSYSREG_SET, id->vsysreg_set}, /* Activate USB 
charging and DC/DC converter */ {F_USB_SUS, 0}, /* DCDC clock: 1200 kHz*/ {F_DCDC_CLK_SEL, 3}, /* Enable charging */ {F_CHG_EN, 1}, /* Disable Input current Limit setting voltage measurement */ {F_EXTIADPEN, 0}, /* Disable input current limiting */ {F_VSYS_PRIORITY, 1}, {F_IBUS_LIM_SET, id->ibus_lim_set}, {F_ICC_LIM_SET, id->icc_lim_set}, /* Charge Termination Current Setting to 0*/ {F_ITERM_SET, id->iterm_set}, /* Trickle-charge Current Setting */ {F_ITRICH_SET, id->itrich_set}, /* Pre-charge Current setting */ {F_IPRECH_SET, id->iprech_set}, /* Fast Charge Current for constant current phase */ {F_ICHG_SET, id->ichg_set}, /* Fast Charge Voltage Regulation Setting */ {F_VFASTCHG_REG_SET1, id->vfastchg_reg_set1}, /* Set Pre-charge Voltage Threshold for trickle charging. */ {F_VPRECHG_TH_SET, id->vprechg_th_set}, {F_VRECHG_SET, id->vrechg_set}, {F_VBATOVP_SET, id->vbatovp_set}, /* Reverse buck boost voltage Setting */ {F_VRBOOST_SET, 0}, /* Disable fast-charging watchdog */ {F_WDT_FST, 0}, /* Disable pre-charging watchdog */ {F_WDT_PRE, 0}, /* Power save off */ {F_POWER_SAVE_MODE, 0}, {F_INT1_SET, INT1_ALL}, {F_INT2_SET, INT2_ALL}, {F_INT3_SET, INT3_ALL}, {F_INT4_SET, INT4_ALL}, {F_INT5_SET, INT5_ALL}, {F_INT6_SET, INT6_ALL}, {F_INT7_SET, INT7_ALL}, }; /* * Currently we initialize charger to a known state at startup. * If we want to allow for example the boot code to initialize * charger we should get rid of this. */ ret = __bd9995x_chip_reset(bd); if (ret < 0) return ret; /* Initialize currents/voltages and other parameters */ for (i = 0; i < ARRAY_SIZE(init_data); i++) { ret = regmap_field_write(bd->rmap_fields[init_data[i].id], init_data[i].value); if (ret) { dev_err(bd->dev, "failed to initialize charger (%d)\n", ret); return ret; } } ret = bd9995x_get_chip_state(bd, &state); if (ret < 0) return ret; mutex_lock(&bd->lock); bd->state = state; mutex_unlock(&bd->lock); return 0; } static enum power_supply_property bd9995x_power_supply_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, POWER_SUPPLY_PROP_CHARGE_AVG, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, /* Battery props we access through charger */ POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_MODEL_NAME, }; static const struct power_supply_desc bd9995x_power_supply_desc = { .name = "bd9995x-charger", .type = POWER_SUPPLY_TYPE_USB, .properties = bd9995x_power_supply_props, .num_properties = ARRAY_SIZE(bd9995x_power_supply_props), .get_property = bd9995x_power_supply_get_property, }; /* * Limit configurations for vbus-input-current and vcc-vacp-input-current * Minimum limit is 0 uA. Max is 511 * 32000 uA = 16352000 uA. This is * configured by writing a register so that each increment in register * value equals to 32000 uA limit increment. * * Eg, value 0x0 is limit 0, value 0x1 is limit 32000, ... * Describe the setting in linear_range table. 
*/ static const struct linear_range input_current_limit_ranges[] = { LINEAR_RANGE(0, 0x0, 0x1ff, 32000), }; /* Possible trickle, pre-charging and termination current values */ static const struct linear_range charging_current_ranges[] = { LINEAR_RANGE(0, 0x0, 0x10, 64000), LINEAR_RANGE(1024000, 0x11, 0x1f, 0), }; /* * Fast charging voltage regulation, starting re-charging limit * and battery over voltage protection have same possible values */ static const struct linear_range charge_voltage_regulation_ranges[] = { LINEAR_RANGE(2560000, 0, 0xA0, 0), LINEAR_RANGE(2560000, 0xA0, 0x4B0, 16000), LINEAR_RANGE(19200000, 0x4B0, 0x7FF, 0), }; /* Possible VSYS voltage regulation values */ static const struct linear_range vsys_voltage_regulation_ranges[] = { LINEAR_RANGE(2560000, 0, 0x28, 0), LINEAR_RANGE(2560000, 0x28, 0x12C, 64000), LINEAR_RANGE(19200000, 0x12C, 0x1FF, 0), }; /* Possible settings for switching from trickle to pre-charging limits */ static const struct linear_range trickle_to_pre_threshold_ranges[] = { LINEAR_RANGE(2048000, 0, 0x20, 0), LINEAR_RANGE(2048000, 0x20, 0x12C, 64000), LINEAR_RANGE(19200000, 0x12C, 0x1FF, 0), }; /* Possible current values for fast-charging constant current phase */ static const struct linear_range fast_charge_current_ranges[] = { LINEAR_RANGE(0, 0, 0xFF, 64000), }; struct battery_init { const char *name; int *info_data; const struct linear_range *range; int ranges; u16 *data; }; struct dt_init { char *prop; const struct linear_range *range; int ranges; u16 *data; }; static int bd9995x_fw_probe(struct bd9995x_device *bd) { int ret; struct power_supply_battery_info *info; u32 property; int i; int regval; bool found; struct bd9995x_init_data *init = &bd->init_data; struct battery_init battery_inits[] = { { .name = "trickle-charging current", .range = &charging_current_ranges[0], .ranges = 2, .data = &init->itrich_set, }, { .name = "pre-charging current", .range = &charging_current_ranges[0], .ranges = 2, .data = &init->iprech_set, }, { .name = "pre-to-trickle charge voltage threshold", .range = &trickle_to_pre_threshold_ranges[0], .ranges = 2, .data = &init->vprechg_th_set, }, { .name = "charging termination current", .range = &charging_current_ranges[0], .ranges = 2, .data = &init->iterm_set, }, { .name = "charging re-start voltage", .range = &charge_voltage_regulation_ranges[0], .ranges = 2, .data = &init->vrechg_set, }, { .name = "battery overvoltage limit", .range = &charge_voltage_regulation_ranges[0], .ranges = 2, .data = &init->vbatovp_set, }, { .name = "fast-charging max current", .range = &fast_charge_current_ranges[0], .ranges = 1, .data = &init->ichg_set, }, { .name = "fast-charging voltage", .range = &charge_voltage_regulation_ranges[0], .ranges = 2, .data = &init->vfastchg_reg_set1, }, }; struct dt_init props[] = { { .prop = "rohm,vsys-regulation-microvolt", .range = &vsys_voltage_regulation_ranges[0], .ranges = 2, .data = &init->vsysreg_set, }, { .prop = "rohm,vbus-input-current-limit-microamp", .range = &input_current_limit_ranges[0], .ranges = 1, .data = &init->ibus_lim_set, }, { .prop = "rohm,vcc-input-current-limit-microamp", .range = &input_current_limit_ranges[0], .ranges = 1, .data = &init->icc_lim_set, }, }; /* * The power_supply_get_battery_info() does not support getting values * from ACPI. Let's fix it if ACPI is required here. 
*/ ret = power_supply_get_battery_info(bd->charger, &info); if (ret < 0) return ret; /* Put pointers to the generic battery info */ battery_inits[0].info_data = &info->tricklecharge_current_ua; battery_inits[1].info_data = &info->precharge_current_ua; battery_inits[2].info_data = &info->precharge_voltage_max_uv; battery_inits[3].info_data = &info->charge_term_current_ua; battery_inits[4].info_data = &info->charge_restart_voltage_uv; battery_inits[5].info_data = &info->overvoltage_limit_uv; battery_inits[6].info_data = &info->constant_charge_current_max_ua; battery_inits[7].info_data = &info->constant_charge_voltage_max_uv; for (i = 0; i < ARRAY_SIZE(battery_inits); i++) { int val = *battery_inits[i].info_data; const struct linear_range *range = battery_inits[i].range; int ranges = battery_inits[i].ranges; if (val == -EINVAL) continue; ret = linear_range_get_selector_low_array(range, ranges, val, &regval, &found); if (ret) { dev_err(bd->dev, "Unsupported value for %s\n", battery_inits[i].name); power_supply_put_battery_info(bd->charger, info); return -EINVAL; } if (!found) { dev_warn(bd->dev, "Unsupported value for %s - using smaller\n", battery_inits[i].name); } *(battery_inits[i].data) = regval; } power_supply_put_battery_info(bd->charger, info); for (i = 0; i < ARRAY_SIZE(props); i++) { ret = device_property_read_u32(bd->dev, props[i].prop, &property); if (ret < 0) { dev_err(bd->dev, "failed to read %s", props[i].prop); return ret; } ret = linear_range_get_selector_low_array(props[i].range, props[i].ranges, property, &regval, &found); if (ret) { dev_err(bd->dev, "Unsupported value for '%s'\n", props[i].prop); return -EINVAL; } if (!found) { dev_warn(bd->dev, "Unsupported value for '%s' - using smaller\n", props[i].prop); } *(props[i].data) = regval; } return 0; } static void bd9995x_chip_reset(void *bd) { __bd9995x_chip_reset(bd); } static int bd9995x_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct bd9995x_device *bd; struct power_supply_config psy_cfg = {}; int ret; int i; bd = devm_kzalloc(dev, sizeof(*bd), GFP_KERNEL); if (!bd) return -ENOMEM; bd->client = client; bd->dev = dev; psy_cfg.drv_data = bd; psy_cfg.of_node = dev->of_node; mutex_init(&bd->lock); bd->rmap = devm_regmap_init_i2c(client, &bd9995x_regmap_config); if (IS_ERR(bd->rmap)) { dev_err(dev, "Failed to setup register access via i2c\n"); return PTR_ERR(bd->rmap); } for (i = 0; i < ARRAY_SIZE(bd9995x_reg_fields); i++) { const struct reg_field *reg_fields = bd9995x_reg_fields; bd->rmap_fields[i] = devm_regmap_field_alloc(dev, bd->rmap, reg_fields[i]); if (IS_ERR(bd->rmap_fields[i])) { dev_err(dev, "cannot allocate regmap field\n"); return PTR_ERR(bd->rmap_fields[i]); } } i2c_set_clientdata(client, bd); ret = regmap_field_read(bd->rmap_fields[F_CHIP_ID], &bd->chip_id); if (ret) { dev_err(dev, "Cannot read chip ID.\n"); return ret; } if (bd->chip_id != BD99954_ID) { dev_err(dev, "Chip with ID=0x%x, not supported!\n", bd->chip_id); return -ENODEV; } ret = regmap_field_read(bd->rmap_fields[F_CHIP_REV], &bd->chip_rev); if (ret) { dev_err(dev, "Cannot read revision.\n"); return ret; } dev_info(bd->dev, "Found BD99954 chip rev %d\n", bd->chip_rev); /* * We need to init the psy before we can call * power_supply_get_battery_info() for it */ bd->charger = devm_power_supply_register(bd->dev, &bd9995x_power_supply_desc, &psy_cfg); if (IS_ERR(bd->charger)) { dev_err(dev, "Failed to register power supply\n"); return PTR_ERR(bd->charger); } ret = bd9995x_fw_probe(bd); if (ret < 0) { dev_err(dev, "Cannot read 
device properties.\n"); return ret; } ret = bd9995x_hw_init(bd); if (ret < 0) { dev_err(dev, "Cannot initialize the chip.\n"); return ret; } ret = devm_add_action_or_reset(dev, bd9995x_chip_reset, bd); if (ret) return ret; return devm_request_threaded_irq(dev, client->irq, NULL, bd9995x_irq_handler_thread, IRQF_TRIGGER_LOW | IRQF_ONESHOT, BD9995X_IRQ_PIN, bd); } static const struct of_device_id bd9995x_of_match[] = { { .compatible = "rohm,bd99954", }, { } }; MODULE_DEVICE_TABLE(of, bd9995x_of_match); static struct i2c_driver bd9995x_driver = { .driver = { .name = "bd9995x-charger", .of_match_table = bd9995x_of_match, }, .probe = bd9995x_probe, }; module_i2c_driver(bd9995x_driver); MODULE_AUTHOR("Laine Markus <[email protected]>"); MODULE_DESCRIPTION("ROHM BD99954 charger driver"); MODULE_LICENSE("GPL");
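/*
 * Illustrative sketch, not part of the driver above: how the linear_range
 * tables in bd99954-charger.c map a requested microamp/microvolt value to a
 * register selector, mirroring what bd9995x_fw_probe() obtains from
 * linear_range_get_selector_low_array(). The struct and helper below are
 * simplified stand-ins for <linux/linear_range.h>, not the kernel API, and
 * handle only a single range with a non-zero step.
 */
#include <stdio.h>

struct example_linear_range {
	unsigned int min;	/* value represented by min_sel */
	unsigned int min_sel;	/* lowest selector */
	unsigned int max_sel;	/* highest selector */
	unsigned int step;	/* value increment per selector step */
};

/* Largest selector whose represented value does not exceed 'val'. */
static int example_get_selector_low(const struct example_linear_range *r,
				    unsigned int val, unsigned int *sel)
{
	if (val < r->min)
		return -1;	/* would be -EINVAL in kernel code */
	*sel = r->min_sel + (val - r->min) / r->step;
	if (*sel > r->max_sel)
		*sel = r->max_sel;
	return 0;
}

int main(void)
{
	/* Matches input_current_limit_ranges[]: LINEAR_RANGE(0, 0x0, 0x1ff, 32000) */
	const struct example_linear_range vbus_lim = {
		.min = 0, .min_sel = 0x0, .max_sel = 0x1ff, .step = 32000,
	};
	unsigned int sel;

	/* A 500000 uA request maps to selector 15, i.e. 15 * 32000 = 480000 uA */
	if (!example_get_selector_low(&vbus_lim, 500000, &sel))
		printf("selector 0x%x -> %u uA\n", sel,
		       vbus_lim.min + sel * vbus_lim.step);
	return 0;
}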
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * include/linux/amba/pl022.h
 *
 * Copyright (C) 2008-2009 ST-Ericsson AB
 * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
 *
 * Author: Linus Walleij <[email protected]>
 *
 * Initial version inspired by:
 *	linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
 * Initial adoption to PL022 by:
 *	Sachin Verma <[email protected]>
 */

#ifndef _SSP_PL022_H
#define _SSP_PL022_H

#include <linux/dmaengine.h>
#include <linux/types.h>

/**
 * enum ssp_loopback - whether SSP is in loopback mode or not
 */
enum ssp_loopback {
	LOOPBACK_DISABLED,
	LOOPBACK_ENABLED
};

/**
 * enum ssp_interface - interfaces allowed for this SSP Controller
 * @SSP_INTERFACE_MOTOROLA_SPI: Motorola Interface
 * @SSP_INTERFACE_TI_SYNC_SERIAL: Texas Instrument Synchronous Serial
 *	interface
 * @SSP_INTERFACE_NATIONAL_MICROWIRE: National Semiconductor Microwire
 *	interface
 * @SSP_INTERFACE_UNIDIRECTIONAL: Unidirectional interface (STn8810
 *	&STn8815 only)
 */
enum ssp_interface {
	SSP_INTERFACE_MOTOROLA_SPI,
	SSP_INTERFACE_TI_SYNC_SERIAL,
	SSP_INTERFACE_NATIONAL_MICROWIRE,
	SSP_INTERFACE_UNIDIRECTIONAL
};

/**
 * enum ssp_hierarchy - whether SSP is configured as Master or Slave
 */
enum ssp_hierarchy {
	SSP_MASTER,
	SSP_SLAVE
};

/**
 * struct ssp_clock_params - clock parameters, to set SSP clock at a
 * desired freq
 */
struct ssp_clock_params {
	u8 cpsdvsr;	/* value from 2 to 254 (even only!) */
	u8 scr;		/* value from 0 to 255 */
};

/**
 * enum ssp_rx_endian - endianness of Rx FIFO Data
 * this feature is only available in ST versions of PL022
 */
enum ssp_rx_endian {
	SSP_RX_MSB,
	SSP_RX_LSB
};

/**
 * enum ssp_tx_endian - endianness of Tx FIFO Data
 */
enum ssp_tx_endian {
	SSP_TX_MSB,
	SSP_TX_LSB
};

/**
 * enum ssp_data_size - number of bits in one data element
 */
enum ssp_data_size {
	SSP_DATA_BITS_4 = 0x03, SSP_DATA_BITS_5, SSP_DATA_BITS_6,
	SSP_DATA_BITS_7, SSP_DATA_BITS_8, SSP_DATA_BITS_9,
	SSP_DATA_BITS_10, SSP_DATA_BITS_11, SSP_DATA_BITS_12,
	SSP_DATA_BITS_13, SSP_DATA_BITS_14, SSP_DATA_BITS_15,
	SSP_DATA_BITS_16, SSP_DATA_BITS_17, SSP_DATA_BITS_18,
	SSP_DATA_BITS_19, SSP_DATA_BITS_20, SSP_DATA_BITS_21,
	SSP_DATA_BITS_22, SSP_DATA_BITS_23, SSP_DATA_BITS_24,
	SSP_DATA_BITS_25, SSP_DATA_BITS_26, SSP_DATA_BITS_27,
	SSP_DATA_BITS_28, SSP_DATA_BITS_29, SSP_DATA_BITS_30,
	SSP_DATA_BITS_31, SSP_DATA_BITS_32
};

/**
 * enum ssp_mode - SSP mode of operation (Communication modes)
 */
enum ssp_mode {
	INTERRUPT_TRANSFER,
	POLLING_TRANSFER,
	DMA_TRANSFER
};

/**
 * enum ssp_rx_level_trig - receive FIFO watermark level which triggers
 * IT: the interrupt fires when _N_ or more elements are in the RX FIFO.
 */
enum ssp_rx_level_trig {
	SSP_RX_1_OR_MORE_ELEM,
	SSP_RX_4_OR_MORE_ELEM,
	SSP_RX_8_OR_MORE_ELEM,
	SSP_RX_16_OR_MORE_ELEM,
	SSP_RX_32_OR_MORE_ELEM
};

/**
 * enum ssp_tx_level_trig - transmit FIFO watermark level which triggers
 * IT: the interrupt fires when _N_ or more empty locations are in the TX FIFO.
 */
enum ssp_tx_level_trig {
	SSP_TX_1_OR_MORE_EMPTY_LOC,
	SSP_TX_4_OR_MORE_EMPTY_LOC,
	SSP_TX_8_OR_MORE_EMPTY_LOC,
	SSP_TX_16_OR_MORE_EMPTY_LOC,
	SSP_TX_32_OR_MORE_EMPTY_LOC
};

/**
 * enum ssp_spi_clk_phase - clock phase (Motorola SPI interface only)
 * @SSP_CLK_FIRST_EDGE: Receive data on first edge transition (actual direction depends on polarity)
 * @SSP_CLK_SECOND_EDGE: Receive data on second edge transition (actual direction depends on polarity)
 */
enum ssp_spi_clk_phase {
	SSP_CLK_FIRST_EDGE,
	SSP_CLK_SECOND_EDGE
};

/**
 * enum ssp_spi_clk_pol - clock polarity (Motorola SPI interface only)
 * @SSP_CLK_POL_IDLE_LOW: Low inactive level
 * @SSP_CLK_POL_IDLE_HIGH: High inactive level
 */
enum ssp_spi_clk_pol {
	SSP_CLK_POL_IDLE_LOW,
	SSP_CLK_POL_IDLE_HIGH
};

/**
 * enum ssp_microwire_ctrl_len - Microwire control lengths; command size
 * in microwire format
 */
enum ssp_microwire_ctrl_len {
	SSP_BITS_4 = 0x03, SSP_BITS_5, SSP_BITS_6,
	SSP_BITS_7, SSP_BITS_8, SSP_BITS_9,
	SSP_BITS_10, SSP_BITS_11, SSP_BITS_12,
	SSP_BITS_13, SSP_BITS_14, SSP_BITS_15,
	SSP_BITS_16, SSP_BITS_17, SSP_BITS_18,
	SSP_BITS_19, SSP_BITS_20, SSP_BITS_21,
	SSP_BITS_22, SSP_BITS_23, SSP_BITS_24,
	SSP_BITS_25, SSP_BITS_26, SSP_BITS_27,
	SSP_BITS_28, SSP_BITS_29, SSP_BITS_30,
	SSP_BITS_31, SSP_BITS_32
};

/**
 * enum ssp_microwire_wait_state - Microwire wait state
 * @SSP_MWIRE_WAIT_ZERO: No wait state inserted after last command bit
 * @SSP_MWIRE_WAIT_ONE: One wait state inserted after last command bit
 */
enum ssp_microwire_wait_state {
	SSP_MWIRE_WAIT_ZERO,
	SSP_MWIRE_WAIT_ONE
};

/**
 * enum ssp_duplex - whether Full/Half Duplex on microwire, only
 * available in the ST Micro variant.
 * @SSP_MICROWIRE_CHANNEL_FULL_DUPLEX: SSPTXD becomes bi-directional,
 *	SSPRXD not used
 * @SSP_MICROWIRE_CHANNEL_HALF_DUPLEX: SSPTXD is an output, SSPRXD is
 *	an input.
 */
enum ssp_duplex {
	SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
	SSP_MICROWIRE_CHANNEL_HALF_DUPLEX
};

/**
 * enum ssp_clkdelay - an optional clock delay on the feedback clock
 * only available in the ST Micro PL023 variant.
 * @SSP_FEEDBACK_CLK_DELAY_NONE: no delay, the data coming in from the
 *	slave is sampled directly
 * @SSP_FEEDBACK_CLK_DELAY_1T: the incoming slave data is sampled with
 *	a delay of T-dt
 * @SSP_FEEDBACK_CLK_DELAY_2T: ditto with a delay of 2T-dt
 * @SSP_FEEDBACK_CLK_DELAY_3T: ditto with a delay of 3T-dt
 * @SSP_FEEDBACK_CLK_DELAY_4T: ditto with a delay of 4T-dt
 * @SSP_FEEDBACK_CLK_DELAY_5T: ditto with a delay of 5T-dt
 * @SSP_FEEDBACK_CLK_DELAY_6T: ditto with a delay of 6T-dt
 * @SSP_FEEDBACK_CLK_DELAY_7T: ditto with a delay of 7T-dt
 */
enum ssp_clkdelay {
	SSP_FEEDBACK_CLK_DELAY_NONE,
	SSP_FEEDBACK_CLK_DELAY_1T,
	SSP_FEEDBACK_CLK_DELAY_2T,
	SSP_FEEDBACK_CLK_DELAY_3T,
	SSP_FEEDBACK_CLK_DELAY_4T,
	SSP_FEEDBACK_CLK_DELAY_5T,
	SSP_FEEDBACK_CLK_DELAY_6T,
	SSP_FEEDBACK_CLK_DELAY_7T
};

/**
 * enum ssp_chip_select - CHIP select/deselect commands
 */
enum ssp_chip_select {
	SSP_CHIP_SELECT,
	SSP_CHIP_DESELECT
};

struct dma_chan;

/**
 * struct pl022_ssp_controller - device.platform_data for SPI controller devices.
 * @bus_id: identifier for this bus
 * @enable_dma: if true enables DMA driven transfers.
 * @dma_filter: callback filter for dma_request_channel.
 * @dma_rx_param: parameter to locate an RX DMA channel.
 * @dma_tx_param: parameter to locate a TX DMA channel.
 * @autosuspend_delay: delay in ms following transfer completion before the
 *	runtime power management system suspends the device. A setting of 0
 *	indicates no delay and the device will be suspended immediately.
 * @rt: indicates the controller should run the message pump with realtime
 *	priority to minimise the transfer latency on the bus.
 */
struct pl022_ssp_controller {
	u16 bus_id;
	u8 enable_dma:1;
	dma_filter_fn dma_filter;
	void *dma_rx_param;
	void *dma_tx_param;
	int autosuspend_delay;
	bool rt;
};

/**
 * struct pl022_config_chip - spi_board_info.controller_data for SPI
 * slave devices, copied to spi_device.controller_data.
 *
 * @iface: Interface type (Motorola, TI, Microwire, Universal)
 * @hierarchy: sets whether interface is master or slave
 * @slave_tx_disable: SSPTXD is disconnected (in slave mode only)
 * @clk_freq: Tune freq parameters of SSP (when in master mode)
 * @com_mode: communication mode: polling, Interrupt or DMA
 * @rx_lev_trig: Rx FIFO watermark level (for IT & DMA mode)
 * @tx_lev_trig: Tx FIFO watermark level (for IT & DMA mode)
 * @ctrl_len: Microwire interface: Control length
 * @wait_state: Microwire interface: Wait state
 * @duplex: Microwire interface: Full/Half duplex
 * @clkdelay: on the PL023 variant, the delay in feedback clock cycles
 *	before sampling the incoming line
 */
struct pl022_config_chip {
	enum ssp_interface iface;
	enum ssp_hierarchy hierarchy;
	bool slave_tx_disable;
	struct ssp_clock_params clk_freq;
	enum ssp_mode com_mode;
	enum ssp_rx_level_trig rx_lev_trig;
	enum ssp_tx_level_trig tx_lev_trig;
	enum ssp_microwire_ctrl_len ctrl_len;
	enum ssp_microwire_wait_state wait_state;
	enum ssp_duplex duplex;
	enum ssp_clkdelay clkdelay;
};

#endif /* _SSP_PL022_H */
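/*
 * Illustrative sketch, not part of the header above: how a machine/board file
 * might fill in the two structures defined here when registering a PL022 port
 * and describing an SPI slave. The instance names and the specific values are
 * made up for the example; clk_freq simply follows the documented constraints
 * (cpsdvsr even and in 2..254, scr in 0..255).
 */
static struct pl022_ssp_controller example_ssp0_plat_data = {
	.bus_id = 0,
	.enable_dma = 0,		/* interrupt/polling driven transfers */
	.autosuspend_delay = 100,	/* ms of idle before runtime suspend */
	.rt = false,
};

static struct pl022_config_chip example_spi_chip_info = {
	.iface = SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy = SSP_MASTER,
	.slave_tx_disable = false,
	.clk_freq = {
		.cpsdvsr = 2,
		.scr = 8,
	},
	.com_mode = INTERRUPT_TRANSFER,
	.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
	.ctrl_len = SSP_BITS_8,			/* only used for Microwire */
	.wait_state = SSP_MWIRE_WAIT_ZERO,	/* only used for Microwire */
	.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
};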
// SPDX-License-Identifier: GPL-2.0 /* * AD5672R, AD5674R, AD5676, AD5676R, AD5679R, * AD5681R, AD5682R, AD5683, AD5683R, AD5684, * AD5684R, AD5685R, AD5686, AD5686R * Digital to analog converters driver * * Copyright 2018 Analog Devices Inc. */ #include "ad5686.h" #include <linux/module.h> #include <linux/spi/spi.h> static int ad5686_spi_write(struct ad5686_state *st, u8 cmd, u8 addr, u16 val) { struct spi_device *spi = to_spi_device(st->dev); u8 tx_len, *buf; switch (st->chip_info->regmap_type) { case AD5310_REGMAP: st->data[0].d16 = cpu_to_be16(AD5310_CMD(cmd) | val); buf = &st->data[0].d8[0]; tx_len = 2; break; case AD5683_REGMAP: st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) | AD5683_DATA(val)); buf = &st->data[0].d8[1]; tx_len = 3; break; case AD5686_REGMAP: st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) | AD5686_ADDR(addr) | val); buf = &st->data[0].d8[1]; tx_len = 3; break; default: return -EINVAL; } return spi_write(spi, buf, tx_len); } static int ad5686_spi_read(struct ad5686_state *st, u8 addr) { struct spi_transfer t[] = { { .tx_buf = &st->data[0].d8[1], .len = 3, .cs_change = 1, }, { .tx_buf = &st->data[1].d8[1], .rx_buf = &st->data[2].d8[1], .len = 3, }, }; struct spi_device *spi = to_spi_device(st->dev); u8 cmd = 0; int ret; switch (st->chip_info->regmap_type) { case AD5310_REGMAP: return -ENOTSUPP; case AD5683_REGMAP: cmd = AD5686_CMD_READBACK_ENABLE_V2; break; case AD5686_REGMAP: cmd = AD5686_CMD_READBACK_ENABLE; break; default: return -EINVAL; } st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) | AD5686_ADDR(addr)); st->data[1].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_NOOP)); ret = spi_sync_transfer(spi, t, ARRAY_SIZE(t)); if (ret < 0) return ret; return be32_to_cpu(st->data[2].d32); } static int ad5686_spi_probe(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); return ad5686_probe(&spi->dev, id->driver_data, id->name, ad5686_spi_write, ad5686_spi_read); } static void ad5686_spi_remove(struct spi_device *spi) { ad5686_remove(&spi->dev); } static const struct spi_device_id ad5686_spi_id[] = { {"ad5310r", ID_AD5310R}, {"ad5672r", ID_AD5672R}, {"ad5674r", ID_AD5674R}, {"ad5676", ID_AD5676}, {"ad5676r", ID_AD5676R}, {"ad5679r", ID_AD5679R}, {"ad5681r", ID_AD5681R}, {"ad5682r", ID_AD5682R}, {"ad5683", ID_AD5683}, {"ad5683r", ID_AD5683R}, {"ad5684", ID_AD5684}, {"ad5684r", ID_AD5684R}, {"ad5685", ID_AD5685R}, /* Does not exist */ {"ad5685r", ID_AD5685R}, {"ad5686", ID_AD5686}, {"ad5686r", ID_AD5686R}, {} }; MODULE_DEVICE_TABLE(spi, ad5686_spi_id); static struct spi_driver ad5686_spi_driver = { .driver = { .name = "ad5686", }, .probe = ad5686_spi_probe, .remove = ad5686_spi_remove, .id_table = ad5686_spi_id, }; module_spi_driver(ad5686_spi_driver); MODULE_AUTHOR("Stefan Popa <[email protected]>"); MODULE_DESCRIPTION("Analog Devices AD5686 and similar multi-channel DACs"); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS("IIO_AD5686");
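/*
 * Illustrative sketch, not part of the driver above: why ad5686_spi_write()
 * builds the frame as a 32-bit big-endian word and then passes &d8[1] with
 * tx_len = 3 to spi_write(). Big-endian storage puts the most significant
 * byte first in memory, so skipping byte 0 leaves exactly the 24-bit frame,
 * MSB first (the precise cmd/addr/data packing comes from AD5686_CMD() and
 * AD5686_ADDR() in ad5686.h and is not reproduced here). Userspace htonl()
 * stands in for cpu_to_be32().
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t word = htonl(0x00312345);	/* some 24-bit cmd/addr/data frame */
	const uint8_t *buf = (const uint8_t *)&word;

	/* Prints "31 23 45" - the three bytes that would go out on the bus */
	printf("%02x %02x %02x\n", buf[1], buf[2], buf[3]);
	return 0;
}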
/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2012-2016,2018-2019, The Linux Foundation. All rights reserved. */ #ifndef WIL6210_TXRX_EDMA_H #define WIL6210_TXRX_EDMA_H #include "wil6210.h" /* limit status ring size in range [ring size..max ring size] */ #define WIL_SRING_SIZE_ORDER_MIN (WIL_RING_SIZE_ORDER_MIN) #define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX) /* RX sring order should be bigger than RX ring order */ #define WIL_RX_SRING_SIZE_ORDER_DEFAULT (12) #define WIL_TX_SRING_SIZE_ORDER_DEFAULT (14) #define WIL_RX_BUFF_ARR_SIZE_DEFAULT (2600) #define WIL_DEFAULT_RX_STATUS_RING_ID 0 #define WIL_RX_DESC_RING_ID 0 #define WIL_RX_STATUS_IRQ_IDX 0 #define WIL_TX_STATUS_IRQ_IDX 1 #define WIL_EDMA_AGG_WATERMARK (0xffff) #define WIL_EDMA_AGG_WATERMARK_POS (16) #define WIL_EDMA_IDLE_TIME_LIMIT_USEC (50) #define WIL_EDMA_TIME_UNIT_CLK_CYCLES (330) /* fits 1 usec */ /* Error field */ #define WIL_RX_EDMA_ERROR_MIC (1) #define WIL_RX_EDMA_ERROR_KEY (2) /* Key missing */ #define WIL_RX_EDMA_ERROR_REPLAY (3) #define WIL_RX_EDMA_ERROR_AMSDU (4) #define WIL_RX_EDMA_ERROR_FCS (7) #define WIL_RX_EDMA_ERROR_L3_ERR (BIT(0) | BIT(1)) #define WIL_RX_EDMA_ERROR_L4_ERR (BIT(0) | BIT(1)) #define WIL_RX_EDMA_DLPF_LU_MISS_BIT BIT(11) #define WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK 0x7 #define WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK 0xf #define WIL_RX_EDMA_DLPF_LU_MISS_CID_POS 2 #define WIL_RX_EDMA_DLPF_LU_HIT_CID_POS 4 #define WIL_RX_EDMA_DLPF_LU_MISS_TID_POS 5 #define WIL_RX_EDMA_MID_VALID_BIT BIT(20) #define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16 #define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6 #define WIL_EDMA_DESC_TX_CFG_EOP_POS 0 #define WIL_EDMA_DESC_TX_CFG_EOP_LEN 1 #define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS 3 #define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_LEN 2 #define WIL_EDMA_DESC_TX_CFG_SEG_EN_POS 5 #define WIL_EDMA_DESC_TX_CFG_SEG_EN_LEN 1 #define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS 6 #define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_LEN 1 #define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS 7 #define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_LEN 1 #define WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS 15 #define WIL_EDMA_DESC_TX_CFG_L4_TYPE_LEN 1 #define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS 5 #define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_LEN 1 /* Enhanced Rx descriptor - MAC part * [dword 0] : Reserved * [dword 1] : Reserved * [dword 2] : Reserved * [dword 3] * bit 0..15 : Buffer ID * bit 16..31 : Reserved */ struct wil_ring_rx_enhanced_mac { u32 d[3]; __le16 buff_id; u16 reserved; } __packed; /* Enhanced Rx descriptor - DMA part * [dword 0] - Reserved * [dword 1] * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31 * [dword 2] * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47 * bit 16..31 : Reserved * [dword 3] * bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63 * bit 16..31 : length */ struct wil_ring_rx_enhanced_dma { u32 d0; struct wil_ring_dma_addr addr; u16 w5; __le16 addr_high_high; __le16 length; } __packed; struct wil_rx_enhanced_desc { struct wil_ring_rx_enhanced_mac mac; struct wil_ring_rx_enhanced_dma dma; } __packed; /* Enhanced Tx descriptor - DMA part * [dword 0] * Same as legacy * [dword 1] * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31 * [dword 2] * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47 * bit 16..23 : ip_length:8 The IP header length for the TX IP checksum * offload feature * bit 24..30 : mac_length:7 * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6 * [dword 3] * bit 0..15 : 
addr_high_high:16 The payload buffer address, bits 48-63 * bit 16..31 : length */ struct wil_ring_tx_enhanced_dma { u8 l4_hdr_len; u8 cmd; u16 w1; struct wil_ring_dma_addr addr; u8 ip_length; u8 b11; /* 0..6: mac_length; 7:ip_version */ __le16 addr_high_high; __le16 length; } __packed; /* Enhanced Tx descriptor - MAC part * [dword 0] * bit 0.. 9 : lifetime_expiry_value:10 * bit 10 : interrupt_en:1 * bit 11 : status_en:1 * bit 12..13 : txss_override:2 * bit 14 : timestamp_insertion:1 * bit 15 : duration_preserve:1 * bit 16..21 : reserved0:6 * bit 22..26 : mcs_index:5 * bit 27 : mcs_en:1 * bit 28..30 : reserved1:3 * bit 31 : sn_preserved:1 * [dword 1] * bit 0.. 3 : pkt_mode:4 * bit 4 : pkt_mode_en:1 * bit 5..14 : reserved0:10 * bit 15 : ack_policy_en:1 * bit 16..19 : dst_index:4 * bit 20 : dst_index_en:1 * bit 21..22 : ack_policy:2 * bit 23 : lifetime_en:1 * bit 24..30 : max_retry:7 * bit 31 : max_retry_en:1 * [dword 2] * bit 0.. 7 : num_of_descriptors:8 * bit 8..17 : reserved:10 * bit 18..19 : l2_translation_type:2 00 - bypass, 01 - 802.3, 10 - 802.11 * bit 20 : snap_hdr_insertion_en:1 * bit 21 : vlan_removal_en:1 * bit 22..23 : reserved0:2 * bit 24 : Dest ID extension:1 * bit 25..31 : reserved0:7 * [dword 3] * bit 0..15 : tso_mss:16 * bit 16..31 : descriptor_scratchpad:16 - mailbox between driver and ucode */ struct wil_ring_tx_enhanced_mac { u32 d[3]; __le16 tso_mss; u16 scratchpad; } __packed; struct wil_tx_enhanced_desc { struct wil_ring_tx_enhanced_mac mac; struct wil_ring_tx_enhanced_dma dma; } __packed; #define TX_STATUS_DESC_READY_POS 7 /* Enhanced TX status message * [dword 0] * bit 0.. 7 : Number of Descriptor:8 - The number of descriptors that * are used to form the packets. It is needed for WB when * releasing the packet * bit 8..15 : tx_ring_id:8 The transmission ring ID that is related to * the message * bit 16..23 : Status:8 - The TX status Code * 0x0 - A successful transmission * 0x1 - Retry expired * 0x2 - Lifetime Expired * 0x3 - Released * 0x4-0xFF - Reserved * bit 24..30 : Reserved:7 * bit 31 : Descriptor Ready bit:1 - It is initiated to * zero by the driver when the ring is created. It is set by the HW * to one for each completed status message. Each wrap around, * the DR bit value is flipped. * [dword 1] * bit 0..31 : timestamp:32 - Set when MPDU is transmitted. * [dword 2] * bit 0.. 4 : MCS:5 - The transmitted MCS value * bit 5 : Reserved:1 * bit 6.. 7 : CB mode:2 - 0-DMG 1-EDMG 2-Wide * bit 8..12 : QID:5 - The QID that was used for the transmission * bit 13..15 : Reserved:3 * bit 16..20 : Num of MSDUs:5 - Number of MSDUs in the aggregation * bit 21..22 : Reserved:2 * bit 23 : Retry:1 - An indication that the transmission was retried * bit 24..31 : TX-Sector:8 - the antenna sector that was used for * transmission * [dword 3] * bit 0..11 : Sequence number:12 - The Sequence Number that was used * for the MPDU transmission * bit 12..31 : Reserved:20 */ struct wil_ring_tx_status { u8 num_descriptors; u8 ring_id; u8 status; u8 desc_ready; /* Only the last bit should be set */ u32 timestamp; u32 d2; u16 seq_number; /* Only the first 12 bits */ u16 w7; } __packed; /* Enhanced Rx status message - compressed part * [dword 0] * bit 0.. 2 : L2 Rx Status:3 - The L2 packet reception Status * 0-Success, 1-MIC Error, 2-Key Error, 3-Replay Error, * 4-A-MSDU Error, 5-Reserved, 6-Reserved, 7-FCS Error * bit 3.. 4 : L3 Rx Status:2 - Bit0 - L3I - L3 identified and checksum * calculated, Bit1- L3Err - IPv4 Checksum Error * bit 5.. 
6 : L4 Rx Status:2 - Bit0 - L4I - L4 identified and checksum * calculated, Bit1- L4Err - TCP/UDP Checksum Error * bit 7 : Reserved:1 * bit 8..19 : Flow ID:12 - MSDU flow ID * bit 20 : MID_V:1 - The MAC ID field is valid * bit 21..22 : MID:2 - The MAC ID * bit 23 : L3T:1 - IP types: 0-IPv6, 1-IPv4 * bit 24 : L4T:1 - Layer 4 Type: 0-UDP, 1-TCP * bit 25 : BC:1 - The received MPDU is broadcast * bit 26 : MC:1 - The received MPDU is multicast * bit 27 : Raw:1 - The MPDU received with no translation * bit 28 : Sec:1 - The FC control (b14) - Frame Protected * bit 29 : Error:1 - An error is set when (L2 status != 0) || * (L3 status == 3) || (L4 status == 3) * bit 30 : EOP:1 - End of MSDU signaling. It is set to mark the end * of the transfer, otherwise the status indicates buffer * only completion. * bit 31 : Descriptor Ready bit:1 - It is initiated to * zero by the driver when the ring is created. It is set * by the HW to one for each completed status message. * Each wrap around, the DR bit value is flipped. * [dword 1] * bit 0.. 5 : MAC Len:6 - The number of bytes that are used for L2 header * bit 6..11 : IPLEN:6 - The number of DW that are used for L3 header * bit 12..15 : I4Len:4 - The number of DW that are used for L4 header * bit 16..21 : MCS:6 - The received MCS field from the PLCP Header * bit 22..23 : CB mode:2 - The CB Mode: 0-DMG, 1-EDMG, 2-Wide * bit 24..27 : Data Offset:4 - The data offset, a code that describe the * payload shift from the beginning of the buffer: * 0 - 0 Bytes, 3 - 2 Bytes * bit 28 : A-MSDU Present:1 - The QoS (b7) A-MSDU present field * bit 29 : A-MSDU Type:1 The QoS (b8) A-MSDU Type field * bit 30 : A-MPDU:1 - Packet is part of aggregated MPDU * bit 31 : Key ID:1 - The extracted Key ID from the encryption header * [dword 2] * bit 0..15 : Buffer ID:16 - The Buffer Identifier * bit 16..31 : Length:16 - It indicates the valid bytes that are stored * in the current descriptor buffer. For multiple buffer * descriptor, SW need to sum the total descriptor length * in all buffers to produce the packet length * [dword 3] * bit 0..31 : timestamp:32 - The MPDU Timestamp. */ struct wil_rx_status_compressed { u32 d0; u32 d1; __le16 buff_id; __le16 length; u32 timestamp; } __packed; /* Enhanced Rx status message - extension part * [dword 0] * bit 0.. 4 : QID:5 - The Queue Identifier that the packet is received * from * bit 5.. 7 : Reserved:3 * bit 8..11 : TID:4 - The QoS (b3-0) TID Field * bit 12..15 Source index:4 - The Source index that was found during Parsing the TA. This field is used to define the source of the packet * bit 16..18 : Destination index:3 - The Destination index that was found during Parsing the RA. * bit 19..20 : DS Type:2 - The FC Control (b9-8) - From / To DS * bit 21..22 : MIC ICR:2 - this signal tells the DMA to assert an interrupt after it writes the packet * bit 23 : ESOP:1 - The QoS (b4) ESOP field * bit 24 : RDG:1 * bit 25..31 : Reserved:7 * [dword 1] * bit 0.. 1 : Frame Type:2 - The FC Control (b3-2) - MPDU Type (management, data, control and extension) * bit 2.. 
5 : Syb type:4 - The FC Control (b7-4) - Frame Subtype * bit 6..11 : Ext sub type:6 - The FC Control (b11-8) - Frame Extended * Subtype * bit 12..13 : ACK Policy:2 - The QoS (b6-5) ACK Policy fields * bit 14 : DECRYPT_BYP:1 - The MPDU is bypass by the decryption unit * bit 15..23 : Reserved:9 * bit 24..31 : RSSI/SNR:8 - The RSSI / SNR measurement for the received * MPDU * [dword 2] * bit 0..11 : SN:12 - The received Sequence number field * bit 12..15 : Reserved:4 * bit 16..31 : PN bits [15:0]:16 * [dword 3] * bit 0..31 : PN bits [47:16]:32 */ struct wil_rx_status_extension { u32 d0; u32 d1; __le16 seq_num; /* only lower 12 bits */ struct_group_attr(pn, __packed, u16 pn_15_0; u32 pn_47_16; ); } __packed; struct wil_rx_status_extended { struct wil_rx_status_compressed comp; struct wil_rx_status_extension ext; } __packed; static inline void *wil_skb_rxstatus(struct sk_buff *skb) { return (void *)skb->cb; } static inline __le16 wil_rx_status_get_length(void *msg) { return ((struct wil_rx_status_compressed *)msg)->length; } static inline u8 wil_rx_status_get_mcs(void *msg) { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1, 16, 21); } static inline u8 wil_rx_status_get_cb_mode(void *msg) { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1, 22, 23); } static inline u16 wil_rx_status_get_flow_id(void *msg) { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, 8, 19); } static inline u8 wil_rx_status_get_mcast(void *msg) { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, 26, 26); } /** * In case of DLPF miss the parsing of flow Id should be as follows: * dest_id:2 * src_id :3 - cid * tid:3 * Otherwise: * tid:4 * cid:4 */ static inline u8 wil_rx_status_get_cid(void *msg) { u16 val = wil_rx_status_get_flow_id(msg); if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT) /* CID is in bits 2..4 */ return (val >> WIL_RX_EDMA_DLPF_LU_MISS_CID_POS) & WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK; else /* CID is in bits 4..7 */ return (val >> WIL_RX_EDMA_DLPF_LU_HIT_CID_POS) & WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK; } static inline u8 wil_rx_status_get_tid(void *msg) { u16 val = wil_rx_status_get_flow_id(msg); if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT) /* TID is in bits 5..7 */ return (val >> WIL_RX_EDMA_DLPF_LU_MISS_TID_POS) & WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK; else /* TID is in bits 0..3 */ return val & WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK; } static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */ { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, 30, 30); } static inline void wil_rx_status_reset_buff_id(struct wil_status_ring *s) { ((struct wil_rx_status_compressed *) (s->va + (s->elem_size * s->swhead)))->buff_id = 0; } static inline __le16 wil_rx_status_get_buff_id(void *msg) { return ((struct wil_rx_status_compressed *)msg)->buff_id; } static inline u8 wil_rx_status_get_data_offset(void *msg) { u8 val = WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1, 24, 27); switch (val) { case 0: return 0; case 3: return 2; default: return 0xFF; } } static inline int wil_rx_status_get_frame_type(struct wil6210_priv *wil, void *msg) { if (wil->use_compressed_rx_status) return IEEE80211_FTYPE_DATA; return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1, 0, 1) << 2; } static inline int wil_rx_status_get_fc1(struct wil6210_priv *wil, void *msg) { if (wil->use_compressed_rx_status) return 0; return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1, 0, 5) << 2; } static inline __le16 wil_rx_status_get_seq(struct 
wil6210_priv *wil, void *msg) { if (wil->use_compressed_rx_status) return 0; return ((struct wil_rx_status_extended *)msg)->ext.seq_num; } static inline u8 wil_rx_status_get_retry(void *msg) { /* retry bit is missing in EDMA HW. return 1 to be on the safe side */ return 1; } static inline int wil_rx_status_get_mid(void *msg) { if (!(((struct wil_rx_status_compressed *)msg)->d0 & WIL_RX_EDMA_MID_VALID_BIT)) return 0; /* use the default MID */ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, 21, 22); } static inline int wil_rx_status_get_error(void *msg) { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, 29, 29); } static inline int wil_rx_status_get_l2_rx_status(void *msg) { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, 0, 2); } static inline int wil_rx_status_get_l3_rx_status(void *msg) { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, 3, 4); } static inline int wil_rx_status_get_l4_rx_status(void *msg) { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, 5, 6); } /* L4 L3 Expected result * 0 0 Ok. No L3 and no L4 known protocols found. * Treated as L2 packet. (no offloads on this packet) * 0 1 Ok. It means that L3 was found, and checksum check passed. * No known L4 protocol was found. * 0 2 It means that L3 protocol was found, and checksum check failed. * No L4 known protocol was found. * 1 any Ok. It means that L4 was found, and checksum check passed. * 3 0 Not a possible scenario. * 3 1 Recalculate. It means that L3 protocol was found, and checksum * passed. But L4 checksum failed. Need to see if really failed, * or due to fragmentation. * 3 2 Both L3 and L4 checksum check failed. */ static inline int wil_rx_status_get_checksum(void *msg, struct wil_net_stats *stats) { int l3_rx_status = wil_rx_status_get_l3_rx_status(msg); int l4_rx_status = wil_rx_status_get_l4_rx_status(msg); if (l4_rx_status == 1) return CHECKSUM_UNNECESSARY; if (l4_rx_status == 0 && l3_rx_status == 1) return CHECKSUM_UNNECESSARY; if (l3_rx_status == 0 && l4_rx_status == 0) /* L2 packet */ return CHECKSUM_NONE; /* If HW reports bad checksum, let IP stack re-check it * For example, HW doesn't understand Microsoft IP stack that * mis-calculates TCP checksum - if it should be 0x0, * it writes 0xffff in violation of RFC 1624 */ stats->rx_csum_err++; return CHECKSUM_NONE; } static inline int wil_rx_status_get_security(void *msg) { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, 28, 28); } static inline u8 wil_rx_status_get_key_id(void *msg) { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1, 31, 31); } static inline u8 wil_tx_status_get_mcs(struct wil_ring_tx_status *msg) { return WIL_GET_BITS(msg->d2, 0, 4); } static inline u32 wil_ring_next_head(struct wil_ring *ring) { return (ring->swhead + 1) % ring->size; } static inline void wil_desc_set_addr_edma(struct wil_ring_dma_addr *addr, __le16 *addr_high_high, dma_addr_t pa) { addr->addr_low = cpu_to_le32(lower_32_bits(pa)); addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa)); *addr_high_high = cpu_to_le16((u16)(upper_32_bits(pa) >> 16)); } static inline dma_addr_t wil_tx_desc_get_addr_edma(struct wil_ring_tx_enhanced_dma *dma) { return le32_to_cpu(dma->addr.addr_low) | ((u64)le16_to_cpu(dma->addr.addr_high) << 32) | ((u64)le16_to_cpu(dma->addr_high_high) << 48); } static inline dma_addr_t wil_rx_desc_get_addr_edma(struct wil_ring_rx_enhanced_dma *dma) { return le32_to_cpu(dma->addr.addr_low) | ((u64)le16_to_cpu(dma->addr.addr_high) << 32) | 
((u64)le16_to_cpu(dma->addr_high_high) << 48); } void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil); int wil_tx_sring_handler(struct wil6210_priv *wil, struct wil_status_ring *sring); void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota); void wil_init_txrx_ops_edma(struct wil6210_priv *wil); #endif /* WIL6210_TXRX_EDMA_H */
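/*
 * Illustrative sketch, not part of the header above: the two flow-ID layouts
 * decoded by wil_rx_status_get_cid() and wil_rx_status_get_tid(). On a DLPF
 * lookup miss (bit 11 set) the field is dest:2 | cid:3 | tid:3; on a hit it
 * is tid:4 | cid:4. The constants mirror the WIL_RX_EDMA_DLPF_* defines
 * above; main() only demonstrates the bit arithmetic on made-up values.
 */
#include <stdint.h>
#include <stdio.h>

#define DLPF_LU_MISS_BIT	(1u << 11)
#define DLPF_MISS_CID_TID_MASK	0x7u
#define DLPF_HIT_CID_TID_MASK	0xfu
#define DLPF_MISS_CID_POS	2
#define DLPF_HIT_CID_POS	4
#define DLPF_MISS_TID_POS	5

int main(void)
{
	/* Miss case: dest=1, cid=5, tid=3, plus the miss bit */
	uint16_t miss = DLPF_LU_MISS_BIT | (3 << DLPF_MISS_TID_POS) |
			(5 << DLPF_MISS_CID_POS) | 1;
	/* Hit case: cid=9, tid=6 */
	uint16_t hit = (9 << DLPF_HIT_CID_POS) | 6;

	printf("miss: cid=%u tid=%u\n",
	       (miss >> DLPF_MISS_CID_POS) & DLPF_MISS_CID_TID_MASK,
	       (miss >> DLPF_MISS_TID_POS) & DLPF_MISS_CID_TID_MASK);
	/* note: the driver masks the hit-case TID with 0x7 as well */
	printf("hit:  cid=%u tid=%u\n",
	       (hit >> DLPF_HIT_CID_POS) & DLPF_HIT_CID_TID_MASK,
	       hit & DLPF_MISS_CID_TID_MASK);
	return 0;
}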
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2012 Calxeda, Inc. * * Based on arch/arm/plat-mxc/cpuidle.c: #v3.7 * Copyright 2012 Freescale Semiconductor, Inc. * Copyright 2012 Linaro Ltd. * * Maintainer: Rob Herring <[email protected]> */ #include <linux/cpuidle.h> #include <linux/cpu_pm.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/platform_device.h> #include <linux/psci.h> #include <asm/cpuidle.h> #include <asm/suspend.h> #include <uapi/linux/psci.h> #define CALXEDA_IDLE_PARAM \ ((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \ (0 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \ (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT)) static int calxeda_idle_finish(unsigned long val) { return psci_ops.cpu_suspend(CALXEDA_IDLE_PARAM, __pa(cpu_resume)); } static int calxeda_pwrdown_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { cpu_pm_enter(); cpu_suspend(0, calxeda_idle_finish); cpu_pm_exit(); return index; } static struct cpuidle_driver calxeda_idle_driver = { .name = "calxeda_idle", .states = { ARM_CPUIDLE_WFI_STATE, { .name = "PG", .desc = "Power Gate", .exit_latency = 30, .power_usage = 50, .target_residency = 200, .enter = calxeda_pwrdown_idle, }, }, .state_count = 2, }; static int calxeda_cpuidle_probe(struct platform_device *pdev) { return cpuidle_register(&calxeda_idle_driver, NULL); } static struct platform_driver calxeda_cpuidle_plat_driver = { .driver = { .name = "cpuidle-calxeda", }, .probe = calxeda_cpuidle_probe, }; builtin_platform_driver(calxeda_cpuidle_plat_driver);
/* SPDX-License-Identifier: GPL-2.0 * * Copyright 2016-2020 HabanaLabs, Ltd. * All Rights Reserved. * */ /************************************ ** This is an auto-generated file ** ** DO NOT EDIT BELOW ** ************************************/ #ifndef ASIC_REG_DCORE0_TPC0_CFG_KERNEL_REGS_H_ #define ASIC_REG_DCORE0_TPC0_CFG_KERNEL_REGS_H_ /* ***************************************** * DCORE0_TPC0_CFG_KERNEL * (Prototype: TPC_NON_TENSOR_DESCRIPTOR) ***************************************** */ #define mmDCORE0_TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0x400B508 #define mmDCORE0_TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0x400B50C #define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_DIM_0 0x400B510 #define mmDCORE0_TPC0_CFG_KERNEL_TID_SIZE_DIM_0 0x400B514 #define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_DIM_1 0x400B518 #define mmDCORE0_TPC0_CFG_KERNEL_TID_SIZE_DIM_1 0x400B51C #define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_DIM_2 0x400B520 #define mmDCORE0_TPC0_CFG_KERNEL_TID_SIZE_DIM_2 0x400B524 #define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_DIM_3 0x400B528 #define mmDCORE0_TPC0_CFG_KERNEL_TID_SIZE_DIM_3 0x400B52C #define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_DIM_4 0x400B530 #define mmDCORE0_TPC0_CFG_KERNEL_TID_SIZE_DIM_4 0x400B534 #define mmDCORE0_TPC0_CFG_KERNEL_KERNEL_CONFIG 0x400B538 #define mmDCORE0_TPC0_CFG_KERNEL_KERNEL_ID 0x400B53C #define mmDCORE0_TPC0_CFG_KERNEL_POWER_LOOP 0x400B540 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_0 0x400B544 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_1 0x400B548 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_2 0x400B54C #define mmDCORE0_TPC0_CFG_KERNEL_SRF_3 0x400B550 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_4 0x400B554 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_5 0x400B558 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_6 0x400B55C #define mmDCORE0_TPC0_CFG_KERNEL_SRF_7 0x400B560 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_8 0x400B564 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_9 0x400B568 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_10 0x400B56C #define mmDCORE0_TPC0_CFG_KERNEL_SRF_11 0x400B570 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_12 0x400B574 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_13 0x400B578 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_14 0x400B57C #define mmDCORE0_TPC0_CFG_KERNEL_SRF_15 0x400B580 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_16 0x400B584 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_17 0x400B588 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_18 0x400B58C #define mmDCORE0_TPC0_CFG_KERNEL_SRF_19 0x400B590 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_20 0x400B594 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_21 0x400B598 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_22 0x400B59C #define mmDCORE0_TPC0_CFG_KERNEL_SRF_23 0x400B5A0 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_24 0x400B5A4 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_25 0x400B5A8 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_26 0x400B5AC #define mmDCORE0_TPC0_CFG_KERNEL_SRF_27 0x400B5B0 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_28 0x400B5B4 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_29 0x400B5B8 #define mmDCORE0_TPC0_CFG_KERNEL_SRF_30 0x400B5BC #define mmDCORE0_TPC0_CFG_KERNEL_SRF_31 0x400B5C0 #define mmDCORE0_TPC0_CFG_KERNEL_KERNEL_ID_INC 0x400B5C4 #define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_SIZE_HIGH_DIM_0 0x400B5C8 #define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_SIZE_HIGH_DIM_1 0x400B5CC #define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_SIZE_HIGH_DIM_2 0x400B5D0 #define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_SIZE_HIGH_DIM_3 0x400B5D4 #define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_SIZE_HIGH_DIM_4 0x400B5D8 #endif /* ASIC_REG_DCORE0_TPC0_CFG_KERNEL_REGS_H_ */
// SPDX-License-Identifier: GPL-2.0-only /* Glue code for CAMELLIA encryption optimized for sparc64 crypto opcodes. * * Copyright (C) 2012 David S. Miller <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <crypto/algapi.h> #include <crypto/internal/skcipher.h> #include <asm/fpumacro.h> #include <asm/pstate.h> #include <asm/elf.h> #include "opcodes.h" #define CAMELLIA_MIN_KEY_SIZE 16 #define CAMELLIA_MAX_KEY_SIZE 32 #define CAMELLIA_BLOCK_SIZE 16 #define CAMELLIA_TABLE_BYTE_LEN 272 struct camellia_sparc64_ctx { u64 encrypt_key[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)]; u64 decrypt_key[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)]; int key_len; }; extern void camellia_sparc64_key_expand(const u32 *in_key, u64 *encrypt_key, unsigned int key_len, u64 *decrypt_key); static int camellia_set_key(struct crypto_tfm *tfm, const u8 *_in_key, unsigned int key_len) { struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); const u32 *in_key = (const u32 *) _in_key; if (key_len != 16 && key_len != 24 && key_len != 32) return -EINVAL; ctx->key_len = key_len; camellia_sparc64_key_expand(in_key, &ctx->encrypt_key[0], key_len, &ctx->decrypt_key[0]); return 0; } static int camellia_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { return camellia_set_key(crypto_skcipher_tfm(tfm), in_key, key_len); } extern void camellia_sparc64_crypt(const u64 *key, const u32 *input, u32 *output, unsigned int key_len); static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); camellia_sparc64_crypt(&ctx->encrypt_key[0], (const u32 *) src, (u32 *) dst, ctx->key_len); } static void camellia_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); camellia_sparc64_crypt(&ctx->decrypt_key[0], (const u32 *) src, (u32 *) dst, ctx->key_len); } extern void camellia_sparc64_load_keys(const u64 *key, unsigned int key_len); typedef void ecb_crypt_op(const u64 *input, u64 *output, unsigned int len, const u64 *key); extern ecb_crypt_op camellia_sparc64_ecb_crypt_3_grand_rounds; extern ecb_crypt_op camellia_sparc64_ecb_crypt_4_grand_rounds; static int __ecb_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; ecb_crypt_op *op; const u64 *key; unsigned int nbytes; int err; op = camellia_sparc64_ecb_crypt_3_grand_rounds; if (ctx->key_len != 16) op = camellia_sparc64_ecb_crypt_4_grand_rounds; err = skcipher_walk_virt(&walk, req, true); if (err) return err; if (encrypt) key = &ctx->encrypt_key[0]; else key = &ctx->decrypt_key[0]; camellia_sparc64_load_keys(key, ctx->key_len); while ((nbytes = walk.nbytes) != 0) { op(walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, CAMELLIA_BLOCK_SIZE), key); err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE); } fprs_write(0); return err; } static int ecb_encrypt(struct skcipher_request *req) { return __ecb_crypt(req, true); } static int ecb_decrypt(struct skcipher_request *req) { return __ecb_crypt(req, false); } typedef void cbc_crypt_op(const u64 *input, u64 *output, unsigned int len, const u64 *key, u64 *iv); extern cbc_crypt_op camellia_sparc64_cbc_encrypt_3_grand_rounds; extern cbc_crypt_op 
camellia_sparc64_cbc_encrypt_4_grand_rounds; extern cbc_crypt_op camellia_sparc64_cbc_decrypt_3_grand_rounds; extern cbc_crypt_op camellia_sparc64_cbc_decrypt_4_grand_rounds; static int cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; cbc_crypt_op *op; const u64 *key; unsigned int nbytes; int err; op = camellia_sparc64_cbc_encrypt_3_grand_rounds; if (ctx->key_len != 16) op = camellia_sparc64_cbc_encrypt_4_grand_rounds; err = skcipher_walk_virt(&walk, req, true); if (err) return err; key = &ctx->encrypt_key[0]; camellia_sparc64_load_keys(key, ctx->key_len); while ((nbytes = walk.nbytes) != 0) { op(walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, CAMELLIA_BLOCK_SIZE), key, walk.iv); err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE); } fprs_write(0); return err; } static int cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; cbc_crypt_op *op; const u64 *key; unsigned int nbytes; int err; op = camellia_sparc64_cbc_decrypt_3_grand_rounds; if (ctx->key_len != 16) op = camellia_sparc64_cbc_decrypt_4_grand_rounds; err = skcipher_walk_virt(&walk, req, true); if (err) return err; key = &ctx->decrypt_key[0]; camellia_sparc64_load_keys(key, ctx->key_len); while ((nbytes = walk.nbytes) != 0) { op(walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, CAMELLIA_BLOCK_SIZE), key, walk.iv); err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE); } fprs_write(0); return err; } static struct crypto_alg cipher_alg = { .cra_name = "camellia", .cra_driver_name = "camellia-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAMELLIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct camellia_sparc64_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = CAMELLIA_MIN_KEY_SIZE, .cia_max_keysize = CAMELLIA_MAX_KEY_SIZE, .cia_setkey = camellia_set_key, .cia_encrypt = camellia_encrypt, .cia_decrypt = camellia_decrypt } } }; static struct skcipher_alg skcipher_algs[] = { { .base.cra_name = "ecb(camellia)", .base.cra_driver_name = "ecb-camellia-sparc64", .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct camellia_sparc64_ctx), .base.cra_alignmask = 7, .base.cra_module = THIS_MODULE, .min_keysize = CAMELLIA_MIN_KEY_SIZE, .max_keysize = CAMELLIA_MAX_KEY_SIZE, .setkey = camellia_set_key_skcipher, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { .base.cra_name = "cbc(camellia)", .base.cra_driver_name = "cbc-camellia-sparc64", .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct camellia_sparc64_ctx), .base.cra_alignmask = 7, .base.cra_module = THIS_MODULE, .min_keysize = CAMELLIA_MIN_KEY_SIZE, .max_keysize = CAMELLIA_MAX_KEY_SIZE, .ivsize = CAMELLIA_BLOCK_SIZE, .setkey = camellia_set_key_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, } }; static bool __init sparc64_has_camellia_opcode(void) { unsigned long cfr; if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) return false; __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); if (!(cfr & CFR_CAMELLIA)) return false; return true; } static int __init camellia_sparc64_mod_init(void) { int err; if 
(!sparc64_has_camellia_opcode()) { pr_info("sparc64 camellia opcodes not available.\n"); return -ENODEV; } pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n"); err = crypto_register_alg(&cipher_alg); if (err) return err; err = crypto_register_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); if (err) crypto_unregister_alg(&cipher_alg); return err; } static void __exit camellia_sparc64_mod_fini(void) { crypto_unregister_alg(&cipher_alg); crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); } module_init(camellia_sparc64_mod_init); module_exit(camellia_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated"); MODULE_ALIAS_CRYPTO("camellia"); #include "crop_devid.c"
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2018 IBM Corporation

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_setup.h>
#include <drm/drm_device.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_drv.h>

#include "aspeed_gfx.h"

/**
 * DOC: ASPEED GFX Driver
 *
 * This driver is for the ASPEED BMC SoC's 'GFX' display hardware, also called
 * the 'SOC Display Controller' in the datasheet. It runs on the ARM-based BMC
 * systems, unlike the ast driver, which runs on a host CPU and drives a PCIe
 * graphics device.
 *
 * The AST2500 supports a total of 3 output paths:
 *
 *   1. VGA output; this output can be routed to the DAC, the DVO interface,
 *      or both.
 *
 *   2. Graphics CRT output; this output can also be routed to the DAC, the
 *      DVO interface, or both.
 *
 *   3. Video input from DVO; this input can be used for video engine capture
 *      or for DAC display output.
 *
 * Output options are selected in SCU2C.
 *
 * The "VGA mode" device is the PCI-attached controller. The "Graphics CRT"
 * is the ARM's internal display controller.
 *
 * The driver only supports a simple configuration consisting of a 40MHz
 * pixel clock, fixed by hardware limitations, and the VGA output path.
 *
 * The driver was written against the 'AST2500 Software Programming Guide' v17,
 * which is available under NDA from ASPEED.
*/ struct aspeed_gfx_config { u32 dac_reg; /* DAC register in SCU */ u32 int_clear_reg; /* Interrupt clear register */ u32 vga_scratch_reg; /* VGA scratch register in SCU */ u32 throd_val; /* Default Threshold Seting */ u32 scan_line_max; /* Max memory size of one scan line */ }; static const struct aspeed_gfx_config ast2400_config = { .dac_reg = 0x2c, .int_clear_reg = 0x60, .vga_scratch_reg = 0x50, .throd_val = CRT_THROD_LOW(0x1e) | CRT_THROD_HIGH(0x12), .scan_line_max = 64, }; static const struct aspeed_gfx_config ast2500_config = { .dac_reg = 0x2c, .int_clear_reg = 0x60, .vga_scratch_reg = 0x50, .throd_val = CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3c), .scan_line_max = 128, }; static const struct aspeed_gfx_config ast2600_config = { .dac_reg = 0xc0, .int_clear_reg = 0x68, .vga_scratch_reg = 0x50, .throd_val = CRT_THROD_LOW(0x50) | CRT_THROD_HIGH(0x70), .scan_line_max = 128, }; static const struct of_device_id aspeed_gfx_match[] = { { .compatible = "aspeed,ast2400-gfx", .data = &ast2400_config }, { .compatible = "aspeed,ast2500-gfx", .data = &ast2500_config }, { .compatible = "aspeed,ast2600-gfx", .data = &ast2600_config }, { }, }; MODULE_DEVICE_TABLE(of, aspeed_gfx_match); static const struct drm_mode_config_funcs aspeed_gfx_mode_config_funcs = { .fb_create = drm_gem_fb_create, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; static int aspeed_gfx_setup_mode_config(struct drm_device *drm) { int ret; ret = drmm_mode_config_init(drm); if (ret) return ret; drm->mode_config.min_width = 0; drm->mode_config.min_height = 0; drm->mode_config.max_width = 800; drm->mode_config.max_height = 600; drm->mode_config.funcs = &aspeed_gfx_mode_config_funcs; return ret; } static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data) { struct drm_device *drm = data; struct aspeed_gfx *priv = to_aspeed_gfx(drm); u32 reg; reg = readl(priv->base + CRT_CTRL1); if (reg & CRT_CTRL_VERTICAL_INTR_STS) { drm_crtc_handle_vblank(&priv->pipe.crtc); writel(reg, priv->base + priv->int_clr_reg); return IRQ_HANDLED; } return IRQ_NONE; } static int aspeed_gfx_load(struct drm_device *drm) { struct platform_device *pdev = to_platform_device(drm->dev); struct aspeed_gfx *priv = to_aspeed_gfx(drm); struct device_node *np = pdev->dev.of_node; const struct aspeed_gfx_config *config; struct resource *res; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); priv->base = devm_ioremap_resource(drm->dev, res); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); config = device_get_match_data(&pdev->dev); if (!config) return -EINVAL; priv->dac_reg = config->dac_reg; priv->int_clr_reg = config->int_clear_reg; priv->vga_scratch_reg = config->vga_scratch_reg; priv->throd_val = config->throd_val; priv->scan_line_max = config->scan_line_max; priv->scu = syscon_regmap_lookup_by_phandle(np, "syscon"); if (IS_ERR(priv->scu)) { priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu"); if (IS_ERR(priv->scu)) { dev_err(&pdev->dev, "failed to find SCU regmap\n"); return PTR_ERR(priv->scu); } } ret = of_reserved_mem_device_init(drm->dev); if (ret) { dev_err(&pdev->dev, "failed to initialize reserved mem: %d\n", ret); return ret; } ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)); if (ret) { dev_err(&pdev->dev, "failed to set DMA mask: %d\n", ret); return ret; } priv->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(priv->rst)) { dev_err(&pdev->dev, "missing or invalid reset controller device tree entry"); return PTR_ERR(priv->rst); } 
reset_control_deassert(priv->rst); priv->clk = devm_clk_get(drm->dev, NULL); if (IS_ERR(priv->clk)) { dev_err(&pdev->dev, "missing or invalid clk device tree entry"); return PTR_ERR(priv->clk); } clk_prepare_enable(priv->clk); /* Sanitize control registers */ writel(0, priv->base + CRT_CTRL1); writel(0, priv->base + CRT_CTRL2); ret = aspeed_gfx_setup_mode_config(drm); if (ret < 0) return ret; ret = drm_vblank_init(drm, 1); if (ret < 0) { dev_err(drm->dev, "Failed to initialise vblank\n"); return ret; } ret = aspeed_gfx_create_output(drm); if (ret < 0) { dev_err(drm->dev, "Failed to create outputs\n"); return ret; } ret = aspeed_gfx_create_pipe(drm); if (ret < 0) { dev_err(drm->dev, "Cannot setup simple display pipe\n"); return ret; } ret = devm_request_irq(drm->dev, platform_get_irq(pdev, 0), aspeed_gfx_irq_handler, 0, "aspeed gfx", drm); if (ret < 0) { dev_err(drm->dev, "Failed to install IRQ handler\n"); return ret; } drm_mode_config_reset(drm); return 0; } static void aspeed_gfx_unload(struct drm_device *drm) { drm_kms_helper_poll_fini(drm); } DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver aspeed_gfx_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, DRM_FBDEV_DMA_DRIVER_OPS, .fops = &fops, .name = "aspeed-gfx-drm", .desc = "ASPEED GFX DRM", .date = "20180319", .major = 1, .minor = 0, }; static ssize_t dac_mux_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct aspeed_gfx *priv = dev_get_drvdata(dev); u32 val; int rc; rc = kstrtou32(buf, 0, &val); if (rc) return rc; if (val > 3) return -EINVAL; rc = regmap_update_bits(priv->scu, priv->dac_reg, 0x30000, val << 16); if (rc < 0) return 0; return count; } static ssize_t dac_mux_show(struct device *dev, struct device_attribute *attr, char *buf) { struct aspeed_gfx *priv = dev_get_drvdata(dev); u32 reg; int rc; rc = regmap_read(priv->scu, priv->dac_reg, &reg); if (rc) return rc; return sprintf(buf, "%u\n", (reg >> 16) & 0x3); } static DEVICE_ATTR_RW(dac_mux); static ssize_t vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf) { struct aspeed_gfx *priv = dev_get_drvdata(dev); u32 reg; int rc; rc = regmap_read(priv->scu, priv->vga_scratch_reg, &reg); if (rc) return rc; return sprintf(buf, "%u\n", reg); } static DEVICE_ATTR_RO(vga_pw); static struct attribute *aspeed_sysfs_entries[] = { &dev_attr_vga_pw.attr, &dev_attr_dac_mux.attr, NULL, }; static struct attribute_group aspeed_sysfs_attr_group = { .attrs = aspeed_sysfs_entries, }; static int aspeed_gfx_probe(struct platform_device *pdev) { struct aspeed_gfx *priv; int ret; priv = devm_drm_dev_alloc(&pdev->dev, &aspeed_gfx_driver, struct aspeed_gfx, drm); if (IS_ERR(priv)) return PTR_ERR(priv); ret = aspeed_gfx_load(&priv->drm); if (ret) return ret; platform_set_drvdata(pdev, priv); ret = sysfs_create_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group); if (ret) return ret; ret = drm_dev_register(&priv->drm, 0); if (ret) goto err_unload; drm_client_setup(&priv->drm, NULL); return 0; err_unload: sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group); aspeed_gfx_unload(&priv->drm); return ret; } static void aspeed_gfx_remove(struct platform_device *pdev) { struct drm_device *drm = platform_get_drvdata(pdev); sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group); drm_dev_unregister(drm); aspeed_gfx_unload(drm); drm_atomic_helper_shutdown(drm); } static void aspeed_gfx_shutdown(struct platform_device *pdev) { 
drm_atomic_helper_shutdown(platform_get_drvdata(pdev)); } static struct platform_driver aspeed_gfx_platform_driver = { .probe = aspeed_gfx_probe, .remove = aspeed_gfx_remove, .shutdown = aspeed_gfx_shutdown, .driver = { .name = "aspeed_gfx", .of_match_table = aspeed_gfx_match, }, }; drm_module_platform_driver(aspeed_gfx_platform_driver); MODULE_AUTHOR("Joel Stanley <[email protected]>"); MODULE_DESCRIPTION("ASPEED BMC DRM/KMS driver"); MODULE_LICENSE("GPL");
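/*
 * A minimal, illustrative userspace sketch of reading the dac_mux attribute
 * exported by the driver above. The sysfs path is an assumption (it depends
 * on the platform device name); only the attribute name and its 0..3 value
 * range come from the driver code. Writing a value 0-3 back to the same file
 * selects the DAC output mux, per dac_mux_store().
 */
#include <stdio.h>

int main(void)
{
	/* Hypothetical device path; adjust to the actual GFX platform device */
	FILE *f = fopen("/sys/bus/platform/devices/1e6e6000.display/dac_mux", "r");
	unsigned int val;

	if (!f)
		return 1;
	if (fscanf(f, "%u", &val) == 1)
		printf("current DAC mux: %u\n", val);
	fclose(f);
	return 0;
}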
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for Analog Devices (Linear Technology) LT3651 charger IC. * Copyright (C) 2017, Topic Embedded Products */ #include <linux/device.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/slab.h> #include <linux/of.h> struct lt3651_charger { struct power_supply *charger; struct power_supply_desc charger_desc; struct gpio_desc *acpr_gpio; struct gpio_desc *fault_gpio; struct gpio_desc *chrg_gpio; }; static irqreturn_t lt3651_charger_irq(int irq, void *devid) { struct power_supply *charger = devid; power_supply_changed(charger); return IRQ_HANDLED; } static inline struct lt3651_charger *psy_to_lt3651_charger( struct power_supply *psy) { return power_supply_get_drvdata(psy); } static int lt3651_charger_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct lt3651_charger *lt3651_charger = psy_to_lt3651_charger(psy); switch (psp) { case POWER_SUPPLY_PROP_STATUS: if (!lt3651_charger->chrg_gpio) { val->intval = POWER_SUPPLY_STATUS_UNKNOWN; break; } if (gpiod_get_value(lt3651_charger->chrg_gpio)) val->intval = POWER_SUPPLY_STATUS_CHARGING; else val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; break; case POWER_SUPPLY_PROP_ONLINE: val->intval = gpiod_get_value(lt3651_charger->acpr_gpio); break; case POWER_SUPPLY_PROP_HEALTH: if (!lt3651_charger->fault_gpio) { val->intval = POWER_SUPPLY_HEALTH_UNKNOWN; break; } if (!gpiod_get_value(lt3651_charger->fault_gpio)) { val->intval = POWER_SUPPLY_HEALTH_GOOD; break; } /* * If the fault pin is active, the chrg pin explains the type * of failure. */ if (!lt3651_charger->chrg_gpio) { val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE; break; } val->intval = gpiod_get_value(lt3651_charger->chrg_gpio) ? 
POWER_SUPPLY_HEALTH_OVERHEAT : POWER_SUPPLY_HEALTH_DEAD; break; default: return -EINVAL; } return 0; } static enum power_supply_property lt3651_charger_properties[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_HEALTH, }; static int lt3651_charger_probe(struct platform_device *pdev) { struct power_supply_config psy_cfg = {}; struct lt3651_charger *lt3651_charger; struct power_supply_desc *charger_desc; int ret; lt3651_charger = devm_kzalloc(&pdev->dev, sizeof(*lt3651_charger), GFP_KERNEL); if (!lt3651_charger) return -ENOMEM; lt3651_charger->acpr_gpio = devm_gpiod_get(&pdev->dev, "lltc,acpr", GPIOD_IN); if (IS_ERR(lt3651_charger->acpr_gpio)) { ret = PTR_ERR(lt3651_charger->acpr_gpio); dev_err(&pdev->dev, "Failed to acquire acpr GPIO: %d\n", ret); return ret; } lt3651_charger->fault_gpio = devm_gpiod_get_optional(&pdev->dev, "lltc,fault", GPIOD_IN); if (IS_ERR(lt3651_charger->fault_gpio)) { ret = PTR_ERR(lt3651_charger->fault_gpio); dev_err(&pdev->dev, "Failed to acquire fault GPIO: %d\n", ret); return ret; } lt3651_charger->chrg_gpio = devm_gpiod_get_optional(&pdev->dev, "lltc,chrg", GPIOD_IN); if (IS_ERR(lt3651_charger->chrg_gpio)) { ret = PTR_ERR(lt3651_charger->chrg_gpio); dev_err(&pdev->dev, "Failed to acquire chrg GPIO: %d\n", ret); return ret; } charger_desc = &lt3651_charger->charger_desc; charger_desc->name = pdev->dev.of_node->name; charger_desc->type = POWER_SUPPLY_TYPE_MAINS; charger_desc->properties = lt3651_charger_properties; charger_desc->num_properties = ARRAY_SIZE(lt3651_charger_properties); charger_desc->get_property = lt3651_charger_get_property; psy_cfg.of_node = pdev->dev.of_node; psy_cfg.drv_data = lt3651_charger; lt3651_charger->charger = devm_power_supply_register(&pdev->dev, charger_desc, &psy_cfg); if (IS_ERR(lt3651_charger->charger)) { ret = PTR_ERR(lt3651_charger->charger); dev_err(&pdev->dev, "Failed to register power supply: %d\n", ret); return ret; } /* * Acquire IRQs for the GPIO pins if possible. If the system does not * support IRQs on these pins, userspace will have to poll the sysfs * files manually. 
*/ if (lt3651_charger->acpr_gpio) { ret = gpiod_to_irq(lt3651_charger->acpr_gpio); if (ret >= 0) ret = devm_request_any_context_irq(&pdev->dev, ret, lt3651_charger_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), lt3651_charger->charger); if (ret < 0) dev_warn(&pdev->dev, "Failed to request acpr irq\n"); } if (lt3651_charger->fault_gpio) { ret = gpiod_to_irq(lt3651_charger->fault_gpio); if (ret >= 0) ret = devm_request_any_context_irq(&pdev->dev, ret, lt3651_charger_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), lt3651_charger->charger); if (ret < 0) dev_warn(&pdev->dev, "Failed to request fault irq\n"); } if (lt3651_charger->chrg_gpio) { ret = gpiod_to_irq(lt3651_charger->chrg_gpio); if (ret >= 0) ret = devm_request_any_context_irq(&pdev->dev, ret, lt3651_charger_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), lt3651_charger->charger); if (ret < 0) dev_warn(&pdev->dev, "Failed to request chrg irq\n"); } platform_set_drvdata(pdev, lt3651_charger); return 0; } static const struct of_device_id lt3651_charger_match[] = { { .compatible = "lltc,ltc3651-charger" }, /* DEPRECATED */ { .compatible = "lltc,lt3651-charger" }, { } }; MODULE_DEVICE_TABLE(of, lt3651_charger_match); static struct platform_driver lt3651_charger_driver = { .probe = lt3651_charger_probe, .driver = { .name = "lt3651-charger", .of_match_table = lt3651_charger_match, }, }; module_platform_driver(lt3651_charger_driver); MODULE_AUTHOR("Mike Looijmans <[email protected]>"); MODULE_DESCRIPTION("Driver for LT3651 charger"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:lt3651-charger");
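/*
 * A hedged userspace sketch of the fallback described in the probe() comment
 * above: when the charger GPIOs cannot raise interrupts, userspace polls the
 * power-supply sysfs files instead. The supply name is an assumption; it
 * equals the DT node name assigned to charger_desc->name, guessed here as
 * "charger".
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char status[32];

	for (;;) {
		FILE *f = fopen("/sys/class/power_supply/charger/status", "r");

		if (f && fgets(status, sizeof(status), f))
			printf("charger status: %s", status);	/* e.g. "Charging\n" */
		if (f)
			fclose(f);
		sleep(5);	/* poll every few seconds instead of waiting for an IRQ */
	}
	return 0;
}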
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __USBMIXER_H #define __USBMIXER_H #include <sound/info.h> struct media_mixer_ctl; struct usbmix_connector_map { u8 id; u8 delegated_id; u8 control; u8 channel; }; struct usb_mixer_interface { struct snd_usb_audio *chip; struct usb_host_interface *hostif; struct list_head list; unsigned int ignore_ctl_error; struct urb *urb; /* array[MAX_ID_ELEMS], indexed by unit id */ struct usb_mixer_elem_list **id_elems; /* the usb audio specification version this interface complies to */ int protocol; /* optional connector delegation map */ const struct usbmix_connector_map *connector_map; /* Sound Blaster remote control stuff */ const struct rc_config *rc_cfg; u32 rc_code; wait_queue_head_t rc_waitq; struct urb *rc_urb; struct usb_ctrlrequest *rc_setup_packet; u8 rc_buffer[6]; struct media_mixer_ctl *media_mixer_ctl; bool disconnected; void *private_data; void (*private_free)(struct usb_mixer_interface *mixer); void (*private_suspend)(struct usb_mixer_interface *mixer); }; #define MAX_CHANNELS 16 /* max logical channels */ enum { USB_MIXER_BOOLEAN, USB_MIXER_INV_BOOLEAN, USB_MIXER_S8, USB_MIXER_U8, USB_MIXER_S16, USB_MIXER_U16, USB_MIXER_S32, USB_MIXER_U32, USB_MIXER_BESPOKEN, /* non-standard type */ }; typedef void (*usb_mixer_elem_dump_func_t)(struct snd_info_buffer *buffer, struct usb_mixer_elem_list *list); typedef int (*usb_mixer_elem_resume_func_t)(struct usb_mixer_elem_list *elem); struct usb_mixer_elem_list { struct usb_mixer_interface *mixer; struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */ struct snd_kcontrol *kctl; unsigned int id; bool is_std_info; usb_mixer_elem_dump_func_t dump; usb_mixer_elem_resume_func_t resume; }; /* iterate over mixer element list of the given unit id */ #define for_each_mixer_elem(list, mixer, id) \ for ((list) = (mixer)->id_elems[id]; (list); (list) = (list)->next_id_elem) #define mixer_elem_list_to_info(list) \ container_of(list, struct usb_mixer_elem_info, head) struct usb_mixer_elem_info { struct usb_mixer_elem_list head; unsigned int control; /* CS or ICN (high byte) */ unsigned int cmask; /* channel mask bitmap: 0 = master */ unsigned int idx_off; /* Control index offset */ unsigned int ch_readonly; unsigned int master_readonly; int channels; int val_type; int min, max, res; int max_exposed; /* control API exposes the value in 0..max_exposed */ int dBmin, dBmax; int cached; int cache_val[MAX_CHANNELS]; u8 initialized; u8 min_mute; void *private_data; }; int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif); void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer); void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid); int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int value_set); int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, struct snd_kcontrol *kctl, bool is_std_info); #define snd_usb_mixer_add_control(list, kctl) \ snd_usb_mixer_add_list(list, kctl, true) void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list, struct usb_mixer_interface *mixer, int unitid); int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int size, unsigned int __user *_tlv); int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer); int snd_usb_mixer_resume(struct usb_mixer_interface *mixer); int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel, int index, int value); int snd_usb_get_cur_mix_value(struct usb_mixer_elem_info *cval, int channel, int 
index, int *value); extern void snd_usb_mixer_elem_free(struct snd_kcontrol *kctl); extern const struct snd_kcontrol_new *snd_usb_feature_unit_ctl; #endif /* __USBMIXER_H */
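/*
 * A minimal sketch of walking the per-unit control list with the
 * for_each_mixer_elem() helper declared above. The function itself is
 * hypothetical and only uses types, fields and macros from this header; it
 * assumes kernel context and that unitid is within the id_elems[] range.
 */
static void example_dump_unit_controls(struct usb_mixer_interface *mixer,
				       int unitid)
{
	struct usb_mixer_elem_list *list;

	for_each_mixer_elem(list, mixer, unitid) {
		struct usb_mixer_elem_info *cval;

		if (!list->is_std_info)
			continue;	/* bespoke controls carry no usb_mixer_elem_info */

		cval = mixer_elem_list_to_info(list);
		pr_info("unit %d: %d channel(s), range %d..%d\n",
			unitid, cval->channels, cval->min, cval->max);
	}
}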
// SPDX-License-Identifier: GPL-2.0 /* * Arm Statistical Profiling Extensions (SPE) support * Copyright (c) 2017-2018, Arm Ltd. */ #include <stdio.h> #include <string.h> #include <endian.h> #include <byteswap.h> #include <linux/bitops.h> #include <stdarg.h> #include <linux/kernel.h> #include <linux/unaligned.h> #include "arm-spe-pkt-decoder.h" static const char * const arm_spe_packet_name[] = { [ARM_SPE_PAD] = "PAD", [ARM_SPE_END] = "END", [ARM_SPE_TIMESTAMP] = "TS", [ARM_SPE_ADDRESS] = "ADDR", [ARM_SPE_COUNTER] = "LAT", [ARM_SPE_CONTEXT] = "CONTEXT", [ARM_SPE_OP_TYPE] = "OP-TYPE", [ARM_SPE_EVENTS] = "EVENTS", [ARM_SPE_DATA_SOURCE] = "DATA-SOURCE", }; const char *arm_spe_pkt_name(enum arm_spe_pkt_type type) { return arm_spe_packet_name[type]; } /* * Extracts the field "sz" from header bits and converts to bytes: * 00 : byte (1) * 01 : halfword (2) * 10 : word (4) * 11 : doubleword (8) */ static unsigned int arm_spe_payload_len(unsigned char hdr) { return 1U << ((hdr & GENMASK_ULL(5, 4)) >> 4); } static int arm_spe_get_payload(const unsigned char *buf, size_t len, unsigned char ext_hdr, struct arm_spe_pkt *packet) { size_t payload_len = arm_spe_payload_len(buf[ext_hdr]); if (len < 1 + ext_hdr + payload_len) return ARM_SPE_NEED_MORE_BYTES; buf += 1 + ext_hdr; switch (payload_len) { case 1: packet->payload = *(uint8_t *)buf; break; case 2: packet->payload = get_unaligned_le16(buf); break; case 4: packet->payload = get_unaligned_le32(buf); break; case 8: packet->payload = get_unaligned_le64(buf); break; default: return ARM_SPE_BAD_PACKET; } return 1 + ext_hdr + payload_len; } static int arm_spe_get_pad(struct arm_spe_pkt *packet) { packet->type = ARM_SPE_PAD; return 1; } static int arm_spe_get_alignment(const unsigned char *buf, size_t len, struct arm_spe_pkt *packet) { unsigned int alignment = 1 << ((buf[0] & 0xf) + 1); if (len < alignment) return ARM_SPE_NEED_MORE_BYTES; packet->type = ARM_SPE_PAD; return alignment - (((uintptr_t)buf) & (alignment - 1)); } static int arm_spe_get_end(struct arm_spe_pkt *packet) { packet->type = ARM_SPE_END; return 1; } static int arm_spe_get_timestamp(const unsigned char *buf, size_t len, struct arm_spe_pkt *packet) { packet->type = ARM_SPE_TIMESTAMP; return arm_spe_get_payload(buf, len, 0, packet); } static int arm_spe_get_events(const unsigned char *buf, size_t len, struct arm_spe_pkt *packet) { packet->type = ARM_SPE_EVENTS; /* we use index to identify Events with a less number of * comparisons in arm_spe_pkt_desc(): E.g., the LLC-ACCESS, * LLC-REFILL, and REMOTE-ACCESS events are identified if * index > 1. 
*/ packet->index = arm_spe_payload_len(buf[0]); return arm_spe_get_payload(buf, len, 0, packet); } static int arm_spe_get_data_source(const unsigned char *buf, size_t len, struct arm_spe_pkt *packet) { packet->type = ARM_SPE_DATA_SOURCE; return arm_spe_get_payload(buf, len, 0, packet); } static int arm_spe_get_context(const unsigned char *buf, size_t len, struct arm_spe_pkt *packet) { packet->type = ARM_SPE_CONTEXT; packet->index = SPE_CTX_PKT_HDR_INDEX(buf[0]); return arm_spe_get_payload(buf, len, 0, packet); } static int arm_spe_get_op_type(const unsigned char *buf, size_t len, struct arm_spe_pkt *packet) { packet->type = ARM_SPE_OP_TYPE; packet->index = SPE_OP_PKT_HDR_CLASS(buf[0]); return arm_spe_get_payload(buf, len, 0, packet); } static int arm_spe_get_counter(const unsigned char *buf, size_t len, const unsigned char ext_hdr, struct arm_spe_pkt *packet) { packet->type = ARM_SPE_COUNTER; if (ext_hdr) packet->index = SPE_HDR_EXTENDED_INDEX(buf[0], buf[1]); else packet->index = SPE_HDR_SHORT_INDEX(buf[0]); return arm_spe_get_payload(buf, len, ext_hdr, packet); } static int arm_spe_get_addr(const unsigned char *buf, size_t len, const unsigned char ext_hdr, struct arm_spe_pkt *packet) { packet->type = ARM_SPE_ADDRESS; if (ext_hdr) packet->index = SPE_HDR_EXTENDED_INDEX(buf[0], buf[1]); else packet->index = SPE_HDR_SHORT_INDEX(buf[0]); return arm_spe_get_payload(buf, len, ext_hdr, packet); } static int arm_spe_do_get_packet(const unsigned char *buf, size_t len, struct arm_spe_pkt *packet) { unsigned int hdr; unsigned char ext_hdr = 0; memset(packet, 0, sizeof(struct arm_spe_pkt)); if (!len) return ARM_SPE_NEED_MORE_BYTES; hdr = buf[0]; if (hdr == SPE_HEADER0_PAD) return arm_spe_get_pad(packet); if (hdr == SPE_HEADER0_END) /* no timestamp at end of record */ return arm_spe_get_end(packet); if (hdr == SPE_HEADER0_TIMESTAMP) return arm_spe_get_timestamp(buf, len, packet); if ((hdr & SPE_HEADER0_MASK1) == SPE_HEADER0_EVENTS) return arm_spe_get_events(buf, len, packet); if ((hdr & SPE_HEADER0_MASK1) == SPE_HEADER0_SOURCE) return arm_spe_get_data_source(buf, len, packet); if ((hdr & SPE_HEADER0_MASK2) == SPE_HEADER0_CONTEXT) return arm_spe_get_context(buf, len, packet); if ((hdr & SPE_HEADER0_MASK2) == SPE_HEADER0_OP_TYPE) return arm_spe_get_op_type(buf, len, packet); if ((hdr & SPE_HEADER0_MASK2) == SPE_HEADER0_EXTENDED) { /* 16-bit extended format header */ if (len == 1) return ARM_SPE_BAD_PACKET; ext_hdr = 1; hdr = buf[1]; if (hdr == SPE_HEADER1_ALIGNMENT) return arm_spe_get_alignment(buf, len, packet); } /* * The short format header's byte 0 or the extended format header's * byte 1 has been assigned to 'hdr', which uses the same encoding for * address packet and counter packet, so don't need to distinguish if * it's short format or extended format and handle in once. */ if ((hdr & SPE_HEADER0_MASK3) == SPE_HEADER0_ADDRESS) return arm_spe_get_addr(buf, len, ext_hdr, packet); if ((hdr & SPE_HEADER0_MASK3) == SPE_HEADER0_COUNTER) return arm_spe_get_counter(buf, len, ext_hdr, packet); return ARM_SPE_BAD_PACKET; } int arm_spe_get_packet(const unsigned char *buf, size_t len, struct arm_spe_pkt *packet) { int ret; ret = arm_spe_do_get_packet(buf, len, packet); /* put multiple consecutive PADs on the same line, up to * the fixed-width output format of 16 bytes per line. */ if (ret > 0 && packet->type == ARM_SPE_PAD) { while (ret < 16 && len > (size_t)ret && !buf[ret]) ret += 1; } return ret; } static int arm_spe_pkt_out_string(int *err, char **buf_p, size_t *blen, const char *fmt, ...) 
{ va_list ap; int ret; /* Bail out if any error occurred */ if (err && *err) return *err; va_start(ap, fmt); ret = vsnprintf(*buf_p, *blen, fmt, ap); va_end(ap); if (ret < 0) { if (err && !*err) *err = ret; /* * A return value of *blen or more means that the output was * truncated and the buffer is overrun. */ } else if ((size_t)ret >= *blen) { (*buf_p)[*blen - 1] = '\0'; /* * Set *err to 'ret' to avoid overflow if tries to * fill this buffer sequentially. */ if (err && !*err) *err = ret; } else { *buf_p += ret; *blen -= ret; } return ret; } static int arm_spe_pkt_desc_event(const struct arm_spe_pkt *packet, char *buf, size_t buf_len) { u64 payload = packet->payload; int err = 0; arm_spe_pkt_out_string(&err, &buf, &buf_len, "EV"); if (payload & BIT(EV_EXCEPTION_GEN)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " EXCEPTION-GEN"); if (payload & BIT(EV_RETIRED)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " RETIRED"); if (payload & BIT(EV_L1D_ACCESS)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " L1D-ACCESS"); if (payload & BIT(EV_L1D_REFILL)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " L1D-REFILL"); if (payload & BIT(EV_TLB_ACCESS)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " TLB-ACCESS"); if (payload & BIT(EV_TLB_WALK)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " TLB-REFILL"); if (payload & BIT(EV_NOT_TAKEN)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " NOT-TAKEN"); if (payload & BIT(EV_MISPRED)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " MISPRED"); if (payload & BIT(EV_LLC_ACCESS)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " LLC-ACCESS"); if (payload & BIT(EV_LLC_MISS)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " LLC-REFILL"); if (payload & BIT(EV_REMOTE_ACCESS)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " REMOTE-ACCESS"); if (payload & BIT(EV_ALIGNMENT)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " ALIGNMENT"); if (payload & BIT(EV_PARTIAL_PREDICATE)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " SVE-PARTIAL-PRED"); if (payload & BIT(EV_EMPTY_PREDICATE)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " SVE-EMPTY-PRED"); return err; } static int arm_spe_pkt_desc_op_type(const struct arm_spe_pkt *packet, char *buf, size_t buf_len) { u64 payload = packet->payload; int err = 0; switch (packet->index) { case SPE_OP_PKT_HDR_CLASS_OTHER: if (SPE_OP_PKT_IS_OTHER_SVE_OP(payload)) { arm_spe_pkt_out_string(&err, &buf, &buf_len, "SVE-OTHER"); /* SVE effective vector length */ arm_spe_pkt_out_string(&err, &buf, &buf_len, " EVLEN %d", SPE_OP_PKG_SVE_EVL(payload)); if (payload & SPE_OP_PKT_SVE_FP) arm_spe_pkt_out_string(&err, &buf, &buf_len, " FP"); if (payload & SPE_OP_PKT_SVE_PRED) arm_spe_pkt_out_string(&err, &buf, &buf_len, " PRED"); } else { arm_spe_pkt_out_string(&err, &buf, &buf_len, "OTHER"); arm_spe_pkt_out_string(&err, &buf, &buf_len, " %s", payload & SPE_OP_PKT_COND ? "COND-SELECT" : "INSN-OTHER"); } break; case SPE_OP_PKT_HDR_CLASS_LD_ST_ATOMIC: arm_spe_pkt_out_string(&err, &buf, &buf_len, payload & 0x1 ? 
"ST" : "LD"); if (SPE_OP_PKT_IS_LDST_ATOMIC(payload)) { if (payload & SPE_OP_PKT_AT) arm_spe_pkt_out_string(&err, &buf, &buf_len, " AT"); if (payload & SPE_OP_PKT_EXCL) arm_spe_pkt_out_string(&err, &buf, &buf_len, " EXCL"); if (payload & SPE_OP_PKT_AR) arm_spe_pkt_out_string(&err, &buf, &buf_len, " AR"); } switch (SPE_OP_PKT_LDST_SUBCLASS_GET(payload)) { case SPE_OP_PKT_LDST_SUBCLASS_SIMD_FP: arm_spe_pkt_out_string(&err, &buf, &buf_len, " SIMD-FP"); break; case SPE_OP_PKT_LDST_SUBCLASS_GP_REG: arm_spe_pkt_out_string(&err, &buf, &buf_len, " GP-REG"); break; case SPE_OP_PKT_LDST_SUBCLASS_UNSPEC_REG: arm_spe_pkt_out_string(&err, &buf, &buf_len, " UNSPEC-REG"); break; case SPE_OP_PKT_LDST_SUBCLASS_NV_SYSREG: arm_spe_pkt_out_string(&err, &buf, &buf_len, " NV-SYSREG"); break; case SPE_OP_PKT_LDST_SUBCLASS_MTE_TAG: arm_spe_pkt_out_string(&err, &buf, &buf_len, " MTE-TAG"); break; case SPE_OP_PKT_LDST_SUBCLASS_MEMCPY: arm_spe_pkt_out_string(&err, &buf, &buf_len, " MEMCPY"); break; case SPE_OP_PKT_LDST_SUBCLASS_MEMSET: arm_spe_pkt_out_string(&err, &buf, &buf_len, " MEMSET"); break; default: break; } if (SPE_OP_PKT_IS_LDST_SVE(payload)) { /* SVE effective vector length */ arm_spe_pkt_out_string(&err, &buf, &buf_len, " EVLEN %d", SPE_OP_PKG_SVE_EVL(payload)); if (payload & SPE_OP_PKT_SVE_PRED) arm_spe_pkt_out_string(&err, &buf, &buf_len, " PRED"); if (payload & SPE_OP_PKT_SVE_SG) arm_spe_pkt_out_string(&err, &buf, &buf_len, " SG"); } break; case SPE_OP_PKT_HDR_CLASS_BR_ERET: arm_spe_pkt_out_string(&err, &buf, &buf_len, "B"); if (payload & SPE_OP_PKT_COND) arm_spe_pkt_out_string(&err, &buf, &buf_len, " COND"); if (SPE_OP_PKT_IS_INDIRECT_BRANCH(payload)) arm_spe_pkt_out_string(&err, &buf, &buf_len, " IND"); break; default: /* Unknown index */ err = -1; break; } return err; } static int arm_spe_pkt_desc_addr(const struct arm_spe_pkt *packet, char *buf, size_t buf_len) { int ns, el, idx = packet->index; int ch, pat; u64 payload = packet->payload; int err = 0; static const char *idx_name[] = {"PC", "TGT", "VA", "PA", "PBT"}; switch (idx) { case SPE_ADDR_PKT_HDR_INDEX_INS: case SPE_ADDR_PKT_HDR_INDEX_BRANCH: case SPE_ADDR_PKT_HDR_INDEX_PREV_BRANCH: ns = !!SPE_ADDR_PKT_GET_NS(payload); el = SPE_ADDR_PKT_GET_EL(payload); payload = SPE_ADDR_PKT_ADDR_GET_BYTES_0_6(payload); arm_spe_pkt_out_string(&err, &buf, &buf_len, "%s 0x%llx el%d ns=%d", idx_name[idx], payload, el, ns); break; case SPE_ADDR_PKT_HDR_INDEX_DATA_VIRT: arm_spe_pkt_out_string(&err, &buf, &buf_len, "VA 0x%llx", payload); break; case SPE_ADDR_PKT_HDR_INDEX_DATA_PHYS: ns = !!SPE_ADDR_PKT_GET_NS(payload); ch = !!SPE_ADDR_PKT_GET_CH(payload); pat = SPE_ADDR_PKT_GET_PAT(payload); payload = SPE_ADDR_PKT_ADDR_GET_BYTES_0_6(payload); arm_spe_pkt_out_string(&err, &buf, &buf_len, "PA 0x%llx ns=%d ch=%d pat=%x", payload, ns, ch, pat); break; default: /* Unknown index */ err = -1; break; } return err; } static int arm_spe_pkt_desc_counter(const struct arm_spe_pkt *packet, char *buf, size_t buf_len) { u64 payload = packet->payload; const char *name = arm_spe_pkt_name(packet->type); int err = 0; arm_spe_pkt_out_string(&err, &buf, &buf_len, "%s %d ", name, (unsigned short)payload); switch (packet->index) { case SPE_CNT_PKT_HDR_INDEX_TOTAL_LAT: arm_spe_pkt_out_string(&err, &buf, &buf_len, "TOT"); break; case SPE_CNT_PKT_HDR_INDEX_ISSUE_LAT: arm_spe_pkt_out_string(&err, &buf, &buf_len, "ISSUE"); break; case SPE_CNT_PKT_HDR_INDEX_TRANS_LAT: arm_spe_pkt_out_string(&err, &buf, &buf_len, "XLAT"); break; default: break; } return err; } int arm_spe_pkt_desc(const 
struct arm_spe_pkt *packet, char *buf, size_t buf_len) { int idx = packet->index; unsigned long long payload = packet->payload; const char *name = arm_spe_pkt_name(packet->type); char *buf_orig = buf; size_t blen = buf_len; int err = 0; switch (packet->type) { case ARM_SPE_BAD: case ARM_SPE_PAD: case ARM_SPE_END: arm_spe_pkt_out_string(&err, &buf, &blen, "%s", name); break; case ARM_SPE_EVENTS: err = arm_spe_pkt_desc_event(packet, buf, buf_len); break; case ARM_SPE_OP_TYPE: err = arm_spe_pkt_desc_op_type(packet, buf, buf_len); break; case ARM_SPE_DATA_SOURCE: case ARM_SPE_TIMESTAMP: arm_spe_pkt_out_string(&err, &buf, &blen, "%s %lld", name, payload); break; case ARM_SPE_ADDRESS: err = arm_spe_pkt_desc_addr(packet, buf, buf_len); break; case ARM_SPE_CONTEXT: arm_spe_pkt_out_string(&err, &buf, &blen, "%s 0x%lx el%d", name, (unsigned long)payload, idx + 1); break; case ARM_SPE_COUNTER: err = arm_spe_pkt_desc_counter(packet, buf, buf_len); break; default: /* Unknown packet type */ err = -1; break; } /* Output raw data if detect any error */ if (err) { err = 0; arm_spe_pkt_out_string(&err, &buf_orig, &buf_len, "%s 0x%llx (%d)", name, payload, packet->index); } return err; }
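/*
 * A minimal sketch of how a consumer might drive the two entry points above,
 * walking a raw SPE buffer packet by packet. The buffer is assumed to come
 * from the perf AUX area, and the desc[] size is an arbitrary choice.
 */
static void example_dump_spe_buffer(const unsigned char *buf, size_t len)
{
	struct arm_spe_pkt packet;
	char desc[256];
	int ret;

	while (len) {
		ret = arm_spe_get_packet(buf, len, &packet);
		if (ret <= 0)	/* ARM_SPE_NEED_MORE_BYTES or ARM_SPE_BAD_PACKET */
			break;

		if (!arm_spe_pkt_desc(&packet, desc, sizeof(desc)))
			printf("%s\n", desc);

		buf += ret;	/* advance by the number of bytes consumed */
		len -= ret;
	}
}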
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_INCLUDE_MFD_IMX25_TSADC_H_ #define _LINUX_INCLUDE_MFD_IMX25_TSADC_H_ struct regmap; struct clk; struct mx25_tsadc { struct regmap *regs; struct irq_domain *domain; struct clk *clk; }; #define MX25_TSC_TGCR 0x00 #define MX25_TSC_TGSR 0x04 #define MX25_TSC_TICR 0x08 /* The same register layout for TC and GC queue */ #define MX25_ADCQ_FIFO 0x00 #define MX25_ADCQ_CR 0x04 #define MX25_ADCQ_SR 0x08 #define MX25_ADCQ_MR 0x0c #define MX25_ADCQ_ITEM_7_0 0x20 #define MX25_ADCQ_ITEM_15_8 0x24 #define MX25_ADCQ_CFG(n) (0x40 + ((n) * 0x4)) #define MX25_ADCQ_MR_MASK 0xffffffff /* TGCR */ #define MX25_TGCR_PDBTIME(x) ((x) << 25) #define MX25_TGCR_PDBTIME_MASK GENMASK(31, 25) #define MX25_TGCR_PDBEN BIT(24) #define MX25_TGCR_PDEN BIT(23) #define MX25_TGCR_ADCCLKCFG(x) ((x) << 16) #define MX25_TGCR_GET_ADCCLK(x) (((x) >> 16) & 0x1f) #define MX25_TGCR_INTREFEN BIT(10) #define MX25_TGCR_POWERMODE_MASK GENMASK(9, 8) #define MX25_TGCR_POWERMODE_SAVE (1 << 8) #define MX25_TGCR_POWERMODE_ON (2 << 8) #define MX25_TGCR_STLC BIT(5) #define MX25_TGCR_SLPC BIT(4) #define MX25_TGCR_FUNC_RST BIT(2) #define MX25_TGCR_TSC_RST BIT(1) #define MX25_TGCR_CLK_EN BIT(0) /* TGSR */ #define MX25_TGSR_SLP_INT BIT(2) #define MX25_TGSR_GCQ_INT BIT(1) #define MX25_TGSR_TCQ_INT BIT(0) /* ADCQ_ITEM_* */ #define _MX25_ADCQ_ITEM(item, x) ((x) << ((item) * 4)) #define MX25_ADCQ_ITEM(item, x) ((item) >= 8 ? \ _MX25_ADCQ_ITEM((item) - 8, (x)) : _MX25_ADCQ_ITEM((item), (x))) /* ADCQ_FIFO (TCQFIFO and GCQFIFO) */ #define MX25_ADCQ_FIFO_DATA(x) (((x) >> 4) & 0xfff) #define MX25_ADCQ_FIFO_ID(x) ((x) & 0xf) /* ADCQ_CR (TCQR and GCQR) */ #define MX25_ADCQ_CR_PDCFG_LEVEL BIT(19) #define MX25_ADCQ_CR_PDMSK BIT(18) #define MX25_ADCQ_CR_FRST BIT(17) #define MX25_ADCQ_CR_QRST BIT(16) #define MX25_ADCQ_CR_RWAIT_MASK GENMASK(15, 12) #define MX25_ADCQ_CR_RWAIT(x) ((x) << 12) #define MX25_ADCQ_CR_WMRK_MASK GENMASK(11, 8) #define MX25_ADCQ_CR_WMRK(x) ((x) << 8) #define MX25_ADCQ_CR_LITEMID_MASK (0xf << 4) #define MX25_ADCQ_CR_LITEMID(x) ((x) << 4) #define MX25_ADCQ_CR_RPT BIT(3) #define MX25_ADCQ_CR_FQS BIT(2) #define MX25_ADCQ_CR_QSM_MASK GENMASK(1, 0) #define MX25_ADCQ_CR_QSM_PD 0x1 #define MX25_ADCQ_CR_QSM_FQS 0x2 #define MX25_ADCQ_CR_QSM_FQS_PD 0x3 /* ADCQ_SR (TCQSR and GCQSR) */ #define MX25_ADCQ_SR_FDRY BIT(15) #define MX25_ADCQ_SR_FULL BIT(14) #define MX25_ADCQ_SR_EMPT BIT(13) #define MX25_ADCQ_SR_FDN(x) (((x) >> 8) & 0x1f) #define MX25_ADCQ_SR_FRR BIT(6) #define MX25_ADCQ_SR_FUR BIT(5) #define MX25_ADCQ_SR_FOR BIT(4) #define MX25_ADCQ_SR_EOQ BIT(1) #define MX25_ADCQ_SR_PD BIT(0) /* ADCQ_MR (TCQMR and GCQMR) */ #define MX25_ADCQ_MR_FDRY_DMA BIT(31) #define MX25_ADCQ_MR_FER_DMA BIT(22) #define MX25_ADCQ_MR_FUR_DMA BIT(21) #define MX25_ADCQ_MR_FOR_DMA BIT(20) #define MX25_ADCQ_MR_EOQ_DMA BIT(17) #define MX25_ADCQ_MR_PD_DMA BIT(16) #define MX25_ADCQ_MR_FDRY_IRQ BIT(15) #define MX25_ADCQ_MR_FER_IRQ BIT(6) #define MX25_ADCQ_MR_FUR_IRQ BIT(5) #define MX25_ADCQ_MR_FOR_IRQ BIT(4) #define MX25_ADCQ_MR_EOQ_IRQ BIT(1) #define MX25_ADCQ_MR_PD_IRQ BIT(0) /* ADCQ_CFG (TICR, TCC0-7,GCC0-7) */ #define MX25_ADCQ_CFG_SETTLING_TIME(x) ((x) << 24) #define MX25_ADCQ_CFG_IGS (1 << 20) #define MX25_ADCQ_CFG_NOS_MASK GENMASK(19, 16) #define MX25_ADCQ_CFG_NOS(x) (((x) - 1) << 16) #define MX25_ADCQ_CFG_WIPER (1 << 15) #define MX25_ADCQ_CFG_YNLR (1 << 14) #define MX25_ADCQ_CFG_YPLL_HIGH (0 << 12) #define MX25_ADCQ_CFG_YPLL_OFF (1 << 12) #define MX25_ADCQ_CFG_YPLL_LOW (3 << 12) #define MX25_ADCQ_CFG_XNUR_HIGH (0 << 10) 
#define MX25_ADCQ_CFG_XNUR_OFF (1 << 10) #define MX25_ADCQ_CFG_XNUR_LOW (3 << 10) #define MX25_ADCQ_CFG_XPUL_HIGH (0 << 9) #define MX25_ADCQ_CFG_XPUL_OFF (1 << 9) #define MX25_ADCQ_CFG_REFP(sel) ((sel) << 7) #define MX25_ADCQ_CFG_REFP_YP MX25_ADCQ_CFG_REFP(0) #define MX25_ADCQ_CFG_REFP_XP MX25_ADCQ_CFG_REFP(1) #define MX25_ADCQ_CFG_REFP_EXT MX25_ADCQ_CFG_REFP(2) #define MX25_ADCQ_CFG_REFP_INT MX25_ADCQ_CFG_REFP(3) #define MX25_ADCQ_CFG_REFP_MASK GENMASK(8, 7) #define MX25_ADCQ_CFG_IN(sel) ((sel) << 4) #define MX25_ADCQ_CFG_IN_XP MX25_ADCQ_CFG_IN(0) #define MX25_ADCQ_CFG_IN_YP MX25_ADCQ_CFG_IN(1) #define MX25_ADCQ_CFG_IN_XN MX25_ADCQ_CFG_IN(2) #define MX25_ADCQ_CFG_IN_YN MX25_ADCQ_CFG_IN(3) #define MX25_ADCQ_CFG_IN_WIPER MX25_ADCQ_CFG_IN(4) #define MX25_ADCQ_CFG_IN_AUX0 MX25_ADCQ_CFG_IN(5) #define MX25_ADCQ_CFG_IN_AUX1 MX25_ADCQ_CFG_IN(6) #define MX25_ADCQ_CFG_IN_AUX2 MX25_ADCQ_CFG_IN(7) #define MX25_ADCQ_CFG_REFN(sel) ((sel) << 2) #define MX25_ADCQ_CFG_REFN_XN MX25_ADCQ_CFG_REFN(0) #define MX25_ADCQ_CFG_REFN_YN MX25_ADCQ_CFG_REFN(1) #define MX25_ADCQ_CFG_REFN_NGND MX25_ADCQ_CFG_REFN(2) #define MX25_ADCQ_CFG_REFN_NGND2 MX25_ADCQ_CFG_REFN(3) #define MX25_ADCQ_CFG_REFN_MASK GENMASK(3, 2) #define MX25_ADCQ_CFG_PENIACK (1 << 1) #endif /* _LINUX_INCLUDE_MFD_IMX25_TSADC_H_ */
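/*
 * A sketch of composing one ADC queue item configuration word from the
 * MX25_ADCQ_CFG_* macros above. The chosen input, references and sample
 * count are purely illustrative; "adcq_regs" stands in for a regmap covering
 * the TCQ/GCQ register block, and <linux/regmap.h> is assumed to be
 * available in the including driver.
 */
static int example_setup_aux0_item(struct regmap *adcq_regs)
{
	u32 cfg = MX25_ADCQ_CFG_IN_AUX0 |		/* convert the AUX0 input */
		  MX25_ADCQ_CFG_REFP_INT |		/* internal positive reference */
		  MX25_ADCQ_CFG_REFN_NGND |		/* negative reference to ground */
		  MX25_ADCQ_CFG_NOS(4) |		/* average four samples */
		  MX25_ADCQ_CFG_SETTLING_TIME(0x20);	/* settling delay before sampling */

	return regmap_write(adcq_regs, MX25_ADCQ_CFG(0), cfg);
}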
/* * sunxi boards common regulator (ahci target power supply, usb-vbus) code * * Copyright 2014 - Hans de Goede <[email protected]> * * This file is dual-licensed: you can use it either under the terms * of the GPL or the X11 license, at your option. Note that this dual * licensing only applies to this file, and not this project as a * whole. * * a) This file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This file is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Or, alternatively, * * b) Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include <dt-bindings/gpio/gpio.h> / { reg_ahci_5v: ahci-5v { compatible = "regulator-fixed"; regulator-name = "ahci-5v"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; regulator-boot-on; enable-active-high; gpio = <&pio 1 8 GPIO_ACTIVE_HIGH>; status = "disabled"; }; reg_usb0_vbus: usb0-vbus { compatible = "regulator-fixed"; regulator-name = "usb0-vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; enable-active-high; gpio = <&pio 1 9 GPIO_ACTIVE_HIGH>; status = "disabled"; }; reg_usb1_vbus: usb1-vbus { compatible = "regulator-fixed"; regulator-name = "usb1-vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; regulator-boot-on; enable-active-high; gpio = <&pio 7 6 GPIO_ACTIVE_HIGH>; status = "disabled"; }; reg_usb2_vbus: usb2-vbus { compatible = "regulator-fixed"; regulator-name = "usb2-vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; regulator-boot-on; enable-active-high; gpio = <&pio 7 3 GPIO_ACTIVE_HIGH>; status = "disabled"; }; reg_vcc3v0: vcc3v0 { compatible = "regulator-fixed"; regulator-name = "vcc3v0"; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3000000>; }; reg_vcc3v3: vcc3v3 { compatible = "regulator-fixed"; regulator-name = "vcc3v3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; }; reg_vcc5v0: vcc5v0 { compatible = "regulator-fixed"; regulator-name = "vcc5v0"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; }; };
// SPDX-License-Identifier: GPL-2.0+ OR MIT /* * Apple iPad Pro (9.7-inch) (Cellular), J128, iPad6,4 (A1674/A1675) * Copyright (c) 2022, Konrad Dybcio <[email protected]> */ /dts-v1/; #include "s8001-pro.dtsi" / { compatible = "apple,j128", "apple,s8001", "apple,arm-platform"; model = "Apple iPad Pro (9.7-inch) (Cellular)"; };
/* SPDX-License-Identifier: MIT */ #ifndef __NOUVEAU_FENCE_H__ #define __NOUVEAU_FENCE_H__ #include <linux/dma-fence.h> #include <nvif/event.h> struct nouveau_drm; struct nouveau_bo; struct nouveau_fence { struct dma_fence base; struct list_head head; struct nouveau_channel __rcu *channel; unsigned long timeout; }; int nouveau_fence_create(struct nouveau_fence **, struct nouveau_channel *); int nouveau_fence_new(struct nouveau_fence **, struct nouveau_channel *); void nouveau_fence_unref(struct nouveau_fence **); int nouveau_fence_emit(struct nouveau_fence *); bool nouveau_fence_done(struct nouveau_fence *); int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr); struct nouveau_fence_chan { spinlock_t lock; struct kref fence_ref; struct list_head pending; struct list_head flip; int (*emit)(struct nouveau_fence *); int (*sync)(struct nouveau_fence *, struct nouveau_channel *, struct nouveau_channel *); u32 (*read)(struct nouveau_channel *); int (*emit32)(struct nouveau_channel *, u64, u32); int (*sync32)(struct nouveau_channel *, u64, u32); u32 sequence; u32 context; char name[32]; struct work_struct uevent_work; struct nvif_event event; int notify_ref, dead, killed; }; struct nouveau_fence_priv { void (*dtor)(struct nouveau_drm *); bool (*suspend)(struct nouveau_drm *); void (*resume)(struct nouveau_drm *); int (*context_new)(struct nouveau_channel *); void (*context_del)(struct nouveau_channel *); bool uevent; }; #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence) void nouveau_fence_context_new(struct nouveau_channel *, struct nouveau_fence_chan *); void nouveau_fence_context_del(struct nouveau_fence_chan *); void nouveau_fence_context_free(struct nouveau_fence_chan *); void nouveau_fence_context_kill(struct nouveau_fence_chan *, int error); int nv04_fence_create(struct nouveau_drm *); int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32); int nv10_fence_emit(struct nouveau_fence *); int nv17_fence_sync(struct nouveau_fence *, struct nouveau_channel *, struct nouveau_channel *); u32 nv10_fence_read(struct nouveau_channel *); void nv10_fence_context_del(struct nouveau_channel *); void nv10_fence_destroy(struct nouveau_drm *); int nv10_fence_create(struct nouveau_drm *); int nv17_fence_create(struct nouveau_drm *); void nv17_fence_resume(struct nouveau_drm *drm); int nv50_fence_create(struct nouveau_drm *); int nv84_fence_create(struct nouveau_drm *); int nvc0_fence_create(struct nouveau_drm *); struct nv84_fence_chan { struct nouveau_fence_chan base; struct nouveau_vma *vma; }; struct nv84_fence_priv { struct nouveau_fence_priv base; struct nouveau_bo *bo; u32 *suspend; struct mutex mutex; }; int nv84_fence_context_new(struct nouveau_channel *); #endif
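/*
 * A minimal sketch of the fence lifecycle implied by the declarations above:
 * create a fence on a channel, emit it, wait for it, then drop the reference.
 * The function is hypothetical, error handling is trimmed, and "chan" is
 * assumed to be a live nouveau_channel.
 */
static int example_sync_channel(struct nouveau_channel *chan)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(&fence, chan);
	if (ret)
		return ret;

	ret = nouveau_fence_emit(fence);	/* publish the fence on the channel */
	if (!ret)
		ret = nouveau_fence_wait(fence, false, false);	/* not lazy, not interruptible */

	nouveau_fence_unref(&fence);
	return ret;
}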
/* SPDX-License-Identifier: GPL-2.0 */ /* sun3xflop.h: Sun3/80 specific parts of the floppy driver. * * Derived partially from asm-sparc/floppy.h, which is: * Copyright (C) 1995 David S. Miller ([email protected]) * * Sun3x version 2/4/2000 Sam Creasey ([email protected]) */ #ifndef __ASM_SUN3X_FLOPPY_H #define __ASM_SUN3X_FLOPPY_H #include <linux/pgtable.h> #include <asm/page.h> #include <asm/irq.h> #include <asm/sun3x.h> /* default interrupt vector */ #define SUN3X_FDC_IRQ 0x40 /* some constants */ #define FCR_TC 0x1 #define FCR_EJECT 0x2 #define FCR_MTRON 0x4 #define FCR_DSEL1 0x8 #define FCR_DSEL0 0x10 /* We don't need no stinkin' I/O port allocation crap. */ #undef release_region #undef request_region #define release_region(X, Y) do { } while(0) #define request_region(X, Y, Z) (1) struct sun3xflop_private { volatile unsigned char *status_r; volatile unsigned char *data_r; volatile unsigned char *fcr_r; volatile unsigned char *fvr_r; unsigned char fcr; } sun3x_fdc; /* Super paranoid... */ #undef HAVE_DISABLE_HLT /* Routines unique to each controller type on a Sun. */ static unsigned char sun3x_82072_fd_inb(int port) { static int once = 0; // udelay(5); switch(port & 7) { default: pr_crit("floppy: Asked to read unknown port %d\n", port); panic("floppy: Port bolixed."); case 4: /* FD_STATUS */ return (*sun3x_fdc.status_r) & ~STATUS_DMA; case 5: /* FD_DATA */ return (*sun3x_fdc.data_r); case 7: /* FD_DIR */ /* ugly hack, I can't find a way to actually detect the disk */ if(!once) { once = 1; return 0x80; } return 0; }; panic("sun_82072_fd_inb: How did I get here?"); } static void sun3x_82072_fd_outb(unsigned char value, int port) { // udelay(5); switch(port & 7) { default: pr_crit("floppy: Asked to write to unknown port %d\n", port); panic("floppy: Port bolixed."); case 2: /* FD_DOR */ /* Oh geese, 82072 on the Sun has no DOR register, * so we make do with taunting the FCR. * * ASSUMPTIONS: There will only ever be one floppy * drive attached to a Sun controller * and it will be at drive zero. 
*/ { unsigned char fcr = sun3x_fdc.fcr; if(value & 0x10) { fcr |= (FCR_DSEL0 | FCR_MTRON); } else fcr &= ~(FCR_DSEL0 | FCR_MTRON); if(fcr != sun3x_fdc.fcr) { *(sun3x_fdc.fcr_r) = fcr; sun3x_fdc.fcr = fcr; } } break; case 5: /* FD_DATA */ *(sun3x_fdc.data_r) = value; break; case 7: /* FD_DCR */ *(sun3x_fdc.status_r) = value; break; case 4: /* FD_STATUS */ *(sun3x_fdc.status_r) = value; break; } return; } asmlinkage irqreturn_t sun3xflop_hardint(int irq, void *dev_id) { register unsigned char st; #undef TRACE_FLPY_INT #define NO_FLOPPY_ASSEMBLER #ifdef TRACE_FLPY_INT static int calls=0; static int bytes=0; static int dma_wait=0; #endif if(!doing_pdma) { floppy_interrupt(irq, dev_id); return IRQ_HANDLED; } // pr_info("doing pdma\n");// st %x\n", sun_fdc->status_82072); #ifdef TRACE_FLPY_INT if(!calls) bytes = virtual_dma_count; #endif { register int lcount; register char *lptr; for(lcount=virtual_dma_count, lptr=virtual_dma_addr; lcount; lcount--, lptr++) { /* st=fd_inb(virtual_dma_port+4) & 0x80 ; */ st = *(sun3x_fdc.status_r); /* if(st != 0xa0) */ /* break; */ if((st & 0x80) == 0) { virtual_dma_count = lcount; virtual_dma_addr = lptr; return IRQ_HANDLED; } if((st & 0x20) == 0) break; if(virtual_dma_mode) /* fd_outb(*lptr, virtual_dma_port+5); */ *(sun3x_fdc.data_r) = *lptr; else /* *lptr = fd_inb(virtual_dma_port+5); */ *lptr = *(sun3x_fdc.data_r); } virtual_dma_count = lcount; virtual_dma_addr = lptr; /* st = fd_inb(virtual_dma_port+4); */ st = *(sun3x_fdc.status_r); } #ifdef TRACE_FLPY_INT calls++; #endif // pr_info("st=%02x\n", st); if(st == 0x20) return IRQ_HANDLED; if(!(st & 0x20)) { virtual_dma_residue += virtual_dma_count; virtual_dma_count=0; doing_pdma = 0; #ifdef TRACE_FLPY_INT pr_info("count=%x, residue=%x calls=%d bytes=%x dma_wait=%d\n", virtual_dma_count, virtual_dma_residue, calls, bytes, dma_wait); calls = 0; dma_wait=0; #endif floppy_interrupt(irq, dev_id); return IRQ_HANDLED; } #ifdef TRACE_FLPY_INT if(!virtual_dma_count) dma_wait++; #endif return IRQ_HANDLED; } static int sun3xflop_request_irq(void) { static int once = 0; int error; if(!once) { once = 1; error = request_irq(FLOPPY_IRQ, sun3xflop_hardint, 0, "floppy", NULL); return ((error == 0) ? 0 : -1); } else return 0; } static void __init floppy_set_flags(int *ints,int param, int param2); static int sun3xflop_init(void) { if(FLOPPY_IRQ < 0x40) FLOPPY_IRQ = SUN3X_FDC_IRQ; sun3x_fdc.status_r = (volatile unsigned char *)SUN3X_FDC; sun3x_fdc.data_r = (volatile unsigned char *)(SUN3X_FDC+1); sun3x_fdc.fcr_r = (volatile unsigned char *)SUN3X_FDC_FCR; sun3x_fdc.fvr_r = (volatile unsigned char *)SUN3X_FDC_FVR; sun3x_fdc.fcr = 0; /* Last minute sanity check... */ if(*sun3x_fdc.status_r == 0xff) { return -1; } *sun3x_fdc.fvr_r = FLOPPY_IRQ; *sun3x_fdc.fcr_r = FCR_TC; udelay(10); *sun3x_fdc.fcr_r = 0; /* Success... */ floppy_set_flags(NULL, 1, FD_BROKEN_DCL); // I don't know how to detect this. allowed_drive_mask = 0x01; return (int) SUN3X_FDC; } /* I'm not precisely sure this eject routine works */ static int sun3x_eject(void) { if(MACH_IS_SUN3X) { sun3x_fdc.fcr |= (FCR_DSEL0 | FCR_EJECT); *(sun3x_fdc.fcr_r) = sun3x_fdc.fcr; udelay(10); sun3x_fdc.fcr &= ~(FCR_DSEL0 | FCR_EJECT); *(sun3x_fdc.fcr_r) = sun3x_fdc.fcr; } return 0; } #define fd_eject(drive) sun3x_eject() #endif /* !(__ASM_SUN3X_FLOPPY_H) */
// SPDX-License-Identifier: ISC /* * Device Tree file for Intel XScale Network Processors * in the IXP 4xx series. */ #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/gpio/gpio.h> / { soc { #address-cells = <1>; #size-cells = <1>; ranges; compatible = "simple-bus"; interrupt-parent = <&intcon>; /* * The IXP4xx expansion bus is a set of up to 7 each up to 16MB * windows in the 256MB space from 0x50000000 to 0x5fffffff. */ bus@c4000000 { /* compatible and reg filled in by per-soc device tree */ native-endian; #address-cells = <2>; #size-cells = <1>; ranges = <0 0x0 0x50000000 0x01000000>, <1 0x0 0x51000000 0x01000000>, <2 0x0 0x52000000 0x01000000>, <3 0x0 0x53000000 0x01000000>, <4 0x0 0x54000000 0x01000000>, <5 0x0 0x55000000 0x01000000>, <6 0x0 0x56000000 0x01000000>, <7 0x0 0x57000000 0x01000000>; dma-ranges = <0 0x0 0x50000000 0x01000000>, <1 0x0 0x51000000 0x01000000>, <2 0x0 0x52000000 0x01000000>, <3 0x0 0x53000000 0x01000000>, <4 0x0 0x54000000 0x01000000>, <5 0x0 0x55000000 0x01000000>, <6 0x0 0x56000000 0x01000000>, <7 0x0 0x57000000 0x01000000>; }; qmgr: queue-manager@60000000 { compatible = "intel,ixp4xx-ahb-queue-manager"; reg = <0x60000000 0x4000>; interrupts = <3 IRQ_TYPE_LEVEL_HIGH>, <4 IRQ_TYPE_LEVEL_HIGH>; }; pci@c0000000 { /* compatible filled in by per-soc device tree */ reg = <0xc0000000 0x1000>; interrupts = <8 IRQ_TYPE_LEVEL_HIGH>, <9 IRQ_TYPE_LEVEL_HIGH>, <10 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <3>; #size-cells = <2>; device_type = "pci"; bus-range = <0x00 0xff>; status = "disabled"; ranges = /* * 64MB 32bit non-prefetchable memory 0x48000000-0x4bffffff * done in 4 chunks of 16MB each. */ <0x02000000 0 0x48000000 0x48000000 0 0x04000000>, /* 64KB I/O space at 0x4c000000 */ <0x01000000 0 0x00000000 0x4c000000 0 0x00010000>; /* * This needs to map to the start of physical memory so * PCI devices can see all (hopefully) memory. This is done * using 4 1:1 16MB windows, so the RAM should not be more than * 64 MB for this to work. If your memory is anywhere else * than at 0x0 you need to alter this. */ dma-ranges = <0x02000000 0 0x00000000 0x00000000 0 0x04000000>; /* Each unique DTS using PCI must specify the swizzling */ }; uart0: serial@c8000000 { compatible = "intel,xscale-uart"; reg = <0xc8000000 0x1000>; /* * The reg-offset and reg-shift is a side effect * of running the platform in big endian mode. */ reg-offset = <3>; reg-shift = <2>; interrupts = <15 IRQ_TYPE_LEVEL_HIGH>; clock-frequency = <14745600>; no-loopback-test; }; uart1: serial@c8001000 { compatible = "intel,xscale-uart"; reg = <0xc8001000 0x1000>; /* * The reg-offset and reg-shift is a side effect * of running the platform in big endian mode. */ reg-offset = <3>; reg-shift = <2>; interrupts = <13 IRQ_TYPE_LEVEL_HIGH>; clock-frequency = <14745600>; no-loopback-test; }; gpio0: gpio@c8004000 { compatible = "intel,ixp4xx-gpio"; reg = <0xc8004000 0x1000>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; intcon: interrupt-controller@c8003000 { /* * Note: no compatible string. The subvariant of the * chip needs to define what version it is. The * location of the interrupt controller is fixed in * memory across all variants. 
*/ reg = <0xc8003000 0x100>; interrupt-controller; #interrupt-cells = <2>; }; timer@c8005000 { compatible = "intel,ixp4xx-timer"; reg = <0xc8005000 0x100>; interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; }; npe: npe@c8006000 { compatible = "intel,ixp4xx-network-processing-engine"; reg = <0xc8006000 0x1000>, <0xc8007000 0x1000>, <0xc8008000 0x1000>; #address-cells = <1>; #size-cells = <0>; /* NPE-A contains two high-speed serial links */ hss@0 { compatible = "intel,ixp4xx-hss"; reg = <0>; intel,npe-handle = <&npe 0>; status = "disabled"; }; hss@1 { compatible = "intel,ixp4xx-hss"; reg = <1>; intel,npe-handle = <&npe 0>; status = "disabled"; }; /* NPE-C contains a crypto accelerator */ crypto { compatible = "intel,ixp4xx-crypto"; intel,npe-handle = <&npe 2>; queue-rx = <&qmgr 30>; queue-txready = <&qmgr 29>; }; }; /* This is known as EthB */ ethernet@c8009000 { compatible = "intel,ixp4xx-ethernet"; reg = <0xc8009000 0x1000>; status = "disabled"; /* Dummy values that depend on firmware */ queue-rx = <&qmgr 3>; queue-txready = <&qmgr 20>; intel,npe-handle = <&npe 1>; }; /* This is known as EthC */ ethernet@c800a000 { compatible = "intel,ixp4xx-ethernet"; reg = <0xc800a000 0x1000>; status = "disabled"; /* Dummy values that depend on firmware */ queue-rx = <&qmgr 0>; queue-txready = <&qmgr 0>; intel,npe-handle = <&npe 2>; }; /* This is known as EthA */ ethernet@c800c000 { compatible = "intel,ixp4xx-ethernet"; reg = <0xc800c000 0x1000>; status = "disabled"; intel,npe = <0>; /* Dummy values that depend on firmware */ queue-rx = <&qmgr 0>; queue-txready = <&qmgr 0>; }; }; };
// SPDX-License-Identifier: GPL-2.0 /* * Handle device page faults * * Copyright (C) 2020 ARM Ltd. */ #include <linux/iommu.h> #include <linux/list.h> #include <linux/sched/mm.h> #include <linux/slab.h> #include <linux/workqueue.h> #include "iommu-priv.h" /* * Return the fault parameter of a device if it exists. Otherwise, return NULL. * On a successful return, the caller takes a reference of this parameter and * should put it after use by calling iopf_put_dev_fault_param(). */ static struct iommu_fault_param *iopf_get_dev_fault_param(struct device *dev) { struct dev_iommu *param = dev->iommu; struct iommu_fault_param *fault_param; rcu_read_lock(); fault_param = rcu_dereference(param->fault_param); if (fault_param && !refcount_inc_not_zero(&fault_param->users)) fault_param = NULL; rcu_read_unlock(); return fault_param; } /* Caller must hold a reference of the fault parameter. */ static void iopf_put_dev_fault_param(struct iommu_fault_param *fault_param) { if (refcount_dec_and_test(&fault_param->users)) kfree_rcu(fault_param, rcu); } static void __iopf_free_group(struct iopf_group *group) { struct iopf_fault *iopf, *next; list_for_each_entry_safe(iopf, next, &group->faults, list) { if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) kfree(iopf); } /* Pair with iommu_report_device_fault(). */ iopf_put_dev_fault_param(group->fault_param); } void iopf_free_group(struct iopf_group *group) { __iopf_free_group(group); kfree(group); } EXPORT_SYMBOL_GPL(iopf_free_group); /* Non-last request of a group. Postpone until the last one. */ static int report_partial_fault(struct iommu_fault_param *fault_param, struct iommu_fault *fault) { struct iopf_fault *iopf; iopf = kzalloc(sizeof(*iopf), GFP_KERNEL); if (!iopf) return -ENOMEM; iopf->fault = *fault; mutex_lock(&fault_param->lock); list_add(&iopf->list, &fault_param->partial); mutex_unlock(&fault_param->lock); return 0; } static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param, struct iopf_fault *evt, struct iopf_group *abort_group) { struct iopf_fault *iopf, *next; struct iopf_group *group; group = kzalloc(sizeof(*group), GFP_KERNEL); if (!group) { /* * We always need to construct the group as we need it to abort * the request at the driver if it can't be handled. */ group = abort_group; } group->fault_param = iopf_param; group->last_fault.fault = evt->fault; INIT_LIST_HEAD(&group->faults); INIT_LIST_HEAD(&group->pending_node); list_add(&group->last_fault.list, &group->faults); /* See if we have partial faults for this group */ mutex_lock(&iopf_param->lock); list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { if (iopf->fault.prm.grpid == evt->fault.prm.grpid) /* Insert *before* the last fault */ list_move(&iopf->list, &group->faults); } list_add(&group->pending_node, &iopf_param->faults); mutex_unlock(&iopf_param->lock); group->fault_count = list_count_nodes(&group->faults); return group; } static struct iommu_attach_handle *find_fault_handler(struct device *dev, struct iopf_fault *evt) { struct iommu_fault *fault = &evt->fault; struct iommu_attach_handle *attach_handle; if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) { attach_handle = iommu_attach_handle_get(dev->iommu_group, fault->prm.pasid, 0); if (IS_ERR(attach_handle)) { const struct iommu_ops *ops = dev_iommu_ops(dev); if (!ops->user_pasid_table) return NULL; /* * The iommu driver for this device supports user- * managed PASID table. 
Therefore page faults for * any PASID should go through the NESTING domain * attached to the device RID. */ attach_handle = iommu_attach_handle_get( dev->iommu_group, IOMMU_NO_PASID, IOMMU_DOMAIN_NESTED); if (IS_ERR(attach_handle)) return NULL; } } else { attach_handle = iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0); if (IS_ERR(attach_handle)) return NULL; } if (!attach_handle->domain->iopf_handler) return NULL; return attach_handle; } static void iopf_error_response(struct device *dev, struct iopf_fault *evt) { const struct iommu_ops *ops = dev_iommu_ops(dev); struct iommu_fault *fault = &evt->fault; struct iommu_page_response resp = { .pasid = fault->prm.pasid, .grpid = fault->prm.grpid, .code = IOMMU_PAGE_RESP_INVALID }; ops->page_response(dev, evt, &resp); } /** * iommu_report_device_fault() - Report fault event to device driver * @dev: the device * @evt: fault event data * * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ * handler. If this function fails then ops->page_response() was called to * complete evt if required. * * This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't * expect a response. It may be generated when disabling a PASID (issuing a * PASID stop request) by some PCI devices. * * The PASID stop request is issued by the device driver before unbind(). Once * it completes, no page request is generated for this PASID anymore and * outstanding ones have been pushed to the IOMMU (as per PCIe 4.0r1.0 - 6.20.1 * and 10.4.1.2 - Managing PASID TLP Prefix Usage). Some PCI devices will wait * for all outstanding page requests to come back with a response before * completing the PASID stop request. Others do not wait for page responses, and * instead issue this Stop Marker that tells us when the PASID can be * reallocated. * * It is safe to discard the Stop Marker because it is an optimization. * a. Page requests, which are posted requests, have been flushed to the IOMMU * when the stop request completes. * b. The IOMMU driver flushes all fault queues on unbind() before freeing the * PASID. * * So even though the Stop Marker might be issued by the device *after* the stop * request completes, outstanding faults will have been dealt with by the time * the PASID is freed. * * Any valid page fault will be eventually routed to an iommu domain and the * page fault handler installed there will get called. The users of this * handling framework should guarantee that the iommu domain could only be * freed after the device has stopped generating page faults (or the iommu * hardware has been set to block the page faults) and the pending page faults * have been flushed. In case no page fault handler is attached or no iopf params * are setup, then the ops->page_response() is called to complete the evt. * * Returns 0 on success, or an error in case of a bad/failed iopf setup. 
*/ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt) { struct iommu_attach_handle *attach_handle; struct iommu_fault *fault = &evt->fault; struct iommu_fault_param *iopf_param; struct iopf_group abort_group = {}; struct iopf_group *group; attach_handle = find_fault_handler(dev, evt); if (!attach_handle) goto err_bad_iopf; /* * Something has gone wrong if a fault capable domain is attached but no * iopf_param is setup */ iopf_param = iopf_get_dev_fault_param(dev); if (WARN_ON(!iopf_param)) goto err_bad_iopf; if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) { int ret; ret = report_partial_fault(iopf_param, fault); iopf_put_dev_fault_param(iopf_param); /* A request that is not the last does not need to be ack'd */ return ret; } /* * This is the last page fault of a group. Allocate an iopf group and * pass it to domain's page fault handler. The group holds a reference * count of the fault parameter. It will be released after response or * error path of this function. If an error is returned, the caller * will send a response to the hardware. We need to clean up before * leaving, otherwise partial faults will be stuck. */ group = iopf_group_alloc(iopf_param, evt, &abort_group); if (group == &abort_group) goto err_abort; group->attach_handle = attach_handle; /* * On success iopf_handler must call iopf_group_response() and * iopf_free_group() */ if (group->attach_handle->domain->iopf_handler(group)) goto err_abort; return 0; err_abort: dev_warn_ratelimited(dev, "iopf with pasid %d aborted\n", fault->prm.pasid); iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE); if (group == &abort_group) __iopf_free_group(group); else iopf_free_group(group); return 0; err_bad_iopf: if (fault->type == IOMMU_FAULT_PAGE_REQ) iopf_error_response(dev, evt); return -EINVAL; } EXPORT_SYMBOL_GPL(iommu_report_device_fault); /** * iopf_queue_flush_dev - Ensure that all queued faults have been processed * @dev: the endpoint whose faults need to be flushed. * * The IOMMU driver calls this before releasing a PASID, to ensure that all * pending faults for this PASID have been handled, and won't hit the address * space of the next process that uses this PASID. The driver must make sure * that no new fault is added to the queue. In particular it must flush its * low-level queue before calling this function. * * Return: 0 on success and <0 on error. */ int iopf_queue_flush_dev(struct device *dev) { struct iommu_fault_param *iopf_param; /* * It's a driver bug to be here after iopf_queue_remove_device(). * Therefore, it's safe to dereference the fault parameter without * holding the lock. 
*/ iopf_param = rcu_dereference_check(dev->iommu->fault_param, true); if (WARN_ON(!iopf_param)) return -ENODEV; flush_workqueue(iopf_param->queue->wq); return 0; } EXPORT_SYMBOL_GPL(iopf_queue_flush_dev); /** * iopf_group_response - Respond a group of page faults * @group: the group of faults with the same group id * @status: the response code */ void iopf_group_response(struct iopf_group *group, enum iommu_page_response_code status) { struct iommu_fault_param *fault_param = group->fault_param; struct iopf_fault *iopf = &group->last_fault; struct device *dev = group->fault_param->dev; const struct iommu_ops *ops = dev_iommu_ops(dev); struct iommu_page_response resp = { .pasid = iopf->fault.prm.pasid, .grpid = iopf->fault.prm.grpid, .code = status, }; /* Only send response if there is a fault report pending */ mutex_lock(&fault_param->lock); if (!list_empty(&group->pending_node)) { ops->page_response(dev, &group->last_fault, &resp); list_del_init(&group->pending_node); } mutex_unlock(&fault_param->lock); } EXPORT_SYMBOL_GPL(iopf_group_response); /** * iopf_queue_discard_partial - Remove all pending partial fault * @queue: the queue whose partial faults need to be discarded * * When the hardware queue overflows, last page faults in a group may have been * lost and the IOMMU driver calls this to discard all partial faults. The * driver shouldn't be adding new faults to this queue concurrently. * * Return: 0 on success and <0 on error. */ int iopf_queue_discard_partial(struct iopf_queue *queue) { struct iopf_fault *iopf, *next; struct iommu_fault_param *iopf_param; if (!queue) return -EINVAL; mutex_lock(&queue->lock); list_for_each_entry(iopf_param, &queue->devices, queue_list) { mutex_lock(&iopf_param->lock); list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { list_del(&iopf->list); kfree(iopf); } mutex_unlock(&iopf_param->lock); } mutex_unlock(&queue->lock); return 0; } EXPORT_SYMBOL_GPL(iopf_queue_discard_partial); /** * iopf_queue_add_device - Add producer to the fault queue * @queue: IOPF queue * @dev: device to add * * Return: 0 on success and <0 on error. */ int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev) { int ret = 0; struct dev_iommu *param = dev->iommu; struct iommu_fault_param *fault_param; const struct iommu_ops *ops = dev_iommu_ops(dev); if (!ops->page_response) return -ENODEV; mutex_lock(&queue->lock); mutex_lock(&param->lock); if (rcu_dereference_check(param->fault_param, lockdep_is_held(&param->lock))) { ret = -EBUSY; goto done_unlock; } fault_param = kzalloc(sizeof(*fault_param), GFP_KERNEL); if (!fault_param) { ret = -ENOMEM; goto done_unlock; } mutex_init(&fault_param->lock); INIT_LIST_HEAD(&fault_param->faults); INIT_LIST_HEAD(&fault_param->partial); fault_param->dev = dev; refcount_set(&fault_param->users, 1); list_add(&fault_param->queue_list, &queue->devices); fault_param->queue = queue; rcu_assign_pointer(param->fault_param, fault_param); done_unlock: mutex_unlock(&param->lock); mutex_unlock(&queue->lock); return ret; } EXPORT_SYMBOL_GPL(iopf_queue_add_device); /** * iopf_queue_remove_device - Remove producer from fault queue * @queue: IOPF queue * @dev: device to remove * * Removing a device from an iopf_queue. It's recommended to follow these * steps when removing a device: * * - Disable new PRI reception: Turn off PRI generation in the IOMMU hardware * and flush any hardware page request queues. This should be done before * calling into this helper. 
* - Acknowledge all outstanding PRQs to the device: Respond to all outstanding * page requests with IOMMU_PAGE_RESP_INVALID, indicating the device should * not retry. This helper function handles this. * - Disable PRI on the device: After calling this helper, the caller could * then disable PRI on the device. * * Calling iopf_queue_remove_device() essentially disassociates the device. * The fault_param might still exist, but iommu_page_response() will do * nothing. The device fault parameter reference count has been properly * passed from iommu_report_device_fault() to the fault handling work, and * will eventually be released after iommu_page_response(). */ void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev) { struct iopf_fault *partial_iopf; struct iopf_fault *next; struct iopf_group *group, *temp; struct dev_iommu *param = dev->iommu; struct iommu_fault_param *fault_param; const struct iommu_ops *ops = dev_iommu_ops(dev); mutex_lock(&queue->lock); mutex_lock(&param->lock); fault_param = rcu_dereference_check(param->fault_param, lockdep_is_held(&param->lock)); if (WARN_ON(!fault_param || fault_param->queue != queue)) goto unlock; mutex_lock(&fault_param->lock); list_for_each_entry_safe(partial_iopf, next, &fault_param->partial, list) kfree(partial_iopf); list_for_each_entry_safe(group, temp, &fault_param->faults, pending_node) { struct iopf_fault *iopf = &group->last_fault; struct iommu_page_response resp = { .pasid = iopf->fault.prm.pasid, .grpid = iopf->fault.prm.grpid, .code = IOMMU_PAGE_RESP_INVALID }; ops->page_response(dev, iopf, &resp); list_del_init(&group->pending_node); } mutex_unlock(&fault_param->lock); list_del(&fault_param->queue_list); /* dec the ref owned by iopf_queue_add_device() */ rcu_assign_pointer(param->fault_param, NULL); iopf_put_dev_fault_param(fault_param); unlock: mutex_unlock(&param->lock); mutex_unlock(&queue->lock); } EXPORT_SYMBOL_GPL(iopf_queue_remove_device); /** * iopf_queue_alloc - Allocate and initialize a fault queue * @name: a unique string identifying the queue (for workqueue) * * Return: the queue on success and NULL on error. */ struct iopf_queue *iopf_queue_alloc(const char *name) { struct iopf_queue *queue; queue = kzalloc(sizeof(*queue), GFP_KERNEL); if (!queue) return NULL; /* * The WQ is unordered because the low-level handler enqueues faults by * group. PRI requests within a group have to be ordered, but once * that's dealt with, the high-level function can handle groups out of * order. */ queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name); if (!queue->wq) { kfree(queue); return NULL; } INIT_LIST_HEAD(&queue->devices); mutex_init(&queue->lock); return queue; } EXPORT_SYMBOL_GPL(iopf_queue_alloc); /** * iopf_queue_free - Free IOPF queue * @queue: queue to free * * Counterpart to iopf_queue_alloc(). The driver must not be queuing faults or * adding/removing devices on this queue anymore. */ void iopf_queue_free(struct iopf_queue *queue) { struct iommu_fault_param *iopf_param, *next; if (!queue) return; list_for_each_entry_safe(iopf_param, next, &queue->devices, queue_list) iopf_queue_remove_device(queue, iopf_param->dev); destroy_workqueue(queue->wq); kfree(queue); } EXPORT_SYMBOL_GPL(iopf_queue_free);
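/*
 * Illustrative only, not part of the file above: a hedged sketch of the
 * iopf_queue lifecycle these helpers describe. A hypothetical IOMMU driver
 * allocates one queue per IOMMU instance, registers each PRI-capable
 * endpoint, flushes before a PASID is reused, and unregisters the device on
 * teardown (which answers anything still outstanding with
 * IOMMU_PAGE_RESP_INVALID); iopf_queue_free() is the counterpart called when
 * the IOMMU instance itself goes away. The my_iommu_* names are made up for
 * the sketch.
 */
static int my_iommu_enable_iopf(struct my_iommu *iommu, struct device *dev)
{
	if (!iommu->iopf_queue) {
		iommu->iopf_queue = iopf_queue_alloc(dev_name(iommu->dev));
		if (!iommu->iopf_queue)
			return -ENOMEM;
	}

	/* From here on the device may start feeding faults into the queue. */
	return iopf_queue_add_device(iommu->iopf_queue, dev);
}

static void my_iommu_free_pasid(struct my_iommu *iommu, struct device *dev,
				ioasid_t pasid)
{
	/*
	 * Stop the device from issuing new requests for this PASID and drain
	 * the hardware PRQ first (driver-specific), then make sure every
	 * queued fault has been handled before the PASID can be reused.
	 */
	my_iommu_drain_prq(iommu, dev, pasid);
	iopf_queue_flush_dev(dev);
}

static void my_iommu_disable_iopf(struct my_iommu *iommu, struct device *dev)
{
	/* PRI generation must already be off and the hardware queue drained. */
	iopf_queue_remove_device(iommu->iopf_queue, dev);
}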
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ #include <test_progs.h> #include "arena_atomics.skel.h" static void test_add(struct arena_atomics *skel) { LIBBPF_OPTS(bpf_test_run_opts, topts); int err, prog_fd; /* No need to attach it, just run it directly */ prog_fd = bpf_program__fd(skel->progs.add); err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) return; ASSERT_EQ(skel->arena->add64_value, 3, "add64_value"); ASSERT_EQ(skel->arena->add64_result, 1, "add64_result"); ASSERT_EQ(skel->arena->add32_value, 3, "add32_value"); ASSERT_EQ(skel->arena->add32_result, 1, "add32_result"); ASSERT_EQ(skel->arena->add_stack_value_copy, 3, "add_stack_value"); ASSERT_EQ(skel->arena->add_stack_result, 1, "add_stack_result"); ASSERT_EQ(skel->arena->add_noreturn_value, 3, "add_noreturn_value"); } static void test_sub(struct arena_atomics *skel) { LIBBPF_OPTS(bpf_test_run_opts, topts); int err, prog_fd; /* No need to attach it, just run it directly */ prog_fd = bpf_program__fd(skel->progs.sub); err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) return; ASSERT_EQ(skel->arena->sub64_value, -1, "sub64_value"); ASSERT_EQ(skel->arena->sub64_result, 1, "sub64_result"); ASSERT_EQ(skel->arena->sub32_value, -1, "sub32_value"); ASSERT_EQ(skel->arena->sub32_result, 1, "sub32_result"); ASSERT_EQ(skel->arena->sub_stack_value_copy, -1, "sub_stack_value"); ASSERT_EQ(skel->arena->sub_stack_result, 1, "sub_stack_result"); ASSERT_EQ(skel->arena->sub_noreturn_value, -1, "sub_noreturn_value"); } static void test_and(struct arena_atomics *skel) { LIBBPF_OPTS(bpf_test_run_opts, topts); int err, prog_fd; /* No need to attach it, just run it directly */ prog_fd = bpf_program__fd(skel->progs.and); err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) return; ASSERT_EQ(skel->arena->and64_value, 0x010ull << 32, "and64_value"); ASSERT_EQ(skel->arena->and32_value, 0x010, "and32_value"); } static void test_or(struct arena_atomics *skel) { LIBBPF_OPTS(bpf_test_run_opts, topts); int err, prog_fd; /* No need to attach it, just run it directly */ prog_fd = bpf_program__fd(skel->progs.or); err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) return; ASSERT_EQ(skel->arena->or64_value, 0x111ull << 32, "or64_value"); ASSERT_EQ(skel->arena->or32_value, 0x111, "or32_value"); } static void test_xor(struct arena_atomics *skel) { LIBBPF_OPTS(bpf_test_run_opts, topts); int err, prog_fd; /* No need to attach it, just run it directly */ prog_fd = bpf_program__fd(skel->progs.xor); err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) return; ASSERT_EQ(skel->arena->xor64_value, 0x101ull << 32, "xor64_value"); ASSERT_EQ(skel->arena->xor32_value, 0x101, "xor32_value"); } static void test_cmpxchg(struct arena_atomics *skel) { LIBBPF_OPTS(bpf_test_run_opts, topts); int err, prog_fd; /* No need to attach it, just run it directly */ prog_fd = bpf_program__fd(skel->progs.cmpxchg); err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) 
return; ASSERT_EQ(skel->arena->cmpxchg64_value, 2, "cmpxchg64_value"); ASSERT_EQ(skel->arena->cmpxchg64_result_fail, 1, "cmpxchg_result_fail"); ASSERT_EQ(skel->arena->cmpxchg64_result_succeed, 1, "cmpxchg_result_succeed"); ASSERT_EQ(skel->arena->cmpxchg32_value, 2, "lcmpxchg32_value"); ASSERT_EQ(skel->arena->cmpxchg32_result_fail, 1, "cmpxchg_result_fail"); ASSERT_EQ(skel->arena->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed"); } static void test_xchg(struct arena_atomics *skel) { LIBBPF_OPTS(bpf_test_run_opts, topts); int err, prog_fd; /* No need to attach it, just run it directly */ prog_fd = bpf_program__fd(skel->progs.xchg); err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) return; ASSERT_EQ(skel->arena->xchg64_value, 2, "xchg64_value"); ASSERT_EQ(skel->arena->xchg64_result, 1, "xchg64_result"); ASSERT_EQ(skel->arena->xchg32_value, 2, "xchg32_value"); ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result"); } static void test_uaf(struct arena_atomics *skel) { LIBBPF_OPTS(bpf_test_run_opts, topts); int err, prog_fd; /* No need to attach it, just run it directly */ prog_fd = bpf_program__fd(skel->progs.uaf); err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) return; ASSERT_EQ(skel->arena->uaf_recovery_fails, 0, "uaf_recovery_fails"); } void test_arena_atomics(void) { struct arena_atomics *skel; int err; skel = arena_atomics__open(); if (!ASSERT_OK_PTR(skel, "arena atomics skeleton open")) return; if (skel->data->skip_tests) { printf("%s:SKIP:no ENABLE_ATOMICS_TESTS or no addr_space_cast support in clang", __func__); test__skip(); goto cleanup; } err = arena_atomics__load(skel); if (!ASSERT_OK(err, "arena atomics skeleton load")) return; skel->bss->pid = getpid(); if (test__start_subtest("add")) test_add(skel); if (test__start_subtest("sub")) test_sub(skel); if (test__start_subtest("and")) test_and(skel); if (test__start_subtest("or")) test_or(skel); if (test__start_subtest("xor")) test_xor(skel); if (test__start_subtest("cmpxchg")) test_cmpxchg(skel); if (test__start_subtest("xchg")) test_xchg(skel); if (test__start_subtest("uaf")) test_uaf(skel); cleanup: arena_atomics__destroy(skel); }
// SPDX-License-Identifier: GPL-2.0 /* Generic part */ typedef struct { block_t *p; block_t key; struct buffer_head *bh; } Indirect; static DEFINE_RWLOCK(pointers_lock); static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v) { p->key = *(p->p = v); p->bh = bh; } static inline int verify_chain(Indirect *from, Indirect *to) { while (from <= to && from->key == *from->p) from++; return (from > to); } static inline block_t *block_end(struct buffer_head *bh) { return (block_t *)((char*)bh->b_data + bh->b_size); } static inline Indirect *get_branch(struct inode *inode, int depth, int *offsets, Indirect chain[DEPTH], int *err) { struct super_block *sb = inode->i_sb; Indirect *p = chain; struct buffer_head *bh; *err = 0; /* i_data is not going away, no lock needed */ add_chain (chain, NULL, i_data(inode) + *offsets); if (!p->key) goto no_block; while (--depth) { bh = sb_bread(sb, block_to_cpu(p->key)); if (!bh) goto failure; read_lock(&pointers_lock); if (!verify_chain(chain, p)) goto changed; add_chain(++p, bh, (block_t *)bh->b_data + *++offsets); read_unlock(&pointers_lock); if (!p->key) goto no_block; } return NULL; changed: read_unlock(&pointers_lock); brelse(bh); *err = -EAGAIN; goto no_block; failure: *err = -EIO; no_block: return p; } static int alloc_branch(struct inode *inode, int num, int *offsets, Indirect *branch) { int n = 0; int i; int parent = minix_new_block(inode); int err = -ENOSPC; branch[0].key = cpu_to_block(parent); if (parent) for (n = 1; n < num; n++) { struct buffer_head *bh; /* Allocate the next block */ int nr = minix_new_block(inode); if (!nr) break; branch[n].key = cpu_to_block(nr); bh = sb_getblk(inode->i_sb, parent); if (!bh) { minix_free_block(inode, nr); err = -ENOMEM; break; } lock_buffer(bh); memset(bh->b_data, 0, bh->b_size); branch[n].bh = bh; branch[n].p = (block_t*) bh->b_data + offsets[n]; *branch[n].p = branch[n].key; set_buffer_uptodate(bh); unlock_buffer(bh); mark_buffer_dirty_inode(bh, inode); parent = nr; } if (n == num) return 0; /* Allocation failed, free what we already allocated */ for (i = 1; i < n; i++) bforget(branch[i].bh); for (i = 0; i < n; i++) minix_free_block(inode, block_to_cpu(branch[i].key)); return err; } static inline int splice_branch(struct inode *inode, Indirect chain[DEPTH], Indirect *where, int num) { int i; write_lock(&pointers_lock); /* Verify that place we are splicing to is still there and vacant */ if (!verify_chain(chain, where-1) || *where->p) goto changed; *where->p = where->key; write_unlock(&pointers_lock); /* We are done with atomic stuff, now do the rest of housekeeping */ inode_set_ctime_current(inode); /* had we spliced it onto indirect block? 
*/ if (where->bh) mark_buffer_dirty_inode(where->bh, inode); mark_inode_dirty(inode); return 0; changed: write_unlock(&pointers_lock); for (i = 1; i < num; i++) bforget(where[i].bh); for (i = 0; i < num; i++) minix_free_block(inode, block_to_cpu(where[i].key)); return -EAGAIN; } static int get_block(struct inode * inode, sector_t block, struct buffer_head *bh, int create) { int err = -EIO; int offsets[DEPTH]; Indirect chain[DEPTH]; Indirect *partial; int left; int depth = block_to_path(inode, block, offsets); if (depth == 0) goto out; reread: partial = get_branch(inode, depth, offsets, chain, &err); /* Simplest case - block found, no allocation needed */ if (!partial) { got_it: map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key)); /* Clean up and exit */ partial = chain+depth-1; /* the whole chain */ goto cleanup; } /* Next simple case - plain lookup or failed read of indirect block */ if (!create || err == -EIO) { cleanup: while (partial > chain) { brelse(partial->bh); partial--; } out: return err; } /* * Indirect block might be removed by truncate while we were * reading it. Handling of that case (forget what we've got and * reread) is taken out of the main path. */ if (err == -EAGAIN) goto changed; left = (chain + depth) - partial; err = alloc_branch(inode, left, offsets+(partial-chain), partial); if (err) goto cleanup; if (splice_branch(inode, chain, partial, left) < 0) goto changed; set_buffer_new(bh); goto got_it; changed: while (partial > chain) { brelse(partial->bh); partial--; } goto reread; } static inline int all_zeroes(block_t *p, block_t *q) { while (p < q) if (*p++) return 0; return 1; } static Indirect *find_shared(struct inode *inode, int depth, int offsets[DEPTH], Indirect chain[DEPTH], block_t *top) { Indirect *partial, *p; int k, err; *top = 0; for (k = depth; k > 1 && !offsets[k-1]; k--) ; partial = get_branch(inode, k, offsets, chain, &err); write_lock(&pointers_lock); if (!partial) partial = chain + k-1; if (!partial->key && *partial->p) { write_unlock(&pointers_lock); goto no_top; } for (p=partial;p>chain && all_zeroes((block_t*)p->bh->b_data,p->p);p--) ; if (p == chain + k - 1 && p > chain) { p->p--; } else { *top = *p->p; *p->p = 0; } write_unlock(&pointers_lock); while(partial > p) { brelse(partial->bh); partial--; } no_top: return partial; } static inline void free_data(struct inode *inode, block_t *p, block_t *q) { unsigned long nr; for ( ; p < q ; p++) { nr = block_to_cpu(*p); if (nr) { *p = 0; minix_free_block(inode, nr); } } } static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth) { struct buffer_head * bh; unsigned long nr; if (depth--) { for ( ; p < q ; p++) { nr = block_to_cpu(*p); if (!nr) continue; *p = 0; bh = sb_bread(inode->i_sb, nr); if (!bh) continue; free_branches(inode, (block_t*)bh->b_data, block_end(bh), depth); bforget(bh); minix_free_block(inode, nr); mark_inode_dirty(inode); } } else free_data(inode, p, q); } static inline void truncate (struct inode * inode) { struct super_block *sb = inode->i_sb; block_t *idata = i_data(inode); int offsets[DEPTH]; Indirect chain[DEPTH]; Indirect *partial; block_t nr = 0; int n; int first_whole; long iblock; iblock = (inode->i_size + sb->s_blocksize -1) >> sb->s_blocksize_bits; block_truncate_page(inode->i_mapping, inode->i_size, get_block); n = block_to_path(inode, iblock, offsets); if (!n) return; if (n == 1) { free_data(inode, idata+offsets[0], idata + DIRECT); first_whole = 0; goto do_indirects; } first_whole = offsets[0] + 1 - DIRECT; partial = find_shared(inode, n, 
offsets, chain, &nr); if (nr) { if (partial == chain) mark_inode_dirty(inode); else mark_buffer_dirty_inode(partial->bh, inode); free_branches(inode, &nr, &nr+1, (chain+n-1) - partial); } /* Clear the ends of indirect blocks on the shared branch */ while (partial > chain) { free_branches(inode, partial->p + 1, block_end(partial->bh), (chain+n-1) - partial); mark_buffer_dirty_inode(partial->bh, inode); brelse (partial->bh); partial--; } do_indirects: /* Kill the remaining (whole) subtrees */ while (first_whole < DEPTH-1) { nr = idata[DIRECT+first_whole]; if (nr) { idata[DIRECT+first_whole] = 0; mark_inode_dirty(inode); free_branches(inode, &nr, &nr+1, first_whole+1); } first_whole++; } inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); mark_inode_dirty(inode); } static inline unsigned nblocks(loff_t size, struct super_block *sb) { int k = sb->s_blocksize_bits - 10; unsigned blocks, res, direct = DIRECT, i = DEPTH; blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k); res = blocks; while (--i && blocks > direct) { blocks -= direct; blocks += sb->s_blocksize/sizeof(block_t) - 1; blocks /= sb->s_blocksize/sizeof(block_t); res += blocks; direct = 1; } return res; }
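/*
 * Worked example for nblocks(), illustrative only and assuming the V2
 * on-disk layout that includes this file (DIRECT == 7 direct pointers,
 * DEPTH == 4, 1 KiB blocks, so 256 block_t pointers per indirect block):
 * a 1 MiB file gives blocks = 1024 data blocks. The first loop pass adds
 * (1024 - 7 + 255) / 256 = 4 indirect blocks, the second adds
 * (4 - 1 + 255) / 256 = 1 double-indirect block, and the loop then stops,
 * so nblocks() returns 1024 + 4 + 1 = 1029.
 */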
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for the Conexant CX23885/7/8 PCIe bridge * * Infrared remote control input device * * Most of this file is * * Copyright (C) 2009 Andy Walls <[email protected]> * * However, the cx23885_input_{init,fini} functions contained herein are * derived from Linux kernel files linux/media/video/.../...-input.c marked as: * * Copyright (C) 2008 <srinivasa.deevi at conexant dot com> * Copyright (C) 2005 Ludovico Cavedon <[email protected]> * Markus Rechberger <[email protected]> * Mauro Carvalho Chehab <[email protected]> * Sascha Sommer <[email protected]> * Copyright (C) 2004, 2005 Chris Pascoe * Copyright (C) 2003, 2004 Gerd Knorr * Copyright (C) 2003 Pavel Machek */ #include "cx23885.h" #include "cx23885-input.h" #include <linux/slab.h> #include <media/rc-core.h> #include <media/v4l2-subdev.h> #define MODULE_NAME "cx23885" static void cx23885_input_process_measurements(struct cx23885_dev *dev, bool overrun) { struct cx23885_kernel_ir *kernel_ir = dev->kernel_ir; ssize_t num; int count, i; bool handle = false; struct ir_raw_event ir_core_event[64]; do { num = 0; v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event, sizeof(ir_core_event), &num); count = num / sizeof(struct ir_raw_event); for (i = 0; i < count; i++) { ir_raw_event_store(kernel_ir->rc, &ir_core_event[i]); handle = true; } } while (num != 0); if (overrun) ir_raw_event_overflow(kernel_ir->rc); else if (handle) ir_raw_event_handle(kernel_ir->rc); } void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events) { struct v4l2_subdev_ir_parameters params; int overrun, data_available; if (dev->sd_ir == NULL || events == 0) return; switch (dev->board) { case CX23885_BOARD_HAUPPAUGE_HVR1270: case CX23885_BOARD_HAUPPAUGE_HVR1850: case CX23885_BOARD_HAUPPAUGE_HVR1290: case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL: case CX23885_BOARD_TEVII_S470: case CX23885_BOARD_HAUPPAUGE_HVR1250: case CX23885_BOARD_MYGICA_X8507: case CX23885_BOARD_TBS_6980: case CX23885_BOARD_TBS_6981: case CX23885_BOARD_DVBSKY_T9580: case CX23885_BOARD_DVBSKY_T980C: case CX23885_BOARD_DVBSKY_S950C: case CX23885_BOARD_TT_CT2_4500_CI: case CX23885_BOARD_DVBSKY_S950: case CX23885_BOARD_DVBSKY_S952: case CX23885_BOARD_DVBSKY_T982: case CX23885_BOARD_HAUPPAUGE_HVR1265_K4: /* * The only boards we handle right now. 
However other boards * using the CX2388x integrated IR controller should be similar */ break; default: return; } overrun = events & (V4L2_SUBDEV_IR_RX_SW_FIFO_OVERRUN | V4L2_SUBDEV_IR_RX_HW_FIFO_OVERRUN); data_available = events & (V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED | V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ); if (overrun) { /* If there was a FIFO overrun, stop the device */ v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params); params.enable = false; /* Mitigate race with cx23885_input_ir_stop() */ params.shutdown = atomic_read(&dev->ir_input_stopping); v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params); } if (data_available) cx23885_input_process_measurements(dev, overrun); if (overrun) { /* If there was a FIFO overrun, clear & restart the device */ params.enable = true; /* Mitigate race with cx23885_input_ir_stop() */ params.shutdown = atomic_read(&dev->ir_input_stopping); v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params); } } static int cx23885_input_ir_start(struct cx23885_dev *dev) { struct v4l2_subdev_ir_parameters params; if (dev->sd_ir == NULL) return -ENODEV; atomic_set(&dev->ir_input_stopping, 0); v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params); switch (dev->board) { case CX23885_BOARD_HAUPPAUGE_HVR1270: case CX23885_BOARD_HAUPPAUGE_HVR1850: case CX23885_BOARD_HAUPPAUGE_HVR1290: case CX23885_BOARD_HAUPPAUGE_HVR1250: case CX23885_BOARD_MYGICA_X8507: case CX23885_BOARD_DVBSKY_T9580: case CX23885_BOARD_DVBSKY_T980C: case CX23885_BOARD_DVBSKY_S950C: case CX23885_BOARD_TT_CT2_4500_CI: case CX23885_BOARD_DVBSKY_S950: case CX23885_BOARD_DVBSKY_S952: case CX23885_BOARD_DVBSKY_T982: case CX23885_BOARD_HAUPPAUGE_HVR1265_K4: /* * The IR controller on this board only returns pulse widths. * Any other mode setting will fail to set up the device. */ params.mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH; params.enable = true; params.interrupt_enable = true; params.shutdown = false; /* Setup for baseband compatible with both RC-5 and RC-6A */ params.modulation = false; /* RC-5: 2,222,222 ns = 1/36 kHz * 32 cycles * 2 marks * 1.25*/ /* RC-6A: 3,333,333 ns = 1/36 kHz * 16 cycles * 6 marks * 1.25*/ params.max_pulse_width = 3333333; /* ns */ /* RC-5: 666,667 ns = 1/36 kHz * 32 cycles * 1 mark * 0.75 */ /* RC-6A: 333,333 ns = 1/36 kHz * 16 cycles * 1 mark * 0.75 */ params.noise_filter_min_width = 333333; /* ns */ /* * This board has inverted receive sense: * mark is received as low logic level; * falling edges are detected as rising edges; etc. */ params.invert_level = true; break; case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL: case CX23885_BOARD_TEVII_S470: case CX23885_BOARD_TBS_6980: case CX23885_BOARD_TBS_6981: /* * The IR controller on this board only returns pulse widths. * Any other mode setting will fail to set up the device. 
*/ params.mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH; params.enable = true; params.interrupt_enable = true; params.shutdown = false; /* Setup for a standard NEC protocol */ params.carrier_freq = 37917; /* Hz, 455 kHz/12 for NEC */ params.carrier_range_lower = 33000; /* Hz */ params.carrier_range_upper = 43000; /* Hz */ params.duty_cycle = 33; /* percent, 33 percent for NEC */ /* * NEC max pulse width: (64/3)/(455 kHz/12) * 16 nec_units * (64/3)/(455 kHz/12) * 16 nec_units * 1.375 = 12378022 ns */ params.max_pulse_width = 12378022; /* ns */ /* * NEC noise filter min width: (64/3)/(455 kHz/12) * 1 nec_unit * (64/3)/(455 kHz/12) * 1 nec_units * 0.625 = 351648 ns */ params.noise_filter_min_width = 351648; /* ns */ params.modulation = false; params.invert_level = true; break; } v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params); return 0; } static int cx23885_input_ir_open(struct rc_dev *rc) { struct cx23885_kernel_ir *kernel_ir = rc->priv; if (kernel_ir->cx == NULL) return -ENODEV; return cx23885_input_ir_start(kernel_ir->cx); } static void cx23885_input_ir_stop(struct cx23885_dev *dev) { struct v4l2_subdev_ir_parameters params; if (dev->sd_ir == NULL) return; /* * Stop the sd_ir subdevice from generating notifications and * scheduling work. * It is shutdown this way in order to mitigate a race with * cx23885_input_rx_work_handler() in the overrun case, which could * re-enable the subdevice. */ atomic_set(&dev->ir_input_stopping, 1); v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params); while (params.shutdown == false) { params.enable = false; params.interrupt_enable = false; params.shutdown = true; v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params); v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params); } flush_work(&dev->cx25840_work); flush_work(&dev->ir_rx_work); flush_work(&dev->ir_tx_work); } static void cx23885_input_ir_close(struct rc_dev *rc) { struct cx23885_kernel_ir *kernel_ir = rc->priv; if (kernel_ir->cx != NULL) cx23885_input_ir_stop(kernel_ir->cx); } int cx23885_input_init(struct cx23885_dev *dev) { struct cx23885_kernel_ir *kernel_ir; struct rc_dev *rc; char *rc_map; u64 allowed_protos; int ret; /* * If the IR device (hardware registers, chip, GPIO lines, etc.) isn't * encapsulated in a v4l2_subdev, then I'm not going to deal with it. 
*/ if (dev->sd_ir == NULL) return -ENODEV; switch (dev->board) { case CX23885_BOARD_HAUPPAUGE_HVR1270: case CX23885_BOARD_HAUPPAUGE_HVR1850: case CX23885_BOARD_HAUPPAUGE_HVR1290: case CX23885_BOARD_HAUPPAUGE_HVR1250: case CX23885_BOARD_HAUPPAUGE_HVR1265_K4: /* Integrated CX2388[58] IR controller */ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; /* The grey Hauppauge RC-5 remote */ rc_map = RC_MAP_HAUPPAUGE; break; case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL: /* Integrated CX23885 IR controller */ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; /* The grey Terratec remote with orange buttons */ rc_map = RC_MAP_NEC_TERRATEC_CINERGY_XS; break; case CX23885_BOARD_TEVII_S470: /* Integrated CX23885 IR controller */ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; /* A guess at the remote */ rc_map = RC_MAP_TEVII_NEC; break; case CX23885_BOARD_MYGICA_X8507: /* Integrated CX23885 IR controller */ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; /* A guess at the remote */ rc_map = RC_MAP_TOTAL_MEDIA_IN_HAND_02; break; case CX23885_BOARD_TBS_6980: case CX23885_BOARD_TBS_6981: /* Integrated CX23885 IR controller */ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; /* A guess at the remote */ rc_map = RC_MAP_TBS_NEC; break; case CX23885_BOARD_DVBSKY_T9580: case CX23885_BOARD_DVBSKY_T980C: case CX23885_BOARD_DVBSKY_S950C: case CX23885_BOARD_DVBSKY_S950: case CX23885_BOARD_DVBSKY_S952: case CX23885_BOARD_DVBSKY_T982: /* Integrated CX23885 IR controller */ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; rc_map = RC_MAP_DVBSKY; break; case CX23885_BOARD_TT_CT2_4500_CI: /* Integrated CX23885 IR controller */ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; rc_map = RC_MAP_TT_1500; break; default: return -ENODEV; } /* cx23885 board instance kernel IR state */ kernel_ir = kzalloc(sizeof(struct cx23885_kernel_ir), GFP_KERNEL); if (kernel_ir == NULL) return -ENOMEM; kernel_ir->cx = dev; kernel_ir->name = kasprintf(GFP_KERNEL, "cx23885 IR (%s)", cx23885_boards[dev->board].name); if (!kernel_ir->name) { ret = -ENOMEM; goto err_out_free; } kernel_ir->phys = kasprintf(GFP_KERNEL, "pci-%s/ir0", pci_name(dev->pci)); if (!kernel_ir->phys) { ret = -ENOMEM; goto err_out_free_name; } /* input device */ rc = rc_allocate_device(RC_DRIVER_IR_RAW); if (!rc) { ret = -ENOMEM; goto err_out_free_phys; } kernel_ir->rc = rc; rc->device_name = kernel_ir->name; rc->input_phys = kernel_ir->phys; rc->input_id.bustype = BUS_PCI; rc->input_id.version = 1; if (dev->pci->subsystem_vendor) { rc->input_id.vendor = dev->pci->subsystem_vendor; rc->input_id.product = dev->pci->subsystem_device; } else { rc->input_id.vendor = dev->pci->vendor; rc->input_id.product = dev->pci->device; } rc->dev.parent = &dev->pci->dev; rc->allowed_protocols = allowed_protos; rc->priv = kernel_ir; rc->open = cx23885_input_ir_open; rc->close = cx23885_input_ir_close; rc->map_name = rc_map; rc->driver_name = MODULE_NAME; /* Go */ dev->kernel_ir = kernel_ir; ret = rc_register_device(rc); if (ret) goto err_out_stop; return 0; err_out_stop: cx23885_input_ir_stop(dev); dev->kernel_ir = NULL; rc_free_device(rc); err_out_free_phys: kfree(kernel_ir->phys); err_out_free_name: kfree(kernel_ir->name); err_out_free: kfree(kernel_ir); return ret; } void cx23885_input_fini(struct cx23885_dev *dev) { /* Always stop the IR hardware from generating interrupts */ cx23885_input_ir_stop(dev); if (dev->kernel_ir == NULL) return; rc_unregister_device(dev->kernel_ir->rc); kfree(dev->kernel_ir->phys); kfree(dev->kernel_ir->name); kfree(dev->kernel_ir); dev->kernel_ir = NULL; }
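/*
 * Illustrative only, not part of the driver above: the max_pulse_width and
 * noise_filter_min_width values in cx23885_input_ir_start() are the protocol
 * unit times padded by +/-25% (RC-5/RC-6A) or +/-37.5% (NEC). A hypothetical
 * helper re-deriving the NEC numbers (assumes <linux/math64.h> for div_u64()):
 */
static void nec_timing_example(void)
{
	/* One NEC unit: (64/3) carrier cycles at 455 kHz / 12 ~= 562637 ns */
	u64 unit_ns = div_u64(64ULL * 12 * NSEC_PER_SEC, 3 * 455000);

	/* Longest symbol is the 16-unit leader mark, padded by +37.5% */
	u64 max_pulse_ns = div_u64(unit_ns * 16 * 1375, 1000);

	/* Shortest symbol is one unit, padded by -37.5% */
	u64 min_pulse_ns = div_u64(unit_ns * 625, 1000);

	/* Matches the 12378022 ns / 351648 ns constants above within rounding */
	pr_info("NEC unit %llu ns, max pulse %llu ns, noise filter %llu ns\n",
		(unsigned long long)unit_ns,
		(unsigned long long)max_pulse_ns,
		(unsigned long long)min_pulse_ns);
}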
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <string.h>

#include <ynl.h>

#include <net/if.h>

#include "ethtool-user.h"

int main(int argc, char **argv)
{
	struct ethtool_channels_get_req_dump creq = {};
	struct ethtool_rings_get_req_dump rreq = {};
	struct ethtool_channels_get_list *channels;
	struct ethtool_rings_get_list *rings;
	struct ynl_sock *ys;

	ys = ynl_sock_create(&ynl_ethtool_family, NULL);
	if (!ys)
		return 1;

	creq._present.header = 1; /* ethtool needs an empty nest, sigh */
	channels = ethtool_channels_get_dump(ys, &creq);
	if (!channels)
		goto err_close;

	printf("Channels:\n");
	ynl_dump_foreach(channels, dev) {
		printf(" %8s: ", dev->header.dev_name);
		if (dev->_present.rx_count)
			printf("rx %d ", dev->rx_count);
		if (dev->_present.tx_count)
			printf("tx %d ", dev->tx_count);
		if (dev->_present.combined_count)
			printf("combined %d ", dev->combined_count);
		printf("\n");
	}
	ethtool_channels_get_list_free(channels);

	rreq._present.header = 1; /* ethtool needs an empty nest.. */
	rings = ethtool_rings_get_dump(ys, &rreq);
	if (!rings)
		goto err_close;

	printf("Rings:\n");
	ynl_dump_foreach(rings, dev) {
		printf(" %8s: ", dev->header.dev_name);
		if (dev->_present.rx)
			printf("rx %d ", dev->rx);
		if (dev->_present.tx)
			printf("tx %d ", dev->tx);
		printf("\n");
	}
	ethtool_rings_get_list_free(rings);

	ynl_sock_destroy(ys);

	return 0;

err_close:
	fprintf(stderr, "YNL (%d): %s\n", ys->err.code, ys->err.msg);
	ynl_sock_destroy(ys);
	return 2;
}
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/mtd/mtd.h>
#include <linux/sched/signal.h>

static inline int mtdtest_relax(void)
{
	cond_resched();
	if (signal_pending(current)) {
		pr_info("aborting test due to pending signal!\n");
		return -EINTR;
	}

	return 0;
}

int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum);
int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
				     unsigned int eb, int ebcnt);
int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
				   unsigned int eb, int ebcnt);
int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf);
int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
		  const void *buf);
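/*
 * Illustrative only: a hedged sketch of how the helpers declared above are
 * typically combined by an mtd_test module -- scan for bad blocks once, then
 * erase/write/read each good eraseblock, calling mtdtest_relax() so a
 * pending signal aborts the test cleanly. run_simple_pass() and the pattern
 * buffers are hypothetical.
 */
static int run_simple_pass(struct mtd_info *mtd, unsigned char *bbt,
			   void *writebuf, void *readbuf)
{
	unsigned int ebcnt = mtd_div_by_eb(mtd->size, mtd);
	unsigned int i;
	int err;

	err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		return err;

	for (i = 0; i < ebcnt; i++) {
		loff_t addr = (loff_t)i * mtd->erasesize;

		if (bbt[i])	/* skip blocks the scan marked bad */
			continue;

		err = mtdtest_erase_eraseblock(mtd, i);
		if (err)
			return err;

		err = mtdtest_write(mtd, addr, mtd->erasesize, writebuf);
		if (err)
			return err;

		err = mtdtest_read(mtd, addr, mtd->erasesize, readbuf);
		if (err)
			return err;

		err = mtdtest_relax();
		if (err)
			return err;
	}

	return 0;
}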
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __INTEL_RPS_H__
#define __INTEL_RPS_H__

#define gen5_rps_irq_handler(x) ({})

#endif /* __INTEL_RPS_H__ */